/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/kref.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_cache.h>
#include "mlx5_ib.h"
#include "srq.h"
#include "qp.h"

static void mlx5_ib_cq_comp(struct mlx5_core_cq *cq, struct mlx5_eqe *eqe)
{
	struct ib_cq *ibcq = &to_mibcq(cq)->ibcq;

	ibcq->comp_handler(ibcq, ibcq->cq_context);
}

static void mlx5_ib_cq_event(struct mlx5_core_cq *mcq, enum mlx5_event type)
{
	struct mlx5_ib_cq *cq = container_of(mcq, struct mlx5_ib_cq, mcq);
	struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
	struct ib_cq *ibcq = &cq->ibcq;
	struct ib_event event;

	if (type != MLX5_EVENT_TYPE_CQ_ERROR) {
		mlx5_ib_warn(dev, "Unexpected event type %d on CQ %06x\n",
			     type, mcq->cqn);
		return;
	}

	if (ibcq->event_handler) {
		event.device = &dev->ib_dev;
		event.event = IB_EVENT_CQ_ERR;
		event.element.cq = ibcq;
		ibcq->event_handler(&event, ibcq->cq_context);
	}
}

static void *get_cqe(struct mlx5_ib_cq *cq, int n)
{
	return mlx5_frag_buf_get_wqe(&cq->buf.fbc, n);
}

static u8 sw_ownership_bit(int n, int nent)
{
	return (n & nent) ? 1 : 0;
}

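/* Return the CQE at index n only if it is owned by software (valid opcode
 * and an ownership bit matching the consumer index parity), otherwise NULL.
 */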
static void *get_sw_cqe(struct mlx5_ib_cq *cq, int n)
{
	void *cqe = get_cqe(cq, n & cq->ibcq.cqe);
	struct mlx5_cqe64 *cqe64;

	cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64;

	if (likely(get_cqe_opcode(cqe64) != MLX5_CQE_INVALID) &&
	    !((cqe64->op_own & MLX5_CQE_OWNER_MASK) ^ !!(n & (cq->ibcq.cqe + 1)))) {
		return cqe;
	} else {
		return NULL;
	}
}

static void *next_cqe_sw(struct mlx5_ib_cq *cq)
{
	return get_sw_cqe(cq, cq->mcq.cons_index);
}

static enum ib_wc_opcode get_umr_comp(struct mlx5_ib_wq *wq, int idx)
{
	switch (wq->wr_data[idx]) {
	case MLX5_IB_WR_UMR:
		return 0;

	case IB_WR_LOCAL_INV:
		return IB_WC_LOCAL_INV;

	case IB_WR_REG_MR:
		return IB_WC_REG_MR;

	default:
		pr_warn("unknown completion status\n");
		return 0;
	}
}

static void handle_good_req(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
			    struct mlx5_ib_wq *wq, int idx)
{
	wc->wc_flags = 0;
	switch (be32_to_cpu(cqe->sop_drop_qpn) >> 24) {
	case MLX5_OPCODE_RDMA_WRITE_IMM:
		wc->wc_flags |= IB_WC_WITH_IMM;
		fallthrough;
	case MLX5_OPCODE_RDMA_WRITE:
		wc->opcode = IB_WC_RDMA_WRITE;
		break;
	case MLX5_OPCODE_SEND_IMM:
		wc->wc_flags |= IB_WC_WITH_IMM;
		fallthrough;
	case MLX5_OPCODE_SEND:
	case MLX5_OPCODE_SEND_INVAL:
		wc->opcode = IB_WC_SEND;
		break;
	case MLX5_OPCODE_RDMA_READ:
		wc->opcode = IB_WC_RDMA_READ;
		wc->byte_len = be32_to_cpu(cqe->byte_cnt);
		break;
	case MLX5_OPCODE_ATOMIC_CS:
		wc->opcode = IB_WC_COMP_SWAP;
		wc->byte_len = 8;
		break;
	case MLX5_OPCODE_ATOMIC_FA:
		wc->opcode = IB_WC_FETCH_ADD;
		wc->byte_len = 8;
		break;
	case MLX5_OPCODE_ATOMIC_MASKED_CS:
		wc->opcode = IB_WC_MASKED_COMP_SWAP;
		wc->byte_len = 8;
		break;
	case MLX5_OPCODE_ATOMIC_MASKED_FA:
		wc->opcode = IB_WC_MASKED_FETCH_ADD;
		wc->byte_len = 8;
		break;
	case MLX5_OPCODE_UMR:
		wc->opcode = get_umr_comp(wq, idx);
		break;
	}
}

enum {
	MLX5_GRH_IN_BUFFER = 1,
	MLX5_GRH_IN_CQE = 2,
};

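/* Fill in a receive work completion from the CQE: wr_id (taken from the SRQ
 * or RQ), byte count, opcode and flags, and, on RoCE ports, VLAN and network
 * header type information.
 */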
static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
			     struct mlx5_ib_qp *qp)
{
	enum rdma_link_layer ll = rdma_port_get_link_layer(qp->ibqp.device, 1);
	struct mlx5_ib_dev *dev = to_mdev(qp->ibqp.device);
	struct mlx5_ib_srq *srq;
	struct mlx5_ib_wq *wq;
	u16 wqe_ctr;
	u8 roce_packet_type;
	bool vlan_present;
	u8 g;

	if (qp->ibqp.srq || qp->ibqp.xrcd) {
		struct mlx5_core_srq *msrq = NULL;

		if (qp->ibqp.xrcd) {
			msrq = mlx5_cmd_get_srq(dev, be32_to_cpu(cqe->srqn));
			srq = to_mibsrq(msrq);
		} else {
			srq = to_msrq(qp->ibqp.srq);
		}
		if (srq) {
			wqe_ctr = be16_to_cpu(cqe->wqe_counter);
			wc->wr_id = srq->wrid[wqe_ctr];
			mlx5_ib_free_srq_wqe(srq, wqe_ctr);
			if (msrq)
				mlx5_core_res_put(&msrq->common);
		}
	} else {
		wq = &qp->rq;
		wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
		++wq->tail;
	}
	wc->byte_len = be32_to_cpu(cqe->byte_cnt);

	switch (get_cqe_opcode(cqe)) {
	case MLX5_CQE_RESP_WR_IMM:
		wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
		wc->wc_flags = IB_WC_WITH_IMM;
		wc->ex.imm_data = cqe->immediate;
		break;
	case MLX5_CQE_RESP_SEND:
		wc->opcode = IB_WC_RECV;
		wc->wc_flags = IB_WC_IP_CSUM_OK;
		if (unlikely(!((cqe->hds_ip_ext & CQE_L3_OK) &&
			       (cqe->hds_ip_ext & CQE_L4_OK))))
			wc->wc_flags = 0;
		break;
	case MLX5_CQE_RESP_SEND_IMM:
		wc->opcode = IB_WC_RECV;
		wc->wc_flags = IB_WC_WITH_IMM;
		wc->ex.imm_data = cqe->immediate;
		break;
	case MLX5_CQE_RESP_SEND_INV:
		wc->opcode = IB_WC_RECV;
		wc->wc_flags = IB_WC_WITH_INVALIDATE;
		wc->ex.invalidate_rkey = be32_to_cpu(cqe->inval_rkey);
		break;
	}
	wc->src_qp = be32_to_cpu(cqe->flags_rqpn) & 0xffffff;
	wc->dlid_path_bits = cqe->ml_path;
	g = (be32_to_cpu(cqe->flags_rqpn) >> 28) & 3;
	wc->wc_flags |= g ? IB_WC_GRH : 0;
	if (unlikely(is_qp1(qp->ibqp.qp_type))) {
		u16 pkey = be32_to_cpu(cqe->pkey) & 0xffff;

		ib_find_cached_pkey(&dev->ib_dev, qp->port, pkey,
				    &wc->pkey_index);
	} else {
		wc->pkey_index = 0;
	}

	if (ll != IB_LINK_LAYER_ETHERNET) {
		wc->slid = be16_to_cpu(cqe->slid);
		wc->sl = (be32_to_cpu(cqe->flags_rqpn) >> 24) & 0xf;
		return;
	}

	wc->slid = 0;
	vlan_present = cqe->l4_l3_hdr_type & 0x1;
	roce_packet_type = (be32_to_cpu(cqe->flags_rqpn) >> 24) & 0x3;
	if (vlan_present) {
		wc->vlan_id = (be16_to_cpu(cqe->vlan_info)) & 0xfff;
		wc->sl = (be16_to_cpu(cqe->vlan_info) >> 13) & 0x7;
		wc->wc_flags |= IB_WC_WITH_VLAN;
	} else {
		wc->sl = 0;
	}

	switch (roce_packet_type) {
	case MLX5_CQE_ROCE_L3_HEADER_TYPE_GRH:
		wc->network_hdr_type = RDMA_NETWORK_IB;
		break;
	case MLX5_CQE_ROCE_L3_HEADER_TYPE_IPV6:
		wc->network_hdr_type = RDMA_NETWORK_IPV6;
		break;
	case MLX5_CQE_ROCE_L3_HEADER_TYPE_IPV4:
		wc->network_hdr_type = RDMA_NETWORK_IPV4;
		break;
	}
	wc->wc_flags |= IB_WC_WITH_NETWORK_HDR_TYPE;
}

static void dump_cqe(struct mlx5_ib_dev *dev, struct mlx5_err_cqe *cqe)
{
	mlx5_ib_warn(dev, "dump error cqe\n");
	mlx5_dump_err_cqe(dev->mdev, cqe);
}

static void mlx5_handle_error_cqe(struct mlx5_ib_dev *dev,
				  struct mlx5_err_cqe *cqe,
				  struct ib_wc *wc)
{
	int dump = 1;

	switch (cqe->syndrome) {
	case MLX5_CQE_SYNDROME_LOCAL_LENGTH_ERR:
		wc->status = IB_WC_LOC_LEN_ERR;
		break;
	case MLX5_CQE_SYNDROME_LOCAL_QP_OP_ERR:
		wc->status = IB_WC_LOC_QP_OP_ERR;
		break;
	case MLX5_CQE_SYNDROME_LOCAL_PROT_ERR:
		wc->status = IB_WC_LOC_PROT_ERR;
		break;
	case MLX5_CQE_SYNDROME_WR_FLUSH_ERR:
		dump = 0;
		wc->status = IB_WC_WR_FLUSH_ERR;
		break;
	case MLX5_CQE_SYNDROME_MW_BIND_ERR:
		wc->status = IB_WC_MW_BIND_ERR;
		break;
	case MLX5_CQE_SYNDROME_BAD_RESP_ERR:
		wc->status = IB_WC_BAD_RESP_ERR;
		break;
	case MLX5_CQE_SYNDROME_LOCAL_ACCESS_ERR:
		wc->status = IB_WC_LOC_ACCESS_ERR;
		break;
	case MLX5_CQE_SYNDROME_REMOTE_INVAL_REQ_ERR:
		wc->status = IB_WC_REM_INV_REQ_ERR;
		break;
	case MLX5_CQE_SYNDROME_REMOTE_ACCESS_ERR:
		wc->status = IB_WC_REM_ACCESS_ERR;
		break;
	case MLX5_CQE_SYNDROME_REMOTE_OP_ERR:
		wc->status = IB_WC_REM_OP_ERR;
		break;
	case MLX5_CQE_SYNDROME_TRANSPORT_RETRY_EXC_ERR:
		wc->status = IB_WC_RETRY_EXC_ERR;
		dump = 0;
		break;
	case MLX5_CQE_SYNDROME_RNR_RETRY_EXC_ERR:
		wc->status = IB_WC_RNR_RETRY_EXC_ERR;
		dump = 0;
		break;
	case MLX5_CQE_SYNDROME_REMOTE_ABORTED_ERR:
		wc->status = IB_WC_REM_ABORT_ERR;
		break;
	default:
		wc->status = IB_WC_GENERAL_ERR;
		break;
	}

	wc->vendor_err = cqe->vendor_err_synd;
	if (dump)
		dump_cqe(dev, cqe);
}

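/* Walk the send work-request list from the last polled entry up to the
 * just-completed WQE and record the following entry as the new last_poll.
 */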
static void handle_atomics(struct mlx5_ib_qp *qp, struct mlx5_cqe64 *cqe64,
			   u16 tail, u16 head)
{
	u16 idx;

	do {
		idx = tail & (qp->sq.wqe_cnt - 1);
		if (idx == head)
			break;

		tail = qp->sq.w_list[idx].next;
	} while (1);
	tail = qp->sq.w_list[idx].next;
	qp->sq.last_poll = tail;
}

static void free_cq_buf(struct mlx5_ib_dev *dev, struct mlx5_ib_cq_buf *buf)
{
	mlx5_frag_buf_free(dev->mdev, &buf->frag_buf);
}

static void get_sig_err_item(struct mlx5_sig_err_cqe *cqe,
			     struct ib_sig_err *item)
{
	u16 syndrome = be16_to_cpu(cqe->syndrome);

#define GUARD_ERR   (1 << 13)
#define APPTAG_ERR  (1 << 12)
#define REFTAG_ERR  (1 << 11)

	if (syndrome & GUARD_ERR) {
		item->err_type = IB_SIG_BAD_GUARD;
		item->expected = be32_to_cpu(cqe->expected_trans_sig) >> 16;
		item->actual = be32_to_cpu(cqe->actual_trans_sig) >> 16;
	} else
	if (syndrome & REFTAG_ERR) {
		item->err_type = IB_SIG_BAD_REFTAG;
		item->expected = be32_to_cpu(cqe->expected_reftag);
		item->actual = be32_to_cpu(cqe->actual_reftag);
	} else
	if (syndrome & APPTAG_ERR) {
		item->err_type = IB_SIG_BAD_APPTAG;
		item->expected = be32_to_cpu(cqe->expected_trans_sig) & 0xffff;
		item->actual = be32_to_cpu(cqe->actual_trans_sig) & 0xffff;
	} else {
		pr_err("Got signature completion error with bad syndrome %04x\n",
		       syndrome);
	}

	item->sig_err_offset = be64_to_cpu(cqe->err_offset);
	item->key = be32_to_cpu(cqe->mkey);
}

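/* Generate flushed completions (IB_WC_WR_FLUSH_ERR) in software for the
 * outstanding WQEs of one work queue; used when the device is in internal
 * error state and the hardware will not return CQEs anymore.
 */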
static void sw_comp(struct mlx5_ib_qp *qp, int num_entries, struct ib_wc *wc,
		    int *npolled, bool is_send)
{
	struct mlx5_ib_wq *wq;
	unsigned int cur;
	int np;
	int i;

	wq = (is_send) ? &qp->sq : &qp->rq;
	cur = wq->head - wq->tail;
	np = *npolled;

	if (cur == 0)
		return;

	for (i = 0; i < cur && np < num_entries; i++) {
		unsigned int idx;

		idx = (is_send) ? wq->last_poll : wq->tail;
		idx &= (wq->wqe_cnt - 1);
		wc->wr_id = wq->wrid[idx];
		wc->status = IB_WC_WR_FLUSH_ERR;
		wc->vendor_err = MLX5_CQE_SYNDROME_WR_FLUSH_ERR;
		wq->tail++;
		if (is_send)
			wq->last_poll = wq->w_list[idx].next;
		np++;
		wc->qp = &qp->ibqp;
		wc++;
	}
	*npolled = np;
}

static void mlx5_ib_poll_sw_comp(struct mlx5_ib_cq *cq, int num_entries,
				 struct ib_wc *wc, int *npolled)
{
	struct mlx5_ib_qp *qp;

	*npolled = 0;
	/* Find uncompleted WQEs belonging to that cq and return mimicked
	 * (flushed) completions for them.
	 */
	list_for_each_entry(qp, &cq->list_send_qp, cq_send_list) {
		sw_comp(qp, num_entries, wc + *npolled, npolled, true);
		if (*npolled >= num_entries)
			return;
	}

	list_for_each_entry(qp, &cq->list_recv_qp, cq_recv_list) {
		sw_comp(qp, num_entries, wc + *npolled, npolled, false);
		if (*npolled >= num_entries)
			return;
	}
}

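/* Poll a single hardware CQE and translate it into an ib_wc. Returns
 * -EAGAIN when no CQE is in software ownership.
 */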
static int mlx5_poll_one(struct mlx5_ib_cq *cq,
			 struct mlx5_ib_qp **cur_qp,
			 struct ib_wc *wc)
{
	struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
	struct mlx5_err_cqe *err_cqe;
	struct mlx5_cqe64 *cqe64;
	struct mlx5_core_qp *mqp;
	struct mlx5_ib_wq *wq;
	uint8_t opcode;
	uint32_t qpn;
	u16 wqe_ctr;
	void *cqe;
	int idx;

repoll:
	cqe = next_cqe_sw(cq);
	if (!cqe)
		return -EAGAIN;

	cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64;

	++cq->mcq.cons_index;

	/* Make sure we read CQ entry contents after we've checked the
	 * ownership bit.
	 */
	rmb();

	opcode = get_cqe_opcode(cqe64);
	if (unlikely(opcode == MLX5_CQE_RESIZE_CQ)) {
		if (likely(cq->resize_buf)) {
			free_cq_buf(dev, &cq->buf);
			cq->buf = *cq->resize_buf;
			kfree(cq->resize_buf);
			cq->resize_buf = NULL;
			goto repoll;
		} else {
			mlx5_ib_warn(dev, "unexpected resize cqe\n");
		}
	}

	qpn = ntohl(cqe64->sop_drop_qpn) & 0xffffff;
	if (!*cur_qp || (qpn != (*cur_qp)->ibqp.qp_num)) {
		/* We do not have to take the QP table lock here,
		 * because CQs will be locked while QPs are removed
		 * from the table.
		 */
		mqp = radix_tree_lookup(&dev->qp_table.tree, qpn);
		*cur_qp = to_mibqp(mqp);
	}

	wc->qp = &(*cur_qp)->ibqp;
	switch (opcode) {
	case MLX5_CQE_REQ:
		wq = &(*cur_qp)->sq;
		wqe_ctr = be16_to_cpu(cqe64->wqe_counter);
		idx = wqe_ctr & (wq->wqe_cnt - 1);
		handle_good_req(wc, cqe64, wq, idx);
		handle_atomics(*cur_qp, cqe64, wq->last_poll, idx);
		wc->wr_id = wq->wrid[idx];
		wq->tail = wq->wqe_head[idx] + 1;
		wc->status = IB_WC_SUCCESS;
		break;
	case MLX5_CQE_RESP_WR_IMM:
	case MLX5_CQE_RESP_SEND:
	case MLX5_CQE_RESP_SEND_IMM:
	case MLX5_CQE_RESP_SEND_INV:
		handle_responder(wc, cqe64, *cur_qp);
		wc->status = IB_WC_SUCCESS;
		break;
	case MLX5_CQE_RESIZE_CQ:
		break;
	case MLX5_CQE_REQ_ERR:
	case MLX5_CQE_RESP_ERR:
		err_cqe = (struct mlx5_err_cqe *)cqe64;
		mlx5_handle_error_cqe(dev, err_cqe, wc);
		mlx5_ib_dbg(dev, "%s error cqe on cqn 0x%x:\n",
			    opcode == MLX5_CQE_REQ_ERR ?
			    "Requestor" : "Responder", cq->mcq.cqn);
		mlx5_ib_dbg(dev, "syndrome 0x%x, vendor syndrome 0x%x\n",
			    err_cqe->syndrome, err_cqe->vendor_err_synd);
		if (opcode == MLX5_CQE_REQ_ERR) {
			wq = &(*cur_qp)->sq;
			wqe_ctr = be16_to_cpu(cqe64->wqe_counter);
			idx = wqe_ctr & (wq->wqe_cnt - 1);
			wc->wr_id = wq->wrid[idx];
			wq->tail = wq->wqe_head[idx] + 1;
		} else {
			struct mlx5_ib_srq *srq;

			if ((*cur_qp)->ibqp.srq) {
				srq = to_msrq((*cur_qp)->ibqp.srq);
				wqe_ctr = be16_to_cpu(cqe64->wqe_counter);
				wc->wr_id = srq->wrid[wqe_ctr];
				mlx5_ib_free_srq_wqe(srq, wqe_ctr);
			} else {
				wq = &(*cur_qp)->rq;
				wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
				++wq->tail;
			}
		}
		break;
	case MLX5_CQE_SIG_ERR: {
		struct mlx5_sig_err_cqe *sig_err_cqe =
			(struct mlx5_sig_err_cqe *)cqe64;
		struct mlx5_core_sig_ctx *sig;

		xa_lock(&dev->sig_mrs);
		sig = xa_load(&dev->sig_mrs,
			      mlx5_base_mkey(be32_to_cpu(sig_err_cqe->mkey)));
		get_sig_err_item(sig_err_cqe, &sig->err_item);
		sig->sig_err_exists = true;
		sig->sigerr_count++;

		mlx5_ib_warn(dev, "CQN: 0x%x Got SIGERR on key: 0x%x err_type %x err_offset %llx expected %x actual %x\n",
			     cq->mcq.cqn, sig->err_item.key,
			     sig->err_item.err_type,
			     sig->err_item.sig_err_offset,
			     sig->err_item.expected,
			     sig->err_item.actual);

		xa_unlock(&dev->sig_mrs);
		goto repoll;
	}
	}

	return 0;
}

static int poll_soft_wc(struct mlx5_ib_cq *cq, int num_entries,
			struct ib_wc *wc, bool is_fatal_err)
{
	struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
	struct mlx5_ib_wc *soft_wc, *next;
	int npolled = 0;

	list_for_each_entry_safe(soft_wc, next, &cq->wc_list, list) {
		if (npolled >= num_entries)
			break;

		mlx5_ib_dbg(dev, "polled software generated completion on CQ 0x%x\n",
			    cq->mcq.cqn);

		if (unlikely(is_fatal_err)) {
			soft_wc->wc.status = IB_WC_WR_FLUSH_ERR;
			soft_wc->wc.vendor_err = MLX5_CQE_SYNDROME_WR_FLUSH_ERR;
		}
		wc[npolled++] = soft_wc->wc;
		list_del(&soft_wc->list);
		kfree(soft_wc);
	}

	return npolled;
}

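/* Verbs poll_cq entry point: software-generated completions are drained
 * first, then hardware CQEs. If the device is in internal error state,
 * flushed completions are generated in software instead.
 */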
int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
	struct mlx5_ib_cq *cq = to_mcq(ibcq);
	struct mlx5_ib_qp *cur_qp = NULL;
	struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
	struct mlx5_core_dev *mdev = dev->mdev;
	unsigned long flags;
	int soft_polled = 0;
	int npolled;

	spin_lock_irqsave(&cq->lock, flags);
	if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
		/* make sure no soft wqe's are waiting */
		if (unlikely(!list_empty(&cq->wc_list)))
			soft_polled = poll_soft_wc(cq, num_entries, wc, true);

		mlx5_ib_poll_sw_comp(cq, num_entries - soft_polled,
				     wc + soft_polled, &npolled);
		goto out;
	}

	if (unlikely(!list_empty(&cq->wc_list)))
		soft_polled = poll_soft_wc(cq, num_entries, wc, false);

	for (npolled = 0; npolled < num_entries - soft_polled; npolled++) {
		if (mlx5_poll_one(cq, &cur_qp, wc + soft_polled + npolled))
			break;
	}

	if (npolled)
		mlx5_cq_set_ci(&cq->mcq);
out:
	spin_unlock_irqrestore(&cq->lock, flags);

	return soft_polled + npolled;
}

int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
	struct mlx5_core_dev *mdev = to_mdev(ibcq->device)->mdev;
	struct mlx5_ib_cq *cq = to_mcq(ibcq);
	void __iomem *uar_page = mdev->priv.uar->map;
	unsigned long irq_flags;
	int ret = 0;

	spin_lock_irqsave(&cq->lock, irq_flags);
	if (cq->notify_flags != IB_CQ_NEXT_COMP)
		cq->notify_flags = flags & IB_CQ_SOLICITED_MASK;

	if ((flags & IB_CQ_REPORT_MISSED_EVENTS) && !list_empty(&cq->wc_list))
		ret = 1;
	spin_unlock_irqrestore(&cq->lock, irq_flags);

	mlx5_cq_arm(&cq->mcq,
		    (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
		    MLX5_CQ_DB_REQ_NOT_SOL : MLX5_CQ_DB_REQ_NOT,
		    uar_page, to_mcq(ibcq)->mcq.cons_index);

	return ret;
}

static int alloc_cq_frag_buf(struct mlx5_ib_dev *dev,
			     struct mlx5_ib_cq_buf *buf,
			     int nent,
			     int cqe_size)
{
	struct mlx5_frag_buf *frag_buf = &buf->frag_buf;
	u8 log_wq_stride = 6 + (cqe_size == 128 ? 1 : 0);
	u8 log_wq_sz = ilog2(cqe_size);
	int err;

	err = mlx5_frag_buf_alloc_node(dev->mdev,
				       nent * cqe_size,
				       frag_buf,
				       dev->mdev->priv.numa_node);
	if (err)
		return err;

	mlx5_init_fbc(frag_buf->frags, log_wq_stride, log_wq_sz, &buf->fbc);

	buf->cqe_size = cqe_size;
	buf->nent = nent;

	return 0;
}

enum {
	MLX5_CQE_RES_FORMAT_HASH = 0,
	MLX5_CQE_RES_FORMAT_CSUM = 1,
	MLX5_CQE_RES_FORMAT_CSUM_STRIDX = 3,
};

static int mini_cqe_res_format_to_hw(struct mlx5_ib_dev *dev, u8 format)
{
	switch (format) {
	case MLX5_IB_CQE_RES_FORMAT_HASH:
		return MLX5_CQE_RES_FORMAT_HASH;
	case MLX5_IB_CQE_RES_FORMAT_CSUM:
		return MLX5_CQE_RES_FORMAT_CSUM;
	case MLX5_IB_CQE_RES_FORMAT_CSUM_STRIDX:
		if (MLX5_CAP_GEN(dev->mdev, mini_cqe_resp_stride_index))
			return MLX5_CQE_RES_FORMAT_CSUM_STRIDX;
		return -EOPNOTSUPP;
	default:
		return -EINVAL;
	}
}

Shamir Rabinovitchff23dfa2019-03-31 19:10:07 +0300705 struct mlx5_ib_cq *cq, int entries, u32 **cqb,
Eli Cohene126ba92013-07-07 17:25:49 +0300706 int *cqe_size, int *index, int *inlen)
707{
Bodong Wang1cbe6fc2016-10-31 12:16:45 +0200708 struct mlx5_ib_create_cq ucmd = {};
Yann Droneauda8237b32014-05-05 19:33:21 +0200709 size_t ucmdlen;
Eli Cohene126ba92013-07-07 17:25:49 +0300710 int page_shift;
Saeed Mahameed27827782016-07-16 02:33:22 +0300711 __be64 *pas;
Eli Cohene126ba92013-07-07 17:25:49 +0300712 int npages;
713 int ncont;
Saeed Mahameed27827782016-07-16 02:33:22 +0300714 void *cqc;
Eli Cohene126ba92013-07-07 17:25:49 +0300715 int err;
Shamir Rabinovitchff23dfa2019-03-31 19:10:07 +0300716 struct mlx5_ib_ucontext *context = rdma_udata_to_drv_context(
717 udata, struct mlx5_ib_ucontext, ibucontext);
Eli Cohene126ba92013-07-07 17:25:49 +0300718
Yishai Hadas64d99f62020-03-24 08:01:40 +0200719 ucmdlen = min(udata->inlen, sizeof(ucmd));
720 if (ucmdlen < offsetof(struct mlx5_ib_create_cq, flags))
721 return -EINVAL;
Yann Droneauda8237b32014-05-05 19:33:21 +0200722
723 if (ib_copy_from_udata(&ucmd, udata, ucmdlen))
Eli Cohene126ba92013-07-07 17:25:49 +0300724 return -EFAULT;
725
Yishai Hadas64d99f62020-03-24 08:01:40 +0200726 if ((ucmd.flags & ~(MLX5_IB_CREATE_CQ_FLAGS_CQE_128B_PAD |
727 MLX5_IB_CREATE_CQ_FLAGS_UAR_PAGE_INDEX)))
Yann Droneauda8237b32014-05-05 19:33:21 +0200728 return -EINVAL;
729
Yishai Hadas64d99f62020-03-24 08:01:40 +0200730 if ((ucmd.cqe_size != 64 && ucmd.cqe_size != 128) ||
731 ucmd.reserved0 || ucmd.reserved1)
Eli Cohene126ba92013-07-07 17:25:49 +0300732 return -EINVAL;
733
734 *cqe_size = ucmd.cqe_size;
735
Jason Gunthorpeb0ea0fa2019-01-09 11:15:16 +0200736 cq->buf.umem =
Moni Shouac320e522020-01-15 14:43:31 +0200737 ib_umem_get(&dev->ib_dev, ucmd.buf_addr,
738 entries * ucmd.cqe_size, IB_ACCESS_LOCAL_WRITE);
Eli Cohene126ba92013-07-07 17:25:49 +0300739 if (IS_ERR(cq->buf.umem)) {
740 err = PTR_ERR(cq->buf.umem);
741 return err;
742 }
743
Shamir Rabinovitchff23dfa2019-03-31 19:10:07 +0300744 err = mlx5_ib_db_map_user(context, udata, ucmd.db_addr, &cq->db);
Eli Cohene126ba92013-07-07 17:25:49 +0300745 if (err)
746 goto err_umem;
747
Majd Dibbiny762f8992016-10-27 16:36:47 +0300748 mlx5_ib_cont_pages(cq->buf.umem, ucmd.buf_addr, 0, &npages, &page_shift,
Eli Cohene126ba92013-07-07 17:25:49 +0300749 &ncont, NULL);
750 mlx5_ib_dbg(dev, "addr 0x%llx, size %u, npages %d, page_shift %d, ncont %d\n",
751 ucmd.buf_addr, entries * ucmd.cqe_size, npages, page_shift, ncont);
752
Saeed Mahameed27827782016-07-16 02:33:22 +0300753 *inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
754 MLX5_FLD_SZ_BYTES(create_cq_in, pas[0]) * ncont;
Leon Romanovsky1b9a07e2017-05-10 21:32:18 +0300755 *cqb = kvzalloc(*inlen, GFP_KERNEL);
Eli Cohene126ba92013-07-07 17:25:49 +0300756 if (!*cqb) {
757 err = -ENOMEM;
758 goto err_db;
759 }
Saeed Mahameed27827782016-07-16 02:33:22 +0300760
761 pas = (__be64 *)MLX5_ADDR_OF(create_cq_in, *cqb, pas);
762 mlx5_ib_populate_pas(dev, cq->buf.umem, page_shift, pas, 0);
763
764 cqc = MLX5_ADDR_OF(create_cq_in, *cqb, cq_context);
765 MLX5_SET(cqc, cqc, log_page_size,
766 page_shift - MLX5_ADAPTER_PAGE_SHIFT);
Eli Cohene126ba92013-07-07 17:25:49 +0300767
Yishai Hadas0a2fd012020-03-24 08:01:43 +0200768 if (ucmd.flags & MLX5_IB_CREATE_CQ_FLAGS_UAR_PAGE_INDEX) {
Yishai Hadas64d99f62020-03-24 08:01:40 +0200769 *index = ucmd.uar_page_index;
Yishai Hadas0a2fd012020-03-24 08:01:43 +0200770 } else if (context->bfregi.lib_uar_dyn) {
771 err = -EINVAL;
772 goto err_cqb;
773 } else {
Yishai Hadas64d99f62020-03-24 08:01:40 +0200774 *index = context->bfregi.sys_pages[0];
Yishai Hadas0a2fd012020-03-24 08:01:43 +0200775 }
Eli Cohene126ba92013-07-07 17:25:49 +0300776
Bodong Wang1cbe6fc2016-10-31 12:16:45 +0200777 if (ucmd.cqe_comp_en == 1) {
Yonatan Cohen6f1006a2018-05-27 13:42:34 +0300778 int mini_cqe_format;
779
Guy Levide57f2a2017-10-19 08:25:52 +0300780 if (!((*cqe_size == 128 &&
781 MLX5_CAP_GEN(dev->mdev, cqe_compression_128)) ||
782 (*cqe_size == 64 &&
783 MLX5_CAP_GEN(dev->mdev, cqe_compression)))) {
Bodong Wang1cbe6fc2016-10-31 12:16:45 +0200784 err = -EOPNOTSUPP;
785 mlx5_ib_warn(dev, "CQE compression is not supported for size %d!\n",
786 *cqe_size);
787 goto err_cqb;
788 }
789
Yonatan Cohen6f1006a2018-05-27 13:42:34 +0300790 mini_cqe_format =
791 mini_cqe_res_format_to_hw(dev,
792 ucmd.cqe_comp_res_format);
793 if (mini_cqe_format < 0) {
794 err = mini_cqe_format;
795 mlx5_ib_dbg(dev, "CQE compression res format %d error: %d\n",
796 ucmd.cqe_comp_res_format, err);
Bodong Wang1cbe6fc2016-10-31 12:16:45 +0200797 goto err_cqb;
798 }
799
800 MLX5_SET(cqc, cqc, cqe_comp_en, 1);
Yonatan Cohen6f1006a2018-05-27 13:42:34 +0300801 MLX5_SET(cqc, cqc, mini_cqe_res_format, mini_cqe_format);
Bodong Wang1cbe6fc2016-10-31 12:16:45 +0200802 }
803
Guy Levi7a0c8f42017-10-19 08:25:53 +0300804 if (ucmd.flags & MLX5_IB_CREATE_CQ_FLAGS_CQE_128B_PAD) {
805 if (*cqe_size != 128 ||
806 !MLX5_CAP_GEN(dev->mdev, cqe_128_always)) {
807 err = -EOPNOTSUPP;
808 mlx5_ib_warn(dev,
809 "CQE padding is not supported for CQE size of %dB!\n",
810 *cqe_size);
811 goto err_cqb;
812 }
813
814 cq->private_flags |= MLX5_IB_CQ_PR_FLAGS_CQE_128_PAD;
815 }
816
Shamir Rabinovitchff23dfa2019-03-31 19:10:07 +0300817 MLX5_SET(create_cq_in, *cqb, uid, context->devx_uid);
Eli Cohene126ba92013-07-07 17:25:49 +0300818 return 0;
819
Bodong Wang1cbe6fc2016-10-31 12:16:45 +0200820err_cqb:
Christophe JAILLET909d4342018-05-16 17:50:19 -0700821 kvfree(*cqb);
Bodong Wang1cbe6fc2016-10-31 12:16:45 +0200822
Eli Cohene126ba92013-07-07 17:25:49 +0300823err_db:
Shamir Rabinovitchff23dfa2019-03-31 19:10:07 +0300824 mlx5_ib_db_unmap_user(context, &cq->db);
Eli Cohene126ba92013-07-07 17:25:49 +0300825
826err_umem:
827 ib_umem_release(cq->buf.umem);
828 return err;
829}
830
Shamir Rabinovitchbdeacab2019-03-31 19:10:06 +0300831static void destroy_cq_user(struct mlx5_ib_cq *cq, struct ib_udata *udata)
Eli Cohene126ba92013-07-07 17:25:49 +0300832{
Shamir Rabinovitchbdeacab2019-03-31 19:10:06 +0300833 struct mlx5_ib_ucontext *context = rdma_udata_to_drv_context(
834 udata, struct mlx5_ib_ucontext, ibucontext);
835
836 mlx5_ib_db_unmap_user(context, &cq->db);
Eli Cohene126ba92013-07-07 17:25:49 +0300837 ib_umem_release(cq->buf.umem);
838}
839
Yonatan Cohen388ca8b2018-01-02 16:08:06 +0200840static void init_cq_frag_buf(struct mlx5_ib_cq *cq,
841 struct mlx5_ib_cq_buf *buf)
Eli Cohene126ba92013-07-07 17:25:49 +0300842{
843 int i;
844 void *cqe;
845 struct mlx5_cqe64 *cqe64;
846
Eli Cohenbde51582014-01-14 17:45:18 +0200847 for (i = 0; i < buf->nent; i++) {
Yonatan Cohen388ca8b2018-01-02 16:08:06 +0200848 cqe = get_cqe(cq, i);
Eli Cohenbde51582014-01-14 17:45:18 +0200849 cqe64 = buf->cqe_size == 64 ? cqe : cqe + 64;
850 cqe64->op_own = MLX5_CQE_INVALID << 4;
Eli Cohene126ba92013-07-07 17:25:49 +0300851 }
852}
853
854static int create_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
855 int entries, int cqe_size,
Saeed Mahameed27827782016-07-16 02:33:22 +0300856 u32 **cqb, int *index, int *inlen)
Eli Cohene126ba92013-07-07 17:25:49 +0300857{
Saeed Mahameed27827782016-07-16 02:33:22 +0300858 __be64 *pas;
859 void *cqc;
Eli Cohene126ba92013-07-07 17:25:49 +0300860 int err;
861
Jack Morgenstein9603b612014-07-28 23:30:22 +0300862 err = mlx5_db_alloc(dev->mdev, &cq->db);
Eli Cohene126ba92013-07-07 17:25:49 +0300863 if (err)
864 return err;
865
866 cq->mcq.set_ci_db = cq->db.db;
867 cq->mcq.arm_db = cq->db.db + 1;
Eli Cohene126ba92013-07-07 17:25:49 +0300868 cq->mcq.cqe_sz = cqe_size;
869
Yonatan Cohen388ca8b2018-01-02 16:08:06 +0200870 err = alloc_cq_frag_buf(dev, &cq->buf, entries, cqe_size);
Eli Cohene126ba92013-07-07 17:25:49 +0300871 if (err)
872 goto err_db;
873
Yonatan Cohen388ca8b2018-01-02 16:08:06 +0200874 init_cq_frag_buf(cq, &cq->buf);
Eli Cohene126ba92013-07-07 17:25:49 +0300875
Saeed Mahameed27827782016-07-16 02:33:22 +0300876 *inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
Yonatan Cohen388ca8b2018-01-02 16:08:06 +0200877 MLX5_FLD_SZ_BYTES(create_cq_in, pas[0]) *
Tariq Toukan4972e6f2018-09-12 15:36:41 +0300878 cq->buf.frag_buf.npages;
Leon Romanovsky1b9a07e2017-05-10 21:32:18 +0300879 *cqb = kvzalloc(*inlen, GFP_KERNEL);
Eli Cohene126ba92013-07-07 17:25:49 +0300880 if (!*cqb) {
881 err = -ENOMEM;
882 goto err_buf;
883 }
Eli Cohene126ba92013-07-07 17:25:49 +0300884
Saeed Mahameed27827782016-07-16 02:33:22 +0300885 pas = (__be64 *)MLX5_ADDR_OF(create_cq_in, *cqb, pas);
Tariq Toukan4972e6f2018-09-12 15:36:41 +0300886 mlx5_fill_page_frag_array(&cq->buf.frag_buf, pas);
Saeed Mahameed27827782016-07-16 02:33:22 +0300887
888 cqc = MLX5_ADDR_OF(create_cq_in, *cqb, cq_context);
889 MLX5_SET(cqc, cqc, log_page_size,
Tariq Toukan4972e6f2018-09-12 15:36:41 +0300890 cq->buf.frag_buf.page_shift -
Yonatan Cohen388ca8b2018-01-02 16:08:06 +0200891 MLX5_ADAPTER_PAGE_SHIFT);
Saeed Mahameed27827782016-07-16 02:33:22 +0300892
Eli Cohen5fe9dec2017-01-03 23:55:25 +0200893 *index = dev->mdev->priv.uar->index;
Eli Cohene126ba92013-07-07 17:25:49 +0300894
895 return 0;
896
897err_buf:
898 free_cq_buf(dev, &cq->buf);
899
900err_db:
Jack Morgenstein9603b612014-07-28 23:30:22 +0300901 mlx5_db_free(dev->mdev, &cq->db);
Eli Cohene126ba92013-07-07 17:25:49 +0300902 return err;
903}
904
905static void destroy_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq)
906{
907 free_cq_buf(dev, &cq->buf);
Jack Morgenstein9603b612014-07-28 23:30:22 +0300908 mlx5_db_free(dev->mdev, &cq->db);
Eli Cohene126ba92013-07-07 17:25:49 +0300909}
910
Haggai Eran25361e02016-02-29 15:45:08 +0200911static void notify_soft_wc_handler(struct work_struct *work)
912{
913 struct mlx5_ib_cq *cq = container_of(work, struct mlx5_ib_cq,
914 notify_work);
915
916 cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
917}
918
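/* Verbs create_cq entry point: validate attributes, round the requested
 * depth up to a power of two, build the CQ buffer (user or kernel) and
 * issue CREATE_CQ to firmware.
 */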
int mlx5_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
		      struct ib_udata *udata)
{
	struct ib_device *ibdev = ibcq->device;
	int entries = attr->cqe;
	int vector = attr->comp_vector;
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_ib_cq *cq = to_mcq(ibcq);
	u32 out[MLX5_ST_SZ_DW(create_cq_out)];
	int index;
	int inlen;
	u32 *cqb = NULL;
	void *cqc;
	int cqe_size;
	unsigned int irqn;
	int eqn;
	int err;

	if (entries < 0 ||
	    (entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz))))
		return -EINVAL;

	if (check_cq_create_flags(attr->flags))
		return -EOPNOTSUPP;

	entries = roundup_pow_of_two(entries + 1);
	if (entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz)))
		return -EINVAL;

	cq->ibcq.cqe = entries - 1;
	mutex_init(&cq->resize_mutex);
	spin_lock_init(&cq->lock);
	cq->resize_buf = NULL;
	cq->resize_umem = NULL;
	cq->create_flags = attr->flags;
	INIT_LIST_HEAD(&cq->list_send_qp);
	INIT_LIST_HEAD(&cq->list_recv_qp);

	if (udata) {
		err = create_cq_user(dev, udata, cq, entries, &cqb, &cqe_size,
				     &index, &inlen);
		if (err)
			return err;
	} else {
		cqe_size = cache_line_size() == 128 ? 128 : 64;
		err = create_cq_kernel(dev, cq, entries, cqe_size, &cqb,
				       &index, &inlen);
		if (err)
			return err;

		INIT_WORK(&cq->notify_work, notify_soft_wc_handler);
	}

	err = mlx5_vector2eqn(dev->mdev, vector, &eqn, &irqn);
	if (err)
		goto err_cqb;

	cq->cqe_size = cqe_size;

	cqc = MLX5_ADDR_OF(create_cq_in, cqb, cq_context);
	MLX5_SET(cqc, cqc, cqe_sz,
		 cqe_sz_to_mlx_sz(cqe_size,
				  cq->private_flags &
				  MLX5_IB_CQ_PR_FLAGS_CQE_128_PAD));
	MLX5_SET(cqc, cqc, log_cq_size, ilog2(entries));
	MLX5_SET(cqc, cqc, uar_page, index);
	MLX5_SET(cqc, cqc, c_eqn, eqn);
	MLX5_SET64(cqc, cqc, dbr_addr, cq->db.dma);
	if (cq->create_flags & IB_UVERBS_CQ_FLAGS_IGNORE_OVERRUN)
		MLX5_SET(cqc, cqc, oi, 1);

	err = mlx5_core_create_cq(dev->mdev, &cq->mcq, cqb, inlen, out, sizeof(out));
	if (err)
		goto err_cqb;

	mlx5_ib_dbg(dev, "cqn 0x%x\n", cq->mcq.cqn);
	cq->mcq.irqn = irqn;
	if (udata)
		cq->mcq.tasklet_ctx.comp = mlx5_ib_cq_comp;
	else
		cq->mcq.comp = mlx5_ib_cq_comp;
	cq->mcq.event = mlx5_ib_cq_event;

	INIT_LIST_HEAD(&cq->wc_list);

	if (udata)
		if (ib_copy_to_udata(udata, &cq->mcq.cqn, sizeof(__u32))) {
			err = -EFAULT;
			goto err_cmd;
		}

	kvfree(cqb);
	return 0;

err_cmd:
	mlx5_core_destroy_cq(dev->mdev, &cq->mcq);

err_cqb:
	kvfree(cqb);
	if (udata)
		destroy_cq_user(cq, udata);
	else
		destroy_cq_kernel(dev, cq);
	return err;
}

void mlx5_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(cq->device);
	struct mlx5_ib_cq *mcq = to_mcq(cq);

	mlx5_core_destroy_cq(dev->mdev, &mcq->mcq);
	if (udata)
		destroy_cq_user(mcq, udata);
	else
		destroy_cq_kernel(dev, mcq);
}

static int is_equal_rsn(struct mlx5_cqe64 *cqe64, u32 rsn)
{
	return rsn == (ntohl(cqe64->sop_drop_qpn) & 0xffffff);
}

void __mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 rsn, struct mlx5_ib_srq *srq)
{
	struct mlx5_cqe64 *cqe64, *dest64;
	void *cqe, *dest;
	u32 prod_index;
	int nfreed = 0;
	u8 owner_bit;

	if (!cq)
		return;

	/* First we need to find the current producer index, so we
	 * know where to start cleaning from. It doesn't matter if HW
	 * adds new entries after this loop -- the QP we're worried
	 * about is already in RESET, so the new entries won't come
	 * from our QP and therefore don't need to be checked.
	 */
	for (prod_index = cq->mcq.cons_index; get_sw_cqe(cq, prod_index); prod_index++)
		if (prod_index == cq->mcq.cons_index + cq->ibcq.cqe)
			break;

	/* Now sweep backwards through the CQ, removing CQ entries
	 * that match our QP by copying older entries on top of them.
	 */
	while ((int) --prod_index - (int) cq->mcq.cons_index >= 0) {
		cqe = get_cqe(cq, prod_index & cq->ibcq.cqe);
		cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64;
		if (is_equal_rsn(cqe64, rsn)) {
			if (srq && (ntohl(cqe64->srqn) & 0xffffff))
				mlx5_ib_free_srq_wqe(srq, be16_to_cpu(cqe64->wqe_counter));
			++nfreed;
		} else if (nfreed) {
			dest = get_cqe(cq, (prod_index + nfreed) & cq->ibcq.cqe);
			dest64 = (cq->mcq.cqe_sz == 64) ? dest : dest + 64;
			owner_bit = dest64->op_own & MLX5_CQE_OWNER_MASK;
			memcpy(dest, cqe, cq->mcq.cqe_sz);
			dest64->op_own = owner_bit |
				(dest64->op_own & ~MLX5_CQE_OWNER_MASK);
		}
	}

	if (nfreed) {
		cq->mcq.cons_index += nfreed;
		/* Make sure update of buffer contents is done before
		 * updating consumer index.
		 */
		wmb();
		mlx5_cq_set_ci(&cq->mcq);
	}
}

void mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq)
{
	if (!cq)
		return;

	spin_lock_irq(&cq->lock);
	__mlx5_ib_cq_clean(cq, qpn, srq);
	spin_unlock_irq(&cq->lock);
}

int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
{
	struct mlx5_ib_dev *dev = to_mdev(cq->device);
	struct mlx5_ib_cq *mcq = to_mcq(cq);
	int err;

	if (!MLX5_CAP_GEN(dev->mdev, cq_moderation))
		return -EOPNOTSUPP;

	if (cq_period > MLX5_MAX_CQ_PERIOD)
		return -EINVAL;

	err = mlx5_core_modify_cq_moderation(dev->mdev, &mcq->mcq,
					     cq_period, cq_count);
	if (err)
		mlx5_ib_warn(dev, "modify cq 0x%x failed\n", mcq->mcq.cqn);

	return err;
}

static int resize_user(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
		       int entries, struct ib_udata *udata, int *npas,
		       int *page_shift, int *cqe_size)
{
	struct mlx5_ib_resize_cq ucmd;
	struct ib_umem *umem;
	int err;
	int npages;

	err = ib_copy_from_udata(&ucmd, udata, sizeof(ucmd));
	if (err)
		return err;

	if (ucmd.reserved0 || ucmd.reserved1)
		return -EINVAL;

	/* check multiplication overflow */
	if (ucmd.cqe_size && SIZE_MAX / ucmd.cqe_size <= entries - 1)
		return -EINVAL;

	umem = ib_umem_get(&dev->ib_dev, ucmd.buf_addr,
			   (size_t)ucmd.cqe_size * entries,
			   IB_ACCESS_LOCAL_WRITE);
	if (IS_ERR(umem)) {
		err = PTR_ERR(umem);
		return err;
	}

	mlx5_ib_cont_pages(umem, ucmd.buf_addr, 0, &npages, page_shift,
			   npas, NULL);

	cq->resize_umem = umem;
	*cqe_size = ucmd.cqe_size;

	return 0;
}

static int resize_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
			 int entries, int cqe_size)
{
	int err;

	cq->resize_buf = kzalloc(sizeof(*cq->resize_buf), GFP_KERNEL);
	if (!cq->resize_buf)
		return -ENOMEM;

	err = alloc_cq_frag_buf(dev, cq->resize_buf, entries, cqe_size);
	if (err)
		goto ex;

	init_cq_frag_buf(cq, cq->resize_buf);

	return 0;

ex:
	kfree(cq->resize_buf);
	return err;
}

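/* Copy CQEs that are still in software ownership from the old buffer into
 * the resize buffer, fixing up the ownership bit, until the RESIZE_CQ CQE
 * is reached.
 */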
static int copy_resize_cqes(struct mlx5_ib_cq *cq)
{
	struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
	struct mlx5_cqe64 *scqe64;
	struct mlx5_cqe64 *dcqe64;
	void *start_cqe;
	void *scqe;
	void *dcqe;
	int ssize;
	int dsize;
	int i;
	u8 sw_own;

	ssize = cq->buf.cqe_size;
	dsize = cq->resize_buf->cqe_size;
	if (ssize != dsize) {
		mlx5_ib_warn(dev, "resize from different cqe size is not supported\n");
		return -EINVAL;
	}

	i = cq->mcq.cons_index;
	scqe = get_sw_cqe(cq, i);
	scqe64 = ssize == 64 ? scqe : scqe + 64;
	start_cqe = scqe;
	if (!scqe) {
		mlx5_ib_warn(dev, "expected cqe in sw ownership\n");
		return -EINVAL;
	}

	while (get_cqe_opcode(scqe64) != MLX5_CQE_RESIZE_CQ) {
		dcqe = mlx5_frag_buf_get_wqe(&cq->resize_buf->fbc,
					     (i + 1) & cq->resize_buf->nent);
		dcqe64 = dsize == 64 ? dcqe : dcqe + 64;
		sw_own = sw_ownership_bit(i + 1, cq->resize_buf->nent);
		memcpy(dcqe, scqe, dsize);
		dcqe64->op_own = (dcqe64->op_own & ~MLX5_CQE_OWNER_MASK) | sw_own;

		++i;
		scqe = get_sw_cqe(cq, i);
		scqe64 = ssize == 64 ? scqe : scqe + 64;
		if (!scqe) {
			mlx5_ib_warn(dev, "expected cqe in sw ownership\n");
			return -EINVAL;
		}

		if (scqe == start_cqe) {
			pr_warn("resize CQ failed to get resize CQE, CQN 0x%x\n",
				cq->mcq.cqn);
			return -ENOMEM;
		}
	}
	++cq->mcq.cons_index;
	return 0;
}

int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(ibcq->device);
	struct mlx5_ib_cq *cq = to_mcq(ibcq);
	void *cqc;
	u32 *in;
	int err;
	int npas;
	__be64 *pas;
	int page_shift;
	int inlen;
	int cqe_size;
	unsigned long flags;

	if (!MLX5_CAP_GEN(dev->mdev, cq_resize)) {
		pr_info("Firmware does not support resize CQ\n");
		return -ENOSYS;
	}

	if (entries < 1 ||
	    entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz))) {
		mlx5_ib_warn(dev, "wrong entries number %d, max %d\n",
			     entries,
			     1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz));
		return -EINVAL;
	}

	entries = roundup_pow_of_two(entries + 1);
	if (entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz)) + 1)
		return -EINVAL;

	if (entries == ibcq->cqe + 1)
		return 0;

	mutex_lock(&cq->resize_mutex);
	if (udata) {
		err = resize_user(dev, cq, entries, udata, &npas, &page_shift,
				  &cqe_size);
	} else {
		cqe_size = 64;
		err = resize_kernel(dev, cq, entries, cqe_size);
		if (!err) {
			struct mlx5_frag_buf *frag_buf = &cq->resize_buf->frag_buf;

			npas = frag_buf->npages;
			page_shift = frag_buf->page_shift;
		}
	}

	if (err)
		goto ex;

	inlen = MLX5_ST_SZ_BYTES(modify_cq_in) +
		MLX5_FLD_SZ_BYTES(modify_cq_in, pas[0]) * npas;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto ex_resize;
	}

	pas = (__be64 *)MLX5_ADDR_OF(modify_cq_in, in, pas);
	if (udata)
		mlx5_ib_populate_pas(dev, cq->resize_umem, page_shift,
				     pas, 0);
	else
		mlx5_fill_page_frag_array(&cq->resize_buf->frag_buf, pas);

	MLX5_SET(modify_cq_in, in,
		 modify_field_select_resize_field_select.resize_field_select.resize_field_select,
		 MLX5_MODIFY_CQ_MASK_LOG_SIZE |
		 MLX5_MODIFY_CQ_MASK_PG_OFFSET |
		 MLX5_MODIFY_CQ_MASK_PG_SIZE);

	cqc = MLX5_ADDR_OF(modify_cq_in, in, cq_context);

	MLX5_SET(cqc, cqc, log_page_size,
		 page_shift - MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET(cqc, cqc, cqe_sz,
		 cqe_sz_to_mlx_sz(cqe_size,
				  cq->private_flags &
				  MLX5_IB_CQ_PR_FLAGS_CQE_128_PAD));
	MLX5_SET(cqc, cqc, log_cq_size, ilog2(entries));

	MLX5_SET(modify_cq_in, in, op_mod, MLX5_CQ_OPMOD_RESIZE);
	MLX5_SET(modify_cq_in, in, cqn, cq->mcq.cqn);

	err = mlx5_core_modify_cq(dev->mdev, &cq->mcq, in, inlen);
	if (err)
		goto ex_alloc;

	if (udata) {
		cq->ibcq.cqe = entries - 1;
		ib_umem_release(cq->buf.umem);
		cq->buf.umem = cq->resize_umem;
		cq->resize_umem = NULL;
	} else {
		struct mlx5_ib_cq_buf tbuf;
		int resized = 0;

		spin_lock_irqsave(&cq->lock, flags);
		if (cq->resize_buf) {
			err = copy_resize_cqes(cq);
			if (!err) {
				tbuf = cq->buf;
				cq->buf = *cq->resize_buf;
				kfree(cq->resize_buf);
				cq->resize_buf = NULL;
				resized = 1;
			}
		}
		cq->ibcq.cqe = entries - 1;
		spin_unlock_irqrestore(&cq->lock, flags);
		if (resized)
			free_cq_buf(dev, &tbuf);
	}
	mutex_unlock(&cq->resize_mutex);

	kvfree(in);
	return 0;

ex_alloc:
	kvfree(in);

ex_resize:
	ib_umem_release(cq->resize_umem);
	if (!udata) {
		free_cq_buf(dev, cq->resize_buf);
		cq->resize_buf = NULL;
	}
ex:
	mutex_unlock(&cq->resize_mutex);
	return err;
}

int mlx5_ib_get_cqe_size(struct ib_cq *ibcq)
{
	struct mlx5_ib_cq *cq;

	if (!ibcq)
		return 128;

	cq = to_mcq(ibcq);
	return cq->cqe_size;
}

/* Called from atomic context */
int mlx5_ib_generate_wc(struct ib_cq *ibcq, struct ib_wc *wc)
{
	struct mlx5_ib_wc *soft_wc;
	struct mlx5_ib_cq *cq = to_mcq(ibcq);
	unsigned long flags;

	soft_wc = kmalloc(sizeof(*soft_wc), GFP_ATOMIC);
	if (!soft_wc)
		return -ENOMEM;

	soft_wc->wc = *wc;
	spin_lock_irqsave(&cq->lock, flags);
	list_add_tail(&soft_wc->list, &cq->wc_list);
	if (cq->notify_flags == IB_CQ_NEXT_COMP ||
	    wc->status != IB_WC_SUCCESS) {
		cq->notify_flags = 0;
		schedule_work(&cq->notify_work);
	}
	spin_unlock_irqrestore(&cq->lock, flags);

	return 0;
}