/*
 * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mlx4/cq.h>
#include <linux/mlx4/qp.h>
#include <linux/mlx4/srq.h>
#include <linux/slab.h>

#include "mlx4_ib.h"
#include <rdma/mlx4-abi.h>
#include <rdma/uverbs_ioctl.h>

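/*
 * Completion and async event callbacks registered with the mlx4 core:
 * mlx4_ib_cq_comp() forwards a completion notification to the consumer's
 * comp_handler, and mlx4_ib_cq_event() translates a hardware CQ error
 * event into an IB_EVENT_CQ_ERR for the consumer's event_handler.
 */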
static void mlx4_ib_cq_comp(struct mlx4_cq *cq)
{
	struct ib_cq *ibcq = &to_mibcq(cq)->ibcq;

	ibcq->comp_handler(ibcq, ibcq->cq_context);
}

static void mlx4_ib_cq_event(struct mlx4_cq *cq, enum mlx4_event type)
{
	struct ib_event event;
	struct ib_cq *ibcq;

	if (type != MLX4_EVENT_TYPE_CQ_ERROR) {
		pr_warn("Unexpected event type %d on CQ %06x\n",
			type, cq->cqn);
		return;
	}

	ibcq = &to_mibcq(cq)->ibcq;
	if (ibcq->event_handler) {
		event.device     = ibcq->device;
		event.event      = IB_EVENT_CQ_ERR;
		event.element.cq = ibcq;
		ibcq->event_handler(&event, ibcq->cq_context);
	}
}

static void *get_cqe_from_buf(struct mlx4_ib_cq_buf *buf, int n)
{
	return mlx4_buf_offset(&buf->buf, n * buf->entry_size);
}

static void *get_cqe(struct mlx4_ib_cq *cq, int n)
{
	return get_cqe_from_buf(&cq->buf, n);
}

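/*
 * Return the CQE at index n if it is owned by software, or NULL if it
 * still belongs to hardware.  The owner bit toggles on each pass around
 * the ring, so it is compared against the "lap" bit of n.  With 64-byte
 * CQEs the hardware writes the ownership byte in the second half of the
 * entry, hence the tcqe adjustment.
 */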
static void *get_sw_cqe(struct mlx4_ib_cq *cq, int n)
{
	struct mlx4_cqe *cqe = get_cqe(cq, n & cq->ibcq.cqe);
	struct mlx4_cqe *tcqe = ((cq->buf.entry_size == 64) ? (cqe + 1) : cqe);

	return (!!(tcqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK) ^
		!!(n & (cq->ibcq.cqe + 1))) ? NULL : cqe;
}

static struct mlx4_cqe *next_cqe_sw(struct mlx4_ib_cq *cq)
{
	return get_sw_cqe(cq, cq->mcq.cons_index);
}

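/*
 * Tune interrupt moderation: an event is generated after cq_count
 * completions or cq_period microseconds, whichever comes first.
 */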
int mlx4_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
{
	struct mlx4_ib_cq *mcq = to_mcq(cq);
	struct mlx4_ib_dev *dev = to_mdev(cq->device);

	return mlx4_cq_modify(dev->dev, &mcq->mcq, cq_count, cq_period);
}

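/*
 * Allocate a kernel-space buffer of nent CQEs and register it with the
 * HCA by building and writing its MTT entries.
 */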
static int mlx4_ib_alloc_cq_buf(struct mlx4_ib_dev *dev,
				struct mlx4_ib_cq_buf *buf, int nent)
{
	int err;

	err = mlx4_buf_alloc(dev->dev, nent * dev->dev->caps.cqe_size,
			     PAGE_SIZE * 2, &buf->buf);
	if (err)
		goto out;

	buf->entry_size = dev->dev->caps.cqe_size;
	err = mlx4_mtt_init(dev->dev, buf->buf.npages, buf->buf.page_shift,
			    &buf->mtt);
	if (err)
		goto err_buf;

	err = mlx4_buf_write_mtt(dev->dev, &buf->mtt, &buf->buf);
	if (err)
		goto err_mtt;

	return 0;

err_mtt:
	mlx4_mtt_cleanup(dev->dev, &buf->mtt);

err_buf:
	mlx4_buf_free(dev->dev, nent * buf->entry_size, &buf->buf);

out:
	return err;
}

static void mlx4_ib_free_cq_buf(struct mlx4_ib_dev *dev,
				struct mlx4_ib_cq_buf *buf, int cqe)
{
	mlx4_buf_free(dev->dev, (cqe + 1) * buf->entry_size, &buf->buf);
}

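/*
 * Pin a user-space CQE buffer, pick an optimal MTT page size for it and
 * write the pinned pages into the MTT so the HCA can DMA to the buffer.
 */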
static int mlx4_ib_get_cq_umem(struct mlx4_ib_dev *dev, struct ib_udata *udata,
			       struct mlx4_ib_cq_buf *buf,
			       struct ib_umem **umem, u64 buf_addr, int cqe)
{
	int err;
	int cqe_size = dev->dev->caps.cqe_size;
	int shift;
	int n;

	*umem = ib_umem_get(udata, buf_addr, cqe * cqe_size,
			    IB_ACCESS_LOCAL_WRITE, 1);
	if (IS_ERR(*umem))
		return PTR_ERR(*umem);

	n = ib_umem_page_count(*umem);
	shift = mlx4_ib_umem_calc_optimal_mtt_size(*umem, 0, &n);
	err = mlx4_mtt_init(dev->dev, n, shift, &buf->mtt);
	if (err)
		goto err_buf;

	err = mlx4_ib_umem_write_mtt(dev, &buf->mtt, *umem);
	if (err)
		goto err_mtt;

	return 0;

err_mtt:
	mlx4_mtt_cleanup(dev->dev, &buf->mtt);

err_buf:
	ib_umem_release(*umem);

	return err;
}

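/*
 * Create a CQ.  For user CQs the CQE buffer and doorbell record live in
 * user memory described by udata; for kernel CQs they are allocated
 * here.  One extra entry is reserved and the count is rounded up to a
 * power of two, so ibcq.cqe reports one less than the ring size.
 */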
#define CQ_CREATE_FLAGS_SUPPORTED IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION
struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev,
				const struct ib_cq_init_attr *attr,
				struct ib_udata *udata)
{
	int entries = attr->cqe;
	int vector = attr->comp_vector;
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	struct mlx4_ib_cq *cq;
	struct mlx4_uar *uar;
	void *buf_addr;
	int err;
	struct mlx4_ib_ucontext *context = rdma_udata_to_drv_context(
		udata, struct mlx4_ib_ucontext, ibucontext);

	if (entries < 1 || entries > dev->dev->caps.max_cqes)
		return ERR_PTR(-EINVAL);

	if (attr->flags & ~CQ_CREATE_FLAGS_SUPPORTED)
		return ERR_PTR(-EINVAL);

	cq = kzalloc(sizeof(*cq), GFP_KERNEL);
	if (!cq)
		return ERR_PTR(-ENOMEM);

	entries      = roundup_pow_of_two(entries + 1);
	cq->ibcq.cqe = entries - 1;
	mutex_init(&cq->resize_mutex);
	spin_lock_init(&cq->lock);
	cq->resize_buf = NULL;
	cq->resize_umem = NULL;
	cq->create_flags = attr->flags;
	INIT_LIST_HEAD(&cq->send_qp_list);
	INIT_LIST_HEAD(&cq->recv_qp_list);

	if (udata) {
		struct mlx4_ib_create_cq ucmd;

		if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
			err = -EFAULT;
			goto err_cq;
		}

		buf_addr = (void *)(unsigned long)ucmd.buf_addr;
		err = mlx4_ib_get_cq_umem(dev, udata, &cq->buf, &cq->umem,
					  ucmd.buf_addr, entries);
		if (err)
			goto err_cq;

		err = mlx4_ib_db_map_user(udata, ucmd.db_addr, &cq->db);
		if (err)
			goto err_mtt;

		uar = &context->uar;
		cq->mcq.usage = MLX4_RES_USAGE_USER_VERBS;
	} else {
		err = mlx4_db_alloc(dev->dev, &cq->db, 1);
		if (err)
			goto err_cq;

		cq->mcq.set_ci_db  = cq->db.db;
		cq->mcq.arm_db     = cq->db.db + 1;
		*cq->mcq.set_ci_db = 0;
		*cq->mcq.arm_db    = 0;

		err = mlx4_ib_alloc_cq_buf(dev, &cq->buf, entries);
		if (err)
			goto err_db;

		buf_addr = &cq->buf.buf;

		uar = &dev->priv_uar;
		cq->mcq.usage = MLX4_RES_USAGE_DRIVER;
	}

	if (dev->eq_table)
		vector = dev->eq_table[vector % ibdev->num_comp_vectors];

	err = mlx4_cq_alloc(dev->dev, entries, &cq->buf.mtt, uar, cq->db.dma,
			    &cq->mcq, vector, 0,
			    !!(cq->create_flags &
			       IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION),
			    buf_addr, !!udata);
	if (err)
		goto err_dbmap;

	if (udata)
		cq->mcq.tasklet_ctx.comp = mlx4_ib_cq_comp;
	else
		cq->mcq.comp = mlx4_ib_cq_comp;
	cq->mcq.event = mlx4_ib_cq_event;

	if (udata)
		if (ib_copy_to_udata(udata, &cq->mcq.cqn, sizeof(__u32))) {
			err = -EFAULT;
			goto err_cq_free;
		}

	return &cq->ibcq;

err_cq_free:
	mlx4_cq_free(dev->dev, &cq->mcq);

err_dbmap:
	if (udata)
		mlx4_ib_db_unmap_user(context, &cq->db);

err_mtt:
	mlx4_mtt_cleanup(dev->dev, &cq->buf.mtt);

	if (udata)
		ib_umem_release(cq->umem);
	else
		mlx4_ib_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe);

err_db:
	if (!udata)
		mlx4_db_free(dev->dev, &cq->db);

err_cq:
	kfree(cq);

	return ERR_PTR(err);
}

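/*
 * Prepare the shadow buffer for resizing a kernel CQ; it is promoted to
 * cq->buf once the hardware resize operation completes.
 */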
static int mlx4_alloc_resize_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq *cq,
				 int entries)
{
	int err;

	if (cq->resize_buf)
		return -EBUSY;

	cq->resize_buf = kmalloc(sizeof(*cq->resize_buf), GFP_KERNEL);
	if (!cq->resize_buf)
		return -ENOMEM;

	err = mlx4_ib_alloc_cq_buf(dev, &cq->resize_buf->buf, entries);
	if (err) {
		kfree(cq->resize_buf);
		cq->resize_buf = NULL;
		return err;
	}

	cq->resize_buf->cqe = entries - 1;

	return 0;
}

static int mlx4_alloc_resize_umem(struct mlx4_ib_dev *dev, struct mlx4_ib_cq *cq,
				  int entries, struct ib_udata *udata)
{
	struct mlx4_ib_resize_cq ucmd;
	int err;

	if (cq->resize_umem)
		return -EBUSY;

	if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd)))
		return -EFAULT;

	cq->resize_buf = kmalloc(sizeof(*cq->resize_buf), GFP_KERNEL);
	if (!cq->resize_buf)
		return -ENOMEM;

	err = mlx4_ib_get_cq_umem(dev, udata, &cq->resize_buf->buf,
				  &cq->resize_umem, ucmd.buf_addr, entries);
	if (err) {
		kfree(cq->resize_buf);
		cq->resize_buf = NULL;
		return err;
	}

	cq->resize_buf->cqe = entries - 1;

	return 0;
}

static int mlx4_ib_get_outstanding_cqes(struct mlx4_ib_cq *cq)
{
	u32 i;

	i = cq->mcq.cons_index;
	while (get_sw_cqe(cq, i))
		++i;

	return i - cq->mcq.cons_index;
}

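/*
 * Copy the CQEs that arrived in the old buffer while the resize was in
 * flight into the new buffer, recomputing each entry's ownership bit
 * for its position in the new ring.  The sweep stops at the special
 * MLX4_CQE_OPCODE_RESIZE entry written by the hardware.
 */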
static void mlx4_ib_cq_resize_copy_cqes(struct mlx4_ib_cq *cq)
{
	struct mlx4_cqe *cqe, *new_cqe;
	int i;
	int cqe_size = cq->buf.entry_size;
	int cqe_inc = cqe_size == 64 ? 1 : 0;

	i = cq->mcq.cons_index;
	cqe = get_cqe(cq, i & cq->ibcq.cqe);
	cqe += cqe_inc;

	while ((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) != MLX4_CQE_OPCODE_RESIZE) {
		new_cqe = get_cqe_from_buf(&cq->resize_buf->buf,
					   (i + 1) & cq->resize_buf->cqe);
		memcpy(new_cqe, get_cqe(cq, i & cq->ibcq.cqe), cqe_size);
		new_cqe += cqe_inc;

		new_cqe->owner_sr_opcode = (cqe->owner_sr_opcode & ~MLX4_CQE_OWNER_MASK) |
			(((i + 1) & (cq->resize_buf->cqe + 1)) ? MLX4_CQE_OWNER_MASK : 0);
		cqe = get_cqe(cq, ++i & cq->ibcq.cqe);
		cqe += cqe_inc;
	}
	++cq->mcq.cons_index;
}

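/*
 * Resize a CQ.  The new buffer (user umem or kernel allocation) is
 * prepared first, then firmware is asked to switch.  Kernel CQs copy
 * outstanding CQEs into the new buffer under cq->lock; user CQs simply
 * swap buffers, since userspace consumes the RESIZE CQE itself.
 */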
int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
{
	struct mlx4_ib_dev *dev = to_mdev(ibcq->device);
	struct mlx4_ib_cq *cq = to_mcq(ibcq);
	struct mlx4_mtt mtt;
	int outst_cqe;
	int err;

	mutex_lock(&cq->resize_mutex);
	if (entries < 1 || entries > dev->dev->caps.max_cqes) {
		err = -EINVAL;
		goto out;
	}

	entries = roundup_pow_of_two(entries + 1);
	if (entries == ibcq->cqe + 1) {
		err = 0;
		goto out;
	}

	if (entries > dev->dev->caps.max_cqes + 1) {
		err = -EINVAL;
		goto out;
	}

	if (ibcq->uobject) {
		err = mlx4_alloc_resize_umem(dev, cq, entries, udata);
		if (err)
			goto out;
	} else {
		/* Can't be smaller than the number of outstanding CQEs */
		outst_cqe = mlx4_ib_get_outstanding_cqes(cq);
		if (entries < outst_cqe + 1) {
			err = -EINVAL;
			goto out;
		}

		err = mlx4_alloc_resize_buf(dev, cq, entries);
		if (err)
			goto out;
	}

	mtt = cq->buf.mtt;

	err = mlx4_cq_resize(dev->dev, &cq->mcq, entries, &cq->resize_buf->buf.mtt);
	if (err)
		goto err_buf;

	mlx4_mtt_cleanup(dev->dev, &mtt);
	if (ibcq->uobject) {
		cq->buf      = cq->resize_buf->buf;
		cq->ibcq.cqe = cq->resize_buf->cqe;
		ib_umem_release(cq->umem);
		cq->umem     = cq->resize_umem;

		kfree(cq->resize_buf);
		cq->resize_buf = NULL;
		cq->resize_umem = NULL;
	} else {
		struct mlx4_ib_cq_buf tmp_buf;
		int tmp_cqe = 0;

		spin_lock_irq(&cq->lock);
		if (cq->resize_buf) {
			mlx4_ib_cq_resize_copy_cqes(cq);
			tmp_buf = cq->buf;
			tmp_cqe = cq->ibcq.cqe;
			cq->buf      = cq->resize_buf->buf;
			cq->ibcq.cqe = cq->resize_buf->cqe;

			kfree(cq->resize_buf);
			cq->resize_buf = NULL;
		}
		spin_unlock_irq(&cq->lock);

		if (tmp_cqe)
			mlx4_ib_free_cq_buf(dev, &tmp_buf, tmp_cqe);
	}

	goto out;

err_buf:
	mlx4_mtt_cleanup(dev->dev, &cq->resize_buf->buf.mtt);
	if (!ibcq->uobject)
		mlx4_ib_free_cq_buf(dev, &cq->resize_buf->buf,
				    cq->resize_buf->cqe);

	kfree(cq->resize_buf);
	cq->resize_buf = NULL;

	if (cq->resize_umem) {
		ib_umem_release(cq->resize_umem);
		cq->resize_umem = NULL;
	}

out:
	mutex_unlock(&cq->resize_mutex);

	return err;
}

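/*
 * Destroy a CQ: free the HCA CQ object and MTT, then release either the
 * user mapping and umem or the kernel buffer and doorbell record.
 */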
int mlx4_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
{
	struct mlx4_ib_dev *dev = to_mdev(cq->device);
	struct mlx4_ib_cq *mcq = to_mcq(cq);

	mlx4_cq_free(dev->dev, &mcq->mcq);
	mlx4_mtt_cleanup(dev->dev, &mcq->buf.mtt);

	if (udata) {
		mlx4_ib_db_unmap_user(
			rdma_udata_to_drv_context(
				udata,
				struct mlx4_ib_ucontext,
				ibucontext),
			&mcq->db);
		ib_umem_release(mcq->umem);
	} else {
		mlx4_ib_free_cq_buf(dev, &mcq->buf, cq->cqe);
		mlx4_db_free(dev->dev, &mcq->db);
	}

	kfree(mcq);

	return 0;
}

static void dump_cqe(void *cqe)
{
	__be32 *buf = cqe;

	pr_debug("CQE contents %08x %08x %08x %08x %08x %08x %08x %08x\n",
		 be32_to_cpu(buf[0]), be32_to_cpu(buf[1]), be32_to_cpu(buf[2]),
		 be32_to_cpu(buf[3]), be32_to_cpu(buf[4]), be32_to_cpu(buf[5]),
		 be32_to_cpu(buf[6]), be32_to_cpu(buf[7]));
}

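/* Translate the syndrome of an error CQE into an IB work completion status. */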
static void mlx4_ib_handle_error_cqe(struct mlx4_err_cqe *cqe,
				     struct ib_wc *wc)
{
	if (cqe->syndrome == MLX4_CQE_SYNDROME_LOCAL_QP_OP_ERR) {
		pr_debug("local QP operation err (QPN %06x, WQE index %x, vendor syndrome %02x, opcode = %02x)\n",
			 be32_to_cpu(cqe->my_qpn), be16_to_cpu(cqe->wqe_index),
			 cqe->vendor_err_syndrome,
			 cqe->owner_sr_opcode & ~MLX4_CQE_OWNER_MASK);
		dump_cqe(cqe);
	}

	switch (cqe->syndrome) {
	case MLX4_CQE_SYNDROME_LOCAL_LENGTH_ERR:
		wc->status = IB_WC_LOC_LEN_ERR;
		break;
	case MLX4_CQE_SYNDROME_LOCAL_QP_OP_ERR:
		wc->status = IB_WC_LOC_QP_OP_ERR;
		break;
	case MLX4_CQE_SYNDROME_LOCAL_PROT_ERR:
		wc->status = IB_WC_LOC_PROT_ERR;
		break;
	case MLX4_CQE_SYNDROME_WR_FLUSH_ERR:
		wc->status = IB_WC_WR_FLUSH_ERR;
		break;
	case MLX4_CQE_SYNDROME_MW_BIND_ERR:
		wc->status = IB_WC_MW_BIND_ERR;
		break;
	case MLX4_CQE_SYNDROME_BAD_RESP_ERR:
		wc->status = IB_WC_BAD_RESP_ERR;
		break;
	case MLX4_CQE_SYNDROME_LOCAL_ACCESS_ERR:
		wc->status = IB_WC_LOC_ACCESS_ERR;
		break;
	case MLX4_CQE_SYNDROME_REMOTE_INVAL_REQ_ERR:
		wc->status = IB_WC_REM_INV_REQ_ERR;
		break;
	case MLX4_CQE_SYNDROME_REMOTE_ACCESS_ERR:
		wc->status = IB_WC_REM_ACCESS_ERR;
		break;
	case MLX4_CQE_SYNDROME_REMOTE_OP_ERR:
		wc->status = IB_WC_REM_OP_ERR;
		break;
	case MLX4_CQE_SYNDROME_TRANSPORT_RETRY_EXC_ERR:
		wc->status = IB_WC_RETRY_EXC_ERR;
		break;
	case MLX4_CQE_SYNDROME_RNR_RETRY_EXC_ERR:
		wc->status = IB_WC_RNR_RETRY_EXC_ERR;
		break;
	case MLX4_CQE_SYNDROME_REMOTE_ABORTED_ERR:
		wc->status = IB_WC_REM_ABORT_ERR;
		break;
	default:
		wc->status = IB_WC_GENERAL_ERR;
		break;
	}

	wc->vendor_err = cqe->vendor_err_syndrome;
}

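/*
 * The hardware-computed checksum is trusted only for non-fragmented
 * IPv4 packets without IP options that carry TCP or UDP, when the IPOK
 * status bit is set and the checksum is the full 0xffff.
 */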
static int mlx4_ib_ipoib_csum_ok(__be16 status, __be16 checksum)
{
	return ((status & cpu_to_be16(MLX4_CQE_STATUS_IPV4 |
				      MLX4_CQE_STATUS_IPV4F |
				      MLX4_CQE_STATUS_IPV4OPT |
				      MLX4_CQE_STATUS_IPV6 |
				      MLX4_CQE_STATUS_IPOK)) ==
		cpu_to_be16(MLX4_CQE_STATUS_IPV4 |
			    MLX4_CQE_STATUS_IPOK)) &&
		(status & cpu_to_be16(MLX4_CQE_STATUS_UDP |
				      MLX4_CQE_STATUS_TCP)) &&
		checksum == cpu_to_be16(0xffff);
}

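/*
 * For proxy QPs used in SR-IOV mode, the real completion metadata is
 * tunneled in a mlx4_ib_proxy_sqp_hdr at the head of the receive
 * buffer; unpack it into the work completion.
 */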
static void use_tunnel_data(struct mlx4_ib_qp *qp, struct mlx4_ib_cq *cq,
			    struct ib_wc *wc, unsigned tail,
			    struct mlx4_cqe *cqe, int is_eth)
{
	struct mlx4_ib_proxy_sqp_hdr *hdr;

	ib_dma_sync_single_for_cpu(qp->ibqp.device,
				   qp->sqp_proxy_rcv[tail].map,
				   sizeof(struct mlx4_ib_proxy_sqp_hdr),
				   DMA_FROM_DEVICE);
	hdr = (struct mlx4_ib_proxy_sqp_hdr *) (qp->sqp_proxy_rcv[tail].addr);
	wc->pkey_index = be16_to_cpu(hdr->tun.pkey_index);
	wc->src_qp = be32_to_cpu(hdr->tun.flags_src_qp) & 0xFFFFFF;
	wc->wc_flags |= (hdr->tun.g_ml_path & 0x80) ? (IB_WC_GRH) : 0;
	wc->dlid_path_bits = 0;

	if (is_eth) {
		wc->slid = 0;
		wc->vlan_id = be16_to_cpu(hdr->tun.sl_vid);
		memcpy(&(wc->smac[0]), (char *)&hdr->tun.mac_31_0, 4);
		memcpy(&(wc->smac[4]), (char *)&hdr->tun.slid_mac_47_32, 2);
		wc->wc_flags |= (IB_WC_WITH_VLAN | IB_WC_WITH_SMAC);
	} else {
		wc->slid = be16_to_cpu(hdr->tun.slid_mac_47_32);
		wc->sl = (u8) (be16_to_cpu(hdr->tun.sl_vid) >> 12);
	}
}

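/*
 * Generate a software IB_WC_WR_FLUSH_ERR completion for each
 * outstanding WQE on one work queue of a QP.  Used when the device is
 * in internal error state and the hardware will not produce CQEs.
 */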
static void mlx4_ib_qp_sw_comp(struct mlx4_ib_qp *qp, int num_entries,
			       struct ib_wc *wc, int *npolled, int is_send)
{
	struct mlx4_ib_wq *wq;
	unsigned cur;
	int i;

	wq = is_send ? &qp->sq : &qp->rq;
	cur = wq->head - wq->tail;

	if (cur == 0)
		return;

	for (i = 0; i < cur && *npolled < num_entries; i++) {
		wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
		wc->status = IB_WC_WR_FLUSH_ERR;
		wc->vendor_err = MLX4_CQE_SYNDROME_WR_FLUSH_ERR;
		wq->tail++;
		(*npolled)++;
		wc->qp = &qp->ibqp;
		wc++;
	}
}

static void mlx4_ib_poll_sw_comp(struct mlx4_ib_cq *cq, int num_entries,
				 struct ib_wc *wc, int *npolled)
{
	struct mlx4_ib_qp *qp;

	*npolled = 0;
	/* Find uncompleted WQEs belonging to that cq and return
	 * simulated FLUSH_ERR completions
	 */
	list_for_each_entry(qp, &cq->send_qp_list, cq_send_list) {
		mlx4_ib_qp_sw_comp(qp, num_entries, wc + *npolled, npolled, 1);
		if (*npolled >= num_entries)
			goto out;
	}

	list_for_each_entry(qp, &cq->recv_qp_list, cq_recv_list) {
		mlx4_ib_qp_sw_comp(qp, num_entries, wc + *npolled, npolled, 0);
		if (*npolled >= num_entries)
			goto out;
	}

out:
	return;
}

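/*
 * Process a single CQE: locate the QP (and SRQ, if any) it belongs to,
 * retire the matching work request and fill in the work completion.
 * Returns -EAGAIN when no software-owned CQE is available.
 */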
static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
			    struct mlx4_ib_qp **cur_qp,
			    struct ib_wc *wc)
{
	struct mlx4_cqe *cqe;
	struct mlx4_qp *mqp;
	struct mlx4_ib_wq *wq;
	struct mlx4_ib_srq *srq;
	struct mlx4_srq *msrq = NULL;
	int is_send;
	int is_error;
	int is_eth;
	u32 g_mlpath_rqpn;
	u16 wqe_ctr;
	unsigned tail = 0;

repoll:
	cqe = next_cqe_sw(cq);
	if (!cqe)
		return -EAGAIN;

	if (cq->buf.entry_size == 64)
		cqe++;

	++cq->mcq.cons_index;

	/*
	 * Make sure we read CQ entry contents after we've checked the
	 * ownership bit.
	 */
	rmb();

	is_send  = cqe->owner_sr_opcode & MLX4_CQE_IS_SEND_MASK;
	is_error = (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
		MLX4_CQE_OPCODE_ERROR;

	/* Resize CQ in progress */
	if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) == MLX4_CQE_OPCODE_RESIZE)) {
		if (cq->resize_buf) {
			struct mlx4_ib_dev *dev = to_mdev(cq->ibcq.device);

			mlx4_ib_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe);
			cq->buf      = cq->resize_buf->buf;
			cq->ibcq.cqe = cq->resize_buf->cqe;

			kfree(cq->resize_buf);
			cq->resize_buf = NULL;
		}

		goto repoll;
	}

	if (!*cur_qp ||
	    (be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK) != (*cur_qp)->mqp.qpn) {
		/*
		 * We do not have to take the QP table lock here,
		 * because CQs will be locked while QPs are removed
		 * from the table.
		 */
		mqp = __mlx4_qp_lookup(to_mdev(cq->ibcq.device)->dev,
				       be32_to_cpu(cqe->vlan_my_qpn));
		*cur_qp = to_mibqp(mqp);
	}

	wc->qp = &(*cur_qp)->ibqp;

	if (wc->qp->qp_type == IB_QPT_XRC_TGT) {
		u32 srq_num;

		g_mlpath_rqpn = be32_to_cpu(cqe->g_mlpath_rqpn);
		srq_num = g_mlpath_rqpn & 0xffffff;
		/* SRQ is also in the radix tree */
		msrq = mlx4_srq_lookup(to_mdev(cq->ibcq.device)->dev,
				       srq_num);
	}

	if (is_send) {
		wq = &(*cur_qp)->sq;
		if (!(*cur_qp)->sq_signal_bits) {
			wqe_ctr = be16_to_cpu(cqe->wqe_index);
			wq->tail += (u16) (wqe_ctr - (u16) wq->tail);
		}
		wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
		++wq->tail;
	} else if ((*cur_qp)->ibqp.srq) {
		srq = to_msrq((*cur_qp)->ibqp.srq);
		wqe_ctr = be16_to_cpu(cqe->wqe_index);
		wc->wr_id = srq->wrid[wqe_ctr];
		mlx4_ib_free_srq_wqe(srq, wqe_ctr);
	} else if (msrq) {
		srq = to_mibsrq(msrq);
		wqe_ctr = be16_to_cpu(cqe->wqe_index);
		wc->wr_id = srq->wrid[wqe_ctr];
		mlx4_ib_free_srq_wqe(srq, wqe_ctr);
	} else {
		wq = &(*cur_qp)->rq;
		tail = wq->tail & (wq->wqe_cnt - 1);
		wc->wr_id = wq->wrid[tail];
		++wq->tail;
	}

	if (unlikely(is_error)) {
		mlx4_ib_handle_error_cqe((struct mlx4_err_cqe *) cqe, wc);
		return 0;
	}

	wc->status = IB_WC_SUCCESS;

	if (is_send) {
		wc->wc_flags = 0;
		switch (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) {
		case MLX4_OPCODE_RDMA_WRITE_IMM:
			wc->wc_flags |= IB_WC_WITH_IMM;
			/* fall through */
		case MLX4_OPCODE_RDMA_WRITE:
			wc->opcode = IB_WC_RDMA_WRITE;
			break;
		case MLX4_OPCODE_SEND_IMM:
			wc->wc_flags |= IB_WC_WITH_IMM;
			/* fall through */
		case MLX4_OPCODE_SEND:
		case MLX4_OPCODE_SEND_INVAL:
			wc->opcode = IB_WC_SEND;
			break;
		case MLX4_OPCODE_RDMA_READ:
			wc->opcode = IB_WC_RDMA_READ;
			wc->byte_len = be32_to_cpu(cqe->byte_cnt);
			break;
		case MLX4_OPCODE_ATOMIC_CS:
			wc->opcode = IB_WC_COMP_SWAP;
			wc->byte_len = 8;
			break;
		case MLX4_OPCODE_ATOMIC_FA:
			wc->opcode = IB_WC_FETCH_ADD;
			wc->byte_len = 8;
			break;
		case MLX4_OPCODE_MASKED_ATOMIC_CS:
			wc->opcode = IB_WC_MASKED_COMP_SWAP;
			wc->byte_len = 8;
			break;
		case MLX4_OPCODE_MASKED_ATOMIC_FA:
			wc->opcode = IB_WC_MASKED_FETCH_ADD;
			wc->byte_len = 8;
			break;
		case MLX4_OPCODE_LSO:
			wc->opcode = IB_WC_LSO;
			break;
		case MLX4_OPCODE_FMR:
			wc->opcode = IB_WC_REG_MR;
			break;
		case MLX4_OPCODE_LOCAL_INVAL:
			wc->opcode = IB_WC_LOCAL_INV;
			break;
		}
	} else {
		wc->byte_len = be32_to_cpu(cqe->byte_cnt);

		switch (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) {
		case MLX4_RECV_OPCODE_RDMA_WRITE_IMM:
			wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
			wc->wc_flags = IB_WC_WITH_IMM;
			wc->ex.imm_data = cqe->immed_rss_invalid;
			break;
		case MLX4_RECV_OPCODE_SEND_INVAL:
			wc->opcode = IB_WC_RECV;
			wc->wc_flags = IB_WC_WITH_INVALIDATE;
			wc->ex.invalidate_rkey = be32_to_cpu(cqe->immed_rss_invalid);
			break;
		case MLX4_RECV_OPCODE_SEND:
			wc->opcode = IB_WC_RECV;
			wc->wc_flags = 0;
			break;
		case MLX4_RECV_OPCODE_SEND_IMM:
			wc->opcode = IB_WC_RECV;
			wc->wc_flags = IB_WC_WITH_IMM;
			wc->ex.imm_data = cqe->immed_rss_invalid;
			break;
		}

		is_eth = (rdma_port_get_link_layer(wc->qp->device,
						   (*cur_qp)->port) ==
			  IB_LINK_LAYER_ETHERNET);
		if (mlx4_is_mfunc(to_mdev(cq->ibcq.device)->dev)) {
			if ((*cur_qp)->mlx4_ib_qp_type &
			    (MLX4_IB_QPT_PROXY_SMI_OWNER |
			     MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_GSI)) {
				use_tunnel_data(*cur_qp, cq, wc, tail, cqe,
						is_eth);
				return 0;
			}
		}

		g_mlpath_rqpn = be32_to_cpu(cqe->g_mlpath_rqpn);
		wc->src_qp = g_mlpath_rqpn & 0xffffff;
		wc->dlid_path_bits = (g_mlpath_rqpn >> 24) & 0x7f;
		wc->wc_flags |= g_mlpath_rqpn & 0x80000000 ? IB_WC_GRH : 0;
		wc->pkey_index = be32_to_cpu(cqe->immed_rss_invalid) & 0x7f;
		wc->wc_flags |= mlx4_ib_ipoib_csum_ok(cqe->status,
				cqe->checksum) ? IB_WC_IP_CSUM_OK : 0;
		if (is_eth) {
			wc->slid = 0;
			wc->sl = be16_to_cpu(cqe->sl_vid) >> 13;
			if (be32_to_cpu(cqe->vlan_my_qpn) &
			    MLX4_CQE_CVLAN_PRESENT_MASK) {
				wc->vlan_id = be16_to_cpu(cqe->sl_vid) &
					MLX4_CQE_VID_MASK;
			} else {
				wc->vlan_id = 0xffff;
			}
			memcpy(wc->smac, cqe->smac, ETH_ALEN);
			wc->wc_flags |= (IB_WC_WITH_VLAN | IB_WC_WITH_SMAC);
		} else {
			wc->slid = be16_to_cpu(cqe->rlid);
			wc->sl = be16_to_cpu(cqe->sl_vid) >> 12;
			wc->vlan_id = 0xffff;
		}
	}

	return 0;
}

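/*
 * Poll up to num_entries completions.  If the device is in internal
 * error state, report simulated flush-error completions instead of
 * touching the hardware queue.
 */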
int mlx4_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
	struct mlx4_ib_cq *cq = to_mcq(ibcq);
	struct mlx4_ib_qp *cur_qp = NULL;
	unsigned long flags;
	int npolled;
	struct mlx4_ib_dev *mdev = to_mdev(cq->ibcq.device);

	spin_lock_irqsave(&cq->lock, flags);
	if (mdev->dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) {
		mlx4_ib_poll_sw_comp(cq, num_entries, wc, &npolled);
		goto out;
	}

	for (npolled = 0; npolled < num_entries; ++npolled) {
		if (mlx4_ib_poll_one(cq, &cur_qp, wc + npolled))
			break;
	}

	mlx4_cq_set_ci(&cq->mcq);

out:
	spin_unlock_irqrestore(&cq->lock, flags);

	return npolled;
}

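/* Arm the CQ for a notification event, solicited-only or for any CQE. */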
int mlx4_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
	mlx4_cq_arm(&to_mcq(ibcq)->mcq,
		    (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
		    MLX4_CQ_DB_REQ_NOT_SOL : MLX4_CQ_DB_REQ_NOT,
		    to_mdev(ibcq->device)->uar_map,
		    MLX4_GET_DOORBELL_LOCK(&to_mdev(ibcq->device)->uar_lock));

	return 0;
}

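/*
 * Remove all CQEs belonging to QP number qpn, freeing their SRQ WQEs if
 * srq is given.  The caller must hold cq->lock; mlx4_ib_cq_clean()
 * below is the locking wrapper.
 */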
void __mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq)
{
	u32 prod_index;
	int nfreed = 0;
	struct mlx4_cqe *cqe, *dest;
	u8 owner_bit;
	int cqe_inc = cq->buf.entry_size == 64 ? 1 : 0;

	/*
	 * First we need to find the current producer index, so we
	 * know where to start cleaning from.  It doesn't matter if HW
	 * adds new entries after this loop -- the QP we're worried
	 * about is already in RESET, so the new entries won't come
	 * from our QP and therefore don't need to be checked.
	 */
	for (prod_index = cq->mcq.cons_index; get_sw_cqe(cq, prod_index); ++prod_index)
		if (prod_index == cq->mcq.cons_index + cq->ibcq.cqe)
			break;

	/*
	 * Now sweep backwards through the CQ, removing CQ entries
	 * that match our QP by copying older entries on top of them.
	 */
	while ((int) --prod_index - (int) cq->mcq.cons_index >= 0) {
		cqe = get_cqe(cq, prod_index & cq->ibcq.cqe);
		cqe += cqe_inc;

		if ((be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK) == qpn) {
			if (srq && !(cqe->owner_sr_opcode & MLX4_CQE_IS_SEND_MASK))
				mlx4_ib_free_srq_wqe(srq, be16_to_cpu(cqe->wqe_index));
			++nfreed;
		} else if (nfreed) {
			dest = get_cqe(cq, (prod_index + nfreed) & cq->ibcq.cqe);
			dest += cqe_inc;

			owner_bit = dest->owner_sr_opcode & MLX4_CQE_OWNER_MASK;
			memcpy(dest, cqe, sizeof(*cqe));
			dest->owner_sr_opcode = owner_bit |
				(dest->owner_sr_opcode & ~MLX4_CQE_OWNER_MASK);
		}
	}

	if (nfreed) {
		cq->mcq.cons_index += nfreed;
		/*
		 * Make sure update of buffer contents is done before
		 * updating consumer index.
		 */
		wmb();
		mlx4_cq_set_ci(&cq->mcq);
	}
}

void mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq)
{
	spin_lock_irq(&cq->lock);
	__mlx4_ib_cq_clean(cq, qpn, srq);
	spin_unlock_irq(&cq->lock);
}