/*
 * Copyright (c) 2012, 2013 Intel Corporation. All rights reserved.
 * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 *  - Redistributions of source code must retain the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer.
 *
 *  - Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials
 *    provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <rdma/ib_mad.h>
#include <rdma/ib_user_verbs.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/utsname.h>
#include <linux/rculist.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/vmalloc.h>
#include <rdma/rdma_vt.h>

#include "qib.h"
#include "qib_common.h"

static unsigned int ib_qib_qp_table_size = 256;
module_param_named(qp_table_size, ib_qib_qp_table_size, uint, S_IRUGO);
MODULE_PARM_DESC(qp_table_size, "QP table size");

static unsigned int qib_lkey_table_size = 16;
module_param_named(lkey_table_size, qib_lkey_table_size, uint,
		   S_IRUGO);
MODULE_PARM_DESC(lkey_table_size,
		 "LKEY table size in bits (2^n, 1 <= n <= 23)");

static unsigned int ib_qib_max_pds = 0xFFFF;
module_param_named(max_pds, ib_qib_max_pds, uint, S_IRUGO);
MODULE_PARM_DESC(max_pds,
		 "Maximum number of protection domains to support");

static unsigned int ib_qib_max_ahs = 0xFFFF;
module_param_named(max_ahs, ib_qib_max_ahs, uint, S_IRUGO);
MODULE_PARM_DESC(max_ahs, "Maximum number of address handles to support");

unsigned int ib_qib_max_cqes = 0x2FFFF;
module_param_named(max_cqes, ib_qib_max_cqes, uint, S_IRUGO);
MODULE_PARM_DESC(max_cqes,
		 "Maximum number of completion queue entries to support");

unsigned int ib_qib_max_cqs = 0x1FFFF;
module_param_named(max_cqs, ib_qib_max_cqs, uint, S_IRUGO);
MODULE_PARM_DESC(max_cqs, "Maximum number of completion queues to support");

unsigned int ib_qib_max_qp_wrs = 0x3FFF;
module_param_named(max_qp_wrs, ib_qib_max_qp_wrs, uint, S_IRUGO);
MODULE_PARM_DESC(max_qp_wrs, "Maximum number of QP WRs to support");

unsigned int ib_qib_max_qps = 16384;
module_param_named(max_qps, ib_qib_max_qps, uint, S_IRUGO);
MODULE_PARM_DESC(max_qps, "Maximum number of QPs to support");

unsigned int ib_qib_max_sges = 0x60;
module_param_named(max_sges, ib_qib_max_sges, uint, S_IRUGO);
MODULE_PARM_DESC(max_sges, "Maximum number of SGEs to support");

unsigned int ib_qib_max_mcast_grps = 16384;
module_param_named(max_mcast_grps, ib_qib_max_mcast_grps, uint, S_IRUGO);
MODULE_PARM_DESC(max_mcast_grps,
		 "Maximum number of multicast groups to support");

unsigned int ib_qib_max_mcast_qp_attached = 16;
module_param_named(max_mcast_qp_attached, ib_qib_max_mcast_qp_attached,
		   uint, S_IRUGO);
MODULE_PARM_DESC(max_mcast_qp_attached,
		 "Maximum number of attached QPs to support");

unsigned int ib_qib_max_srqs = 1024;
module_param_named(max_srqs, ib_qib_max_srqs, uint, S_IRUGO);
MODULE_PARM_DESC(max_srqs, "Maximum number of SRQs to support");

unsigned int ib_qib_max_srq_sges = 128;
module_param_named(max_srq_sges, ib_qib_max_srq_sges, uint, S_IRUGO);
MODULE_PARM_DESC(max_srq_sges, "Maximum number of SRQ SGEs to support");

unsigned int ib_qib_max_srq_wrs = 0x1FFFF;
module_param_named(max_srq_wrs, ib_qib_max_srq_wrs, uint, S_IRUGO);
MODULE_PARM_DESC(max_srq_wrs, "Maximum number of SRQ WRs to support");

static unsigned int ib_qib_disable_sma;
module_param_named(disable_sma, ib_qib_disable_sma, uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(disable_sma, "Disable the SMA");

/*
 * Note that it is OK to post send work requests in the SQE and ERR
 * states; qib_do_send() will process them and generate error
 * completions as per IB 1.2 C10-96.
 */
const int ib_qib_state_ops[IB_QPS_ERR + 1] = {
	[IB_QPS_RESET] = 0,
	[IB_QPS_INIT] = QIB_POST_RECV_OK,
	[IB_QPS_RTR] = QIB_POST_RECV_OK | QIB_PROCESS_RECV_OK,
	[IB_QPS_RTS] = QIB_POST_RECV_OK | QIB_PROCESS_RECV_OK |
	    QIB_POST_SEND_OK | QIB_PROCESS_SEND_OK |
	    QIB_PROCESS_NEXT_SEND_OK,
	[IB_QPS_SQD] = QIB_POST_RECV_OK | QIB_PROCESS_RECV_OK |
	    QIB_POST_SEND_OK | QIB_PROCESS_SEND_OK,
	[IB_QPS_SQE] = QIB_POST_RECV_OK | QIB_PROCESS_RECV_OK |
	    QIB_POST_SEND_OK | QIB_FLUSH_SEND,
	[IB_QPS_ERR] = QIB_POST_RECV_OK | QIB_FLUSH_RECV |
	    QIB_POST_SEND_OK | QIB_FLUSH_SEND,
};
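
/*
 * Callers index this table with the current QP state and test the flags,
 * e.g. qib_post_one_send() checks
 * ib_qib_state_ops[qp->state] & QIB_POST_SEND_OK and qib_qp_rcv() checks
 * QIB_PROCESS_RECV_OK before touching the queues.
 */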

struct qib_ucontext {
	struct ib_ucontext ibucontext;
};

static inline struct qib_ucontext *to_iucontext(struct ib_ucontext
						*ibucontext)
{
	return container_of(ibucontext, struct qib_ucontext, ibucontext);
}

/*
 * Translate ib_wr_opcode into ib_wc_opcode.
 */
const enum ib_wc_opcode ib_qib_wc_opcode[] = {
	[IB_WR_RDMA_WRITE] = IB_WC_RDMA_WRITE,
	[IB_WR_RDMA_WRITE_WITH_IMM] = IB_WC_RDMA_WRITE,
	[IB_WR_SEND] = IB_WC_SEND,
	[IB_WR_SEND_WITH_IMM] = IB_WC_SEND,
	[IB_WR_RDMA_READ] = IB_WC_RDMA_READ,
	[IB_WR_ATOMIC_CMP_AND_SWP] = IB_WC_COMP_SWAP,
	[IB_WR_ATOMIC_FETCH_AND_ADD] = IB_WC_FETCH_ADD
};

/*
 * System image GUID.
 */
__be64 ib_qib_sys_image_guid;

/**
 * qib_copy_sge - copy data to SGE memory
 * @ss: the SGE state
 * @data: the data to copy
 * @length: the length of the data
 * @release: if non-zero, release the MR of each SGE as it is consumed
 */
void qib_copy_sge(struct rvt_sge_state *ss, void *data, u32 length, int release)
{
	struct rvt_sge *sge = &ss->sge;

	while (length) {
		u32 len = sge->length;

		if (len > length)
			len = length;
		if (len > sge->sge_length)
			len = sge->sge_length;
		BUG_ON(len == 0);
		memcpy(sge->vaddr, data, len);
		sge->vaddr += len;
		sge->length -= len;
		sge->sge_length -= len;
		if (sge->sge_length == 0) {
			if (release)
				rvt_put_mr(sge->mr);
			if (--ss->num_sge)
				*sge = *ss->sg_list++;
		} else if (sge->length == 0 && sge->mr->lkey) {
			if (++sge->n >= RVT_SEGSZ) {
				if (++sge->m >= sge->mr->mapsz)
					break;
				sge->n = 0;
			}
			sge->vaddr =
				sge->mr->map[sge->m]->segs[sge->n].vaddr;
			sge->length =
				sge->mr->map[sge->m]->segs[sge->n].length;
		}
		data += len;
		length -= len;
	}
}

/**
 * qib_skip_sge - skip over SGE memory - XXX almost dup of prev func
 * @ss: the SGE state
 * @length: the number of bytes to skip
 * @release: if non-zero, release the MR of each SGE as it is skipped
 */
void qib_skip_sge(struct rvt_sge_state *ss, u32 length, int release)
{
	struct rvt_sge *sge = &ss->sge;

	while (length) {
		u32 len = sge->length;

		if (len > length)
			len = length;
		if (len > sge->sge_length)
			len = sge->sge_length;
		BUG_ON(len == 0);
		sge->vaddr += len;
		sge->length -= len;
		sge->sge_length -= len;
		if (sge->sge_length == 0) {
			if (release)
				rvt_put_mr(sge->mr);
			if (--ss->num_sge)
				*sge = *ss->sg_list++;
		} else if (sge->length == 0 && sge->mr->lkey) {
			if (++sge->n >= RVT_SEGSZ) {
				if (++sge->m >= sge->mr->mapsz)
					break;
				sge->n = 0;
			}
			sge->vaddr =
				sge->mr->map[sge->m]->segs[sge->n].vaddr;
			sge->length =
				sge->mr->map[sge->m]->segs[sge->n].length;
		}
		length -= len;
	}
}

/*
 * Count the number of DMA descriptors needed to send length bytes of data.
 * Don't modify the rvt_sge_state to get the count.
 * Return zero if any of the segments is not aligned.
 */
static u32 qib_count_sge(struct rvt_sge_state *ss, u32 length)
{
	struct rvt_sge *sg_list = ss->sg_list;
	struct rvt_sge sge = ss->sge;
	u8 num_sge = ss->num_sge;
	u32 ndesc = 1;	/* count the header */

	while (length) {
		u32 len = sge.length;

		if (len > length)
			len = length;
		if (len > sge.sge_length)
			len = sge.sge_length;
		BUG_ON(len == 0);
		if (((long) sge.vaddr & (sizeof(u32) - 1)) ||
		    (len != length && (len & (sizeof(u32) - 1)))) {
			ndesc = 0;
			break;
		}
		ndesc++;
		sge.vaddr += len;
		sge.length -= len;
		sge.sge_length -= len;
		if (sge.sge_length == 0) {
			if (--num_sge)
				sge = *sg_list++;
		} else if (sge.length == 0 && sge.mr->lkey) {
			if (++sge.n >= RVT_SEGSZ) {
				if (++sge.m >= sge.mr->mapsz)
					break;
				sge.n = 0;
			}
			sge.vaddr =
				sge.mr->map[sge.m]->segs[sge.n].vaddr;
			sge.length =
				sge.mr->map[sge.m]->segs[sge.n].length;
		}
		length -= len;
	}
	return ndesc;
}
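
/*
 * Note: qib_verbs_send_dma() uses the count computed above to decide
 * whether the payload can be handed to the SDMA engine directly; a
 * return of 0 (unaligned segment) or a count that exceeds the SDMA
 * descriptor queue makes it fall back to copying the packet into a
 * single aligned bounce buffer.
 */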

/*
 * Copy from the SGEs to the data buffer.
 */
static void qib_copy_from_sge(void *data, struct rvt_sge_state *ss, u32 length)
{
	struct rvt_sge *sge = &ss->sge;

	while (length) {
		u32 len = sge->length;

		if (len > length)
			len = length;
		if (len > sge->sge_length)
			len = sge->sge_length;
		BUG_ON(len == 0);
		memcpy(data, sge->vaddr, len);
		sge->vaddr += len;
		sge->length -= len;
		sge->sge_length -= len;
		if (sge->sge_length == 0) {
			if (--ss->num_sge)
				*sge = *ss->sg_list++;
		} else if (sge->length == 0 && sge->mr->lkey) {
			if (++sge->n >= RVT_SEGSZ) {
				if (++sge->m >= sge->mr->mapsz)
					break;
				sge->n = 0;
			}
			sge->vaddr =
				sge->mr->map[sge->m]->segs[sge->n].vaddr;
			sge->length =
				sge->mr->map[sge->m]->segs[sge->n].length;
		}
		data += len;
		length -= len;
	}
}

/**
 * qib_post_one_send - post one RC, UC, or UD send work request
 * @qp: the QP to post on
 * @wr: the work request to send
 * @scheduled: set to 1 if this call scheduled the send engine
 */
static int qib_post_one_send(struct rvt_qp *qp, struct ib_send_wr *wr,
			     int *scheduled)
{
	struct rvt_swqe *wqe;
	u32 next;
	int i;
	int j;
	int acc;
	int ret;
	unsigned long flags;
	struct rvt_lkey_table *rkt;
	struct rvt_pd *pd;
	int avoid_schedule = 0;

	spin_lock_irqsave(&qp->s_lock, flags);

	/* Check that state is OK to post send. */
	if (unlikely(!(ib_qib_state_ops[qp->state] & QIB_POST_SEND_OK)))
		goto bail_inval;

	/* IB spec says that num_sge == 0 is OK. */
	if (wr->num_sge > qp->s_max_sge)
		goto bail_inval;

	/*
	 * Don't allow RDMA reads or atomic operations on UC or
	 * undefined operations.
	 * Make sure buffer is large enough to hold the result for atomics.
	 */
	if (qp->ibqp.qp_type == IB_QPT_UC) {
		if ((unsigned) wr->opcode >= IB_WR_RDMA_READ)
			goto bail_inval;
	} else if (qp->ibqp.qp_type != IB_QPT_RC) {
		/* Check IB_QPT_SMI, IB_QPT_GSI, IB_QPT_UD opcode */
		if (wr->opcode != IB_WR_SEND &&
		    wr->opcode != IB_WR_SEND_WITH_IMM)
			goto bail_inval;
		/* Check UD destination address PD */
		if (qp->ibqp.pd != ud_wr(wr)->ah->pd)
			goto bail_inval;
	} else if ((unsigned) wr->opcode > IB_WR_ATOMIC_FETCH_AND_ADD)
		goto bail_inval;
	else if (wr->opcode >= IB_WR_ATOMIC_CMP_AND_SWP &&
		   (wr->num_sge == 0 ||
		    wr->sg_list[0].length < sizeof(u64) ||
		    wr->sg_list[0].addr & (sizeof(u64) - 1)))
		goto bail_inval;
	else if (wr->opcode >= IB_WR_RDMA_READ && !qp->s_max_rd_atomic)
		goto bail_inval;

	next = qp->s_head + 1;
	if (next >= qp->s_size)
		next = 0;
	if (next == qp->s_last) {
		ret = -ENOMEM;
		goto bail;
	}

	rkt = &to_idev(qp->ibqp.device)->rdi.lkey_table;
	pd = ibpd_to_rvtpd(qp->ibqp.pd);
	wqe = get_swqe_ptr(qp, qp->s_head);

	if (qp->ibqp.qp_type != IB_QPT_UC &&
	    qp->ibqp.qp_type != IB_QPT_RC)
		memcpy(&wqe->ud_wr, ud_wr(wr), sizeof(wqe->ud_wr));
	else if (wr->opcode == IB_WR_REG_MR)
		memcpy(&wqe->reg_wr, reg_wr(wr),
			sizeof(wqe->reg_wr));
	else if (wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
		 wr->opcode == IB_WR_RDMA_WRITE ||
		 wr->opcode == IB_WR_RDMA_READ)
		memcpy(&wqe->rdma_wr, rdma_wr(wr), sizeof(wqe->rdma_wr));
	else if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
		 wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD)
		memcpy(&wqe->atomic_wr, atomic_wr(wr), sizeof(wqe->atomic_wr));
	else
		memcpy(&wqe->wr, wr, sizeof(wqe->wr));

	wqe->length = 0;
	j = 0;
	if (wr->num_sge) {
		acc = wr->opcode >= IB_WR_RDMA_READ ?
			IB_ACCESS_LOCAL_WRITE : 0;
		for (i = 0; i < wr->num_sge; i++) {
			u32 length = wr->sg_list[i].length;
			int ok;

			if (length == 0)
				continue;
			ok = rvt_lkey_ok(rkt, pd, &wqe->sg_list[j],
					 &wr->sg_list[i], acc);
			if (!ok)
				goto bail_inval_free;
			wqe->length += length;
			j++;
		}
		wqe->wr.num_sge = j;
	}
	if (qp->ibqp.qp_type == IB_QPT_UC ||
	    qp->ibqp.qp_type == IB_QPT_RC) {
		if (wqe->length > 0x80000000U)
			goto bail_inval_free;
		if (wqe->length <= qp->pmtu)
			avoid_schedule = 1;
	} else if (wqe->length > (dd_from_ibdev(qp->ibqp.device)->pport +
				  qp->port_num - 1)->ibmtu) {
		goto bail_inval_free;
	} else {
		atomic_inc(&ibah_to_rvtah(ud_wr(wr)->ah)->refcount);
		avoid_schedule = 1;
	}
	wqe->ssn = qp->s_ssn++;
	qp->s_head = next;

	ret = 0;
	goto bail;

bail_inval_free:
	while (j) {
		struct rvt_sge *sge = &wqe->sg_list[--j];

		rvt_put_mr(sge->mr);
	}
bail_inval:
	ret = -EINVAL;
bail:
	if (!ret && !wr->next && !avoid_schedule &&
	    !qib_sdma_empty(
	       dd_from_ibdev(qp->ibqp.device)->pport + qp->port_num - 1)) {
		qib_schedule_send(qp);
		*scheduled = 1;
	}
	spin_unlock_irqrestore(&qp->s_lock, flags);
	return ret;
}
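
/*
 * Scheduling note: the bail path above only hands the QP to
 * qib_schedule_send() for the last work request in a chain, when the
 * avoid_schedule heuristic did not apply and the SDMA queue is still
 * non-empty; otherwise qib_post_send() below runs the send engine
 * directly in the caller's context via qib_do_send().
 */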

/**
 * qib_post_send - post a send on a QP
 * @ibqp: the QP to post the send on
 * @wr: the list of work requests to post
 * @bad_wr: the first bad WR is put here
 *
 * This may be called from interrupt context.
 */
static int qib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
			 struct ib_send_wr **bad_wr)
{
	struct rvt_qp *qp = to_iqp(ibqp);
	struct qib_qp_priv *priv = qp->priv;
	int err = 0;
	int scheduled = 0;

	for (; wr; wr = wr->next) {
		err = qib_post_one_send(qp, wr, &scheduled);
		if (err) {
			*bad_wr = wr;
			goto bail;
		}
	}

	/* Try to do the send work in the caller's context. */
	if (!scheduled)
		qib_do_send(&priv->s_work);

bail:
	return err;
}

/**
 * qib_post_receive - post a receive on a QP
 * @ibqp: the QP to post the receive on
 * @wr: the list of work requests to post
 * @bad_wr: the first bad WR is put here
 *
 * This may be called from interrupt context.
 */
static int qib_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
			    struct ib_recv_wr **bad_wr)
{
	struct rvt_qp *qp = to_iqp(ibqp);
	struct rvt_rwq *wq = qp->r_rq.wq;
	unsigned long flags;
	int ret;

	/* Check that state is OK to post receive. */
	if (!(ib_qib_state_ops[qp->state] & QIB_POST_RECV_OK) || !wq) {
		*bad_wr = wr;
		ret = -EINVAL;
		goto bail;
	}

	for (; wr; wr = wr->next) {
		struct rvt_rwqe *wqe;
		u32 next;
		int i;

		if ((unsigned) wr->num_sge > qp->r_rq.max_sge) {
			*bad_wr = wr;
			ret = -EINVAL;
			goto bail;
		}

		spin_lock_irqsave(&qp->r_rq.lock, flags);
		next = wq->head + 1;
		if (next >= qp->r_rq.size)
			next = 0;
		if (next == wq->tail) {
			spin_unlock_irqrestore(&qp->r_rq.lock, flags);
			*bad_wr = wr;
			ret = -ENOMEM;
			goto bail;
		}

		wqe = get_rwqe_ptr(&qp->r_rq, wq->head);
		wqe->wr_id = wr->wr_id;
		wqe->num_sge = wr->num_sge;
		for (i = 0; i < wr->num_sge; i++)
			wqe->sg_list[i] = wr->sg_list[i];
		/* Make sure queue entry is written before the head index. */
		smp_wmb();
		wq->head = next;
		spin_unlock_irqrestore(&qp->r_rq.lock, flags);
	}
	ret = 0;

bail:
	return ret;
}

/**
 * qib_qp_rcv - process an incoming packet on a QP
 * @rcd: the context pointer
 * @hdr: the packet header
 * @has_grh: true if the packet has a GRH
 * @data: the packet data
 * @tlen: the packet length
 * @qp: the QP the packet came on
 *
 * This is called from qib_ib_rcv() to process an incoming packet
 * for the given QP.
 * Called at interrupt level.
 */
static void qib_qp_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr,
		       int has_grh, void *data, u32 tlen, struct rvt_qp *qp)
{
	struct qib_ibport *ibp = &rcd->ppd->ibport_data;

	spin_lock(&qp->r_lock);

	/* Check for valid receive state. */
	if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) {
		ibp->rvp.n_pkt_drops++;
		goto unlock;
	}

	switch (qp->ibqp.qp_type) {
	case IB_QPT_SMI:
	case IB_QPT_GSI:
		if (ib_qib_disable_sma)
			break;
		/* FALLTHROUGH */
	case IB_QPT_UD:
		qib_ud_rcv(ibp, hdr, has_grh, data, tlen, qp);
		break;

	case IB_QPT_RC:
		qib_rc_rcv(rcd, hdr, has_grh, data, tlen, qp);
		break;

	case IB_QPT_UC:
		qib_uc_rcv(ibp, hdr, has_grh, data, tlen, qp);
		break;

	default:
		break;
	}

unlock:
	spin_unlock(&qp->r_lock);
}

/**
 * qib_ib_rcv - process an incoming packet
 * @rcd: the context pointer
 * @rhdr: the header of the packet
 * @data: the packet payload
 * @tlen: the packet length
 *
 * This is called from qib_kreceive() to process an incoming packet at
 * interrupt level. Tlen is the length of the header + data + CRC in bytes.
 */
void qib_ib_rcv(struct qib_ctxtdata *rcd, void *rhdr, void *data, u32 tlen)
{
	struct qib_pportdata *ppd = rcd->ppd;
	struct qib_ibport *ibp = &ppd->ibport_data;
	struct qib_ib_header *hdr = rhdr;
	struct qib_other_headers *ohdr;
	struct rvt_qp *qp;
	u32 qp_num;
	int lnh;
	u8 opcode;
	u16 lid;

	/* 24 == LRH+BTH+CRC */
	if (unlikely(tlen < 24))
		goto drop;

	/* Check for a valid destination LID (see ch. 7.11.1). */
	lid = be16_to_cpu(hdr->lrh[1]);
	if (lid < be16_to_cpu(IB_MULTICAST_LID_BASE)) {
		lid &= ~((1 << ppd->lmc) - 1);
		if (unlikely(lid != ppd->lid))
			goto drop;
	}

	/* Check for GRH */
	lnh = be16_to_cpu(hdr->lrh[0]) & 3;
	if (lnh == QIB_LRH_BTH)
		ohdr = &hdr->u.oth;
	else if (lnh == QIB_LRH_GRH) {
		u32 vtf;

		ohdr = &hdr->u.l.oth;
		if (hdr->u.l.grh.next_hdr != IB_GRH_NEXT_HDR)
			goto drop;
		vtf = be32_to_cpu(hdr->u.l.grh.version_tclass_flow);
		if ((vtf >> IB_GRH_VERSION_SHIFT) != IB_GRH_VERSION)
			goto drop;
	} else
		goto drop;

	opcode = (be32_to_cpu(ohdr->bth[0]) >> 24) & 0x7f;
#ifdef CONFIG_DEBUG_FS
	rcd->opstats->stats[opcode].n_bytes += tlen;
	rcd->opstats->stats[opcode].n_packets++;
#endif

	/* Get the destination QP number. */
	qp_num = be32_to_cpu(ohdr->bth[1]) & QIB_QPN_MASK;
	if (qp_num == QIB_MULTICAST_QPN) {
		struct qib_mcast *mcast;
		struct qib_mcast_qp *p;

		if (lnh != QIB_LRH_GRH)
			goto drop;
		mcast = qib_mcast_find(ibp, &hdr->u.l.grh.dgid);
		if (mcast == NULL)
			goto drop;
		this_cpu_inc(ibp->pmastats->n_multicast_rcv);
		list_for_each_entry_rcu(p, &mcast->qp_list, list)
			qib_qp_rcv(rcd, hdr, 1, data, tlen, p->qp);
		/*
		 * Notify qib_multicast_detach() if it is waiting for us
		 * to finish.
		 */
		if (atomic_dec_return(&mcast->refcount) <= 1)
			wake_up(&mcast->wait);
	} else {
		if (rcd->lookaside_qp) {
			if (rcd->lookaside_qpn != qp_num) {
				if (atomic_dec_and_test(
					&rcd->lookaside_qp->refcount))
					wake_up(
					 &rcd->lookaside_qp->wait);
				rcd->lookaside_qp = NULL;
			}
		}
		if (!rcd->lookaside_qp) {
			qp = qib_lookup_qpn(ibp, qp_num);
			if (!qp)
				goto drop;
			rcd->lookaside_qp = qp;
			rcd->lookaside_qpn = qp_num;
		} else
			qp = rcd->lookaside_qp;
		this_cpu_inc(ibp->pmastats->n_unicast_rcv);
		qib_qp_rcv(rcd, hdr, lnh == QIB_LRH_GRH, data, tlen, qp);
	}
	return;

drop:
	ibp->rvp.n_pkt_drops++;
}

/*
 * This is called from a timer to check for QPs
 * which need kernel memory in order to send a packet.
 */
static void mem_timer(unsigned long data)
{
	struct qib_ibdev *dev = (struct qib_ibdev *) data;
	struct list_head *list = &dev->memwait;
	struct rvt_qp *qp = NULL;
	struct qib_qp_priv *priv = NULL;
	unsigned long flags;

	spin_lock_irqsave(&dev->rdi.pending_lock, flags);
	if (!list_empty(list)) {
		priv = list_entry(list->next, struct qib_qp_priv, iowait);
		qp = priv->owner;
		list_del_init(&priv->iowait);
		atomic_inc(&qp->refcount);
		if (!list_empty(list))
			mod_timer(&dev->mem_timer, jiffies + 1);
	}
	spin_unlock_irqrestore(&dev->rdi.pending_lock, flags);

	if (qp) {
		spin_lock_irqsave(&qp->s_lock, flags);
		if (qp->s_flags & QIB_S_WAIT_KMEM) {
			qp->s_flags &= ~QIB_S_WAIT_KMEM;
			qib_schedule_send(qp);
		}
		spin_unlock_irqrestore(&qp->s_lock, flags);
		if (atomic_dec_and_test(&qp->refcount))
			wake_up(&qp->wait);
	}
}

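/*
 * Advance the SGE state by 'length' bytes without copying any data.
 * Used by the PIO send path (qib_verbs_send_pio() and copy_io()) once
 * the corresponding bytes have been written to the chip buffer.
 */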
static void update_sge(struct rvt_sge_state *ss, u32 length)
{
	struct rvt_sge *sge = &ss->sge;

	sge->vaddr += length;
	sge->length -= length;
	sge->sge_length -= length;
	if (sge->sge_length == 0) {
		if (--ss->num_sge)
			*sge = *ss->sg_list++;
	} else if (sge->length == 0 && sge->mr->lkey) {
		if (++sge->n >= RVT_SEGSZ) {
			if (++sge->m >= sge->mr->mapsz)
				return;
			sge->n = 0;
		}
		sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr;
		sge->length = sge->mr->map[sge->m]->segs[sge->n].length;
	}
}

#ifdef __LITTLE_ENDIAN
static inline u32 get_upper_bits(u32 data, u32 shift)
{
	return data >> shift;
}

static inline u32 set_upper_bits(u32 data, u32 shift)
{
	return data << shift;
}

static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
{
	data <<= ((sizeof(u32) - n) * BITS_PER_BYTE);
	data >>= ((sizeof(u32) - n - off) * BITS_PER_BYTE);
	return data;
}
#else
static inline u32 get_upper_bits(u32 data, u32 shift)
{
	return data << shift;
}

static inline u32 set_upper_bits(u32 data, u32 shift)
{
	return data >> shift;
}

static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
{
	data >>= ((sizeof(u32) - n) * BITS_PER_BYTE);
	data <<= ((sizeof(u32) - n - off) * BITS_PER_BYTE);
	return data;
}
#endif

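/*
 * Copy an arbitrarily aligned SGE list into a write-combining PIO buffer,
 * one 32-bit word at a time.  The endian helpers above (get_upper_bits(),
 * set_upper_bits(), clear_upper_bytes()) assemble partially filled words:
 * 'extra' counts the bytes already accumulated in 'data', and the final
 * word is held back in 'last' so it can be written as the trigger word
 * after an optional write-combining flush.
 */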
static void copy_io(u32 __iomem *piobuf, struct rvt_sge_state *ss,
		    u32 length, unsigned flush_wc)
{
	u32 extra = 0;
	u32 data = 0;
	u32 last;

	while (1) {
		u32 len = ss->sge.length;
		u32 off;

		if (len > length)
			len = length;
		if (len > ss->sge.sge_length)
			len = ss->sge.sge_length;
		BUG_ON(len == 0);
		/* If the source address is not aligned, try to align it. */
		off = (unsigned long)ss->sge.vaddr & (sizeof(u32) - 1);
		if (off) {
			u32 *addr = (u32 *)((unsigned long)ss->sge.vaddr &
					    ~(sizeof(u32) - 1));
			u32 v = get_upper_bits(*addr, off * BITS_PER_BYTE);
			u32 y;

			y = sizeof(u32) - off;
			if (len > y)
				len = y;
			if (len + extra >= sizeof(u32)) {
				data |= set_upper_bits(v, extra *
						       BITS_PER_BYTE);
				len = sizeof(u32) - extra;
				if (len == length) {
					last = data;
					break;
				}
				__raw_writel(data, piobuf);
				piobuf++;
				extra = 0;
				data = 0;
			} else {
				/* Clear unused upper bytes */
				data |= clear_upper_bytes(v, len, extra);
				if (len == length) {
					last = data;
					break;
				}
				extra += len;
			}
		} else if (extra) {
			/* Source address is aligned. */
			u32 *addr = (u32 *) ss->sge.vaddr;
			int shift = extra * BITS_PER_BYTE;
			int ushift = 32 - shift;
			u32 l = len;

			while (l >= sizeof(u32)) {
				u32 v = *addr;

				data |= set_upper_bits(v, shift);
				__raw_writel(data, piobuf);
				data = get_upper_bits(v, ushift);
				piobuf++;
				addr++;
				l -= sizeof(u32);
			}
			/*
			 * We still have 'extra' number of bytes leftover.
			 */
			if (l) {
				u32 v = *addr;

				if (l + extra >= sizeof(u32)) {
					data |= set_upper_bits(v, shift);
					len -= l + extra - sizeof(u32);
					if (len == length) {
						last = data;
						break;
					}
					__raw_writel(data, piobuf);
					piobuf++;
					extra = 0;
					data = 0;
				} else {
					/* Clear unused upper bytes */
					data |= clear_upper_bytes(v, l, extra);
					if (len == length) {
						last = data;
						break;
					}
					extra += l;
				}
			} else if (len == length) {
				last = data;
				break;
			}
		} else if (len == length) {
			u32 w;

			/*
			 * Need to round up for the last dword in the
			 * packet.
			 */
			w = (len + 3) >> 2;
			qib_pio_copy(piobuf, ss->sge.vaddr, w - 1);
			piobuf += w - 1;
			last = ((u32 *) ss->sge.vaddr)[w - 1];
			break;
		} else {
			u32 w = len >> 2;

			qib_pio_copy(piobuf, ss->sge.vaddr, w);
			piobuf += w;

			extra = len & (sizeof(u32) - 1);
			if (extra) {
				u32 v = ((u32 *) ss->sge.vaddr)[w];

				/* Clear unused upper bytes */
				data = clear_upper_bytes(v, extra, 0);
			}
		}
		update_sge(ss, len);
		length -= len;
	}
	/* Update address before sending packet. */
	update_sge(ss, length);
	if (flush_wc) {
		/* must flush early everything before trigger word */
		qib_flush_wc();
		__raw_writel(last, piobuf);
		/* be sure trigger word is written */
		qib_flush_wc();
	} else
		__raw_writel(last, piobuf);
}

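/*
 * Transmit request allocation.  get_txreq() below is the fast path: it
 * takes only dev->rdi.pending_lock and assumes the free list is non-empty.
 * __get_txreq() is the slow path that retakes qp->s_lock as well so the QP
 * can be queued on dev->txwait (QIB_S_WAIT_TX) when no txreq is available.
 */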
static noinline struct qib_verbs_txreq *__get_txreq(struct qib_ibdev *dev,
						    struct rvt_qp *qp)
{
	struct qib_qp_priv *priv = qp->priv;
	struct qib_verbs_txreq *tx;
	unsigned long flags;

	spin_lock_irqsave(&qp->s_lock, flags);
	spin_lock(&dev->rdi.pending_lock);

	if (!list_empty(&dev->txreq_free)) {
		struct list_head *l = dev->txreq_free.next;

		list_del(l);
		spin_unlock(&dev->rdi.pending_lock);
		spin_unlock_irqrestore(&qp->s_lock, flags);
		tx = list_entry(l, struct qib_verbs_txreq, txreq.list);
	} else {
		if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK &&
		    list_empty(&priv->iowait)) {
			dev->n_txwait++;
			qp->s_flags |= QIB_S_WAIT_TX;
			list_add_tail(&priv->iowait, &dev->txwait);
		}
		qp->s_flags &= ~QIB_S_BUSY;
		spin_unlock(&dev->rdi.pending_lock);
		spin_unlock_irqrestore(&qp->s_lock, flags);
		tx = ERR_PTR(-EBUSY);
	}
	return tx;
}

static inline struct qib_verbs_txreq *get_txreq(struct qib_ibdev *dev,
						struct rvt_qp *qp)
{
	struct qib_verbs_txreq *tx;
	unsigned long flags;

	spin_lock_irqsave(&dev->rdi.pending_lock, flags);
	/* assume the list non empty */
	if (likely(!list_empty(&dev->txreq_free))) {
		struct list_head *l = dev->txreq_free.next;

		list_del(l);
		spin_unlock_irqrestore(&dev->rdi.pending_lock, flags);
		tx = list_entry(l, struct qib_verbs_txreq, txreq.list);
	} else {
		/* call slow path to get the extra lock */
		spin_unlock_irqrestore(&dev->rdi.pending_lock, flags);
		tx = __get_txreq(dev, qp);
	}
	return tx;
}

void qib_put_txreq(struct qib_verbs_txreq *tx)
{
	struct qib_ibdev *dev;
	struct rvt_qp *qp;
	struct qib_qp_priv *priv;
	unsigned long flags;

	qp = tx->qp;
	dev = to_idev(qp->ibqp.device);

	if (atomic_dec_and_test(&qp->refcount))
		wake_up(&qp->wait);
	if (tx->mr) {
		rvt_put_mr(tx->mr);
		tx->mr = NULL;
	}
	if (tx->txreq.flags & QIB_SDMA_TXREQ_F_FREEBUF) {
		tx->txreq.flags &= ~QIB_SDMA_TXREQ_F_FREEBUF;
		dma_unmap_single(&dd_from_dev(dev)->pcidev->dev,
				 tx->txreq.addr, tx->hdr_dwords << 2,
				 DMA_TO_DEVICE);
		kfree(tx->align_buf);
	}

	spin_lock_irqsave(&dev->rdi.pending_lock, flags);

	/* Put struct back on free list */
	list_add(&tx->txreq.list, &dev->txreq_free);

	if (!list_empty(&dev->txwait)) {
		/* Wake up first QP wanting a free struct */
		priv = list_entry(dev->txwait.next, struct qib_qp_priv,
				  iowait);
		qp = priv->owner;
		list_del_init(&priv->iowait);
		atomic_inc(&qp->refcount);
		spin_unlock_irqrestore(&dev->rdi.pending_lock, flags);

		spin_lock_irqsave(&qp->s_lock, flags);
		if (qp->s_flags & QIB_S_WAIT_TX) {
			qp->s_flags &= ~QIB_S_WAIT_TX;
			qib_schedule_send(qp);
		}
		spin_unlock_irqrestore(&qp->s_lock, flags);

		if (atomic_dec_and_test(&qp->refcount))
			wake_up(&qp->wait);
	} else
		spin_unlock_irqrestore(&dev->rdi.pending_lock, flags);
}

/*
 * This is called when there are send DMA descriptors that might be
 * available.
 *
 * This is called with ppd->sdma_lock held.
 */
void qib_verbs_sdma_desc_avail(struct qib_pportdata *ppd, unsigned avail)
{
	struct rvt_qp *qp, *nqp;
	struct qib_qp_priv *qpp, *nqpp;
	struct rvt_qp *qps[20];
	struct qib_ibdev *dev;
	unsigned i, n;

	n = 0;
	dev = &ppd->dd->verbs_dev;
	spin_lock(&dev->rdi.pending_lock);

	/* Search wait list for first QP wanting DMA descriptors. */
	list_for_each_entry_safe(qpp, nqpp, &dev->dmawait, iowait) {
		qp = qpp->owner;
		nqp = nqpp->owner;
		if (qp->port_num != ppd->port)
			continue;
		if (n == ARRAY_SIZE(qps))
			break;
		if (qpp->s_tx->txreq.sg_count > avail)
			break;
		avail -= qpp->s_tx->txreq.sg_count;
		list_del_init(&qpp->iowait);
		atomic_inc(&qp->refcount);
		qps[n++] = qp;
	}

	spin_unlock(&dev->rdi.pending_lock);

	for (i = 0; i < n; i++) {
		qp = qps[i];
		spin_lock(&qp->s_lock);
		if (qp->s_flags & QIB_S_WAIT_DMA_DESC) {
			qp->s_flags &= ~QIB_S_WAIT_DMA_DESC;
			qib_schedule_send(qp);
		}
		spin_unlock(&qp->s_lock);
		if (atomic_dec_and_test(&qp->refcount))
			wake_up(&qp->wait);
	}
}

/*
 * This is called with ppd->sdma_lock held.
 */
static void sdma_complete(struct qib_sdma_txreq *cookie, int status)
{
	struct qib_verbs_txreq *tx =
		container_of(cookie, struct qib_verbs_txreq, txreq);
	struct rvt_qp *qp = tx->qp;
	struct qib_qp_priv *priv = qp->priv;

	spin_lock(&qp->s_lock);
	if (tx->wqe)
		qib_send_complete(qp, tx->wqe, IB_WC_SUCCESS);
	else if (qp->ibqp.qp_type == IB_QPT_RC) {
		struct qib_ib_header *hdr;

		if (tx->txreq.flags & QIB_SDMA_TXREQ_F_FREEBUF)
			hdr = &tx->align_buf->hdr;
		else {
			struct qib_ibdev *dev = to_idev(qp->ibqp.device);

			hdr = &dev->pio_hdrs[tx->hdr_inx].hdr;
		}
		qib_rc_send_complete(qp, hdr);
	}
	if (atomic_dec_and_test(&priv->s_dma_busy)) {
		if (qp->state == IB_QPS_RESET)
			wake_up(&priv->wait_dma);
		else if (qp->s_flags & QIB_S_WAIT_DMA) {
			qp->s_flags &= ~QIB_S_WAIT_DMA;
			qib_schedule_send(qp);
		}
	}
	spin_unlock(&qp->s_lock);

	qib_put_txreq(tx);
}

static int wait_kmem(struct qib_ibdev *dev, struct rvt_qp *qp)
{
	struct qib_qp_priv *priv = qp->priv;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&qp->s_lock, flags);
	if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK) {
		spin_lock(&dev->rdi.pending_lock);
		if (list_empty(&priv->iowait)) {
			if (list_empty(&dev->memwait))
				mod_timer(&dev->mem_timer, jiffies + 1);
			qp->s_flags |= QIB_S_WAIT_KMEM;
			list_add_tail(&priv->iowait, &dev->memwait);
		}
		spin_unlock(&dev->rdi.pending_lock);
		qp->s_flags &= ~QIB_S_BUSY;
		ret = -EBUSY;
	}
	spin_unlock_irqrestore(&qp->s_lock, flags);

	return ret;
}

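/*
 * Post a packet on the SDMA engine.  A previously constructed but unposted
 * request (priv->s_tx) is resent first; otherwise a txreq is allocated, the
 * PBC and header are set up, and the payload is either described by SGEs or
 * copied into an aligned bounce buffer when the descriptor count from
 * qib_count_sge() does not fit the ring.
 */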
static int qib_verbs_send_dma(struct rvt_qp *qp, struct qib_ib_header *hdr,
			      u32 hdrwords, struct rvt_sge_state *ss, u32 len,
			      u32 plen, u32 dwords)
{
	struct qib_qp_priv *priv = qp->priv;
	struct qib_ibdev *dev = to_idev(qp->ibqp.device);
	struct qib_devdata *dd = dd_from_dev(dev);
	struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	struct qib_verbs_txreq *tx;
	struct qib_pio_header *phdr;
	u32 control;
	u32 ndesc;
	int ret;

	tx = priv->s_tx;
	if (tx) {
		priv->s_tx = NULL;
		/* resend previously constructed packet */
		ret = qib_sdma_verbs_send(ppd, tx->ss, tx->dwords, tx);
		goto bail;
	}

	tx = get_txreq(dev, qp);
	if (IS_ERR(tx))
		goto bail_tx;

	control = dd->f_setpbc_control(ppd, plen, qp->s_srate,
				       be16_to_cpu(hdr->lrh[0]) >> 12);
	tx->qp = qp;
	atomic_inc(&qp->refcount);
	tx->wqe = qp->s_wqe;
	tx->mr = qp->s_rdma_mr;
	if (qp->s_rdma_mr)
		qp->s_rdma_mr = NULL;
	tx->txreq.callback = sdma_complete;
	if (dd->flags & QIB_HAS_SDMA_TIMEOUT)
		tx->txreq.flags = QIB_SDMA_TXREQ_F_HEADTOHOST;
	else
		tx->txreq.flags = QIB_SDMA_TXREQ_F_INTREQ;
	if (plen + 1 > dd->piosize2kmax_dwords)
		tx->txreq.flags |= QIB_SDMA_TXREQ_F_USELARGEBUF;

	if (len) {
		/*
		 * Don't try to DMA if it takes more descriptors than
		 * the queue holds.
		 */
		ndesc = qib_count_sge(ss, len);
		if (ndesc >= ppd->sdma_descq_cnt)
			ndesc = 0;
	} else
		ndesc = 1;
	if (ndesc) {
		phdr = &dev->pio_hdrs[tx->hdr_inx];
		phdr->pbc[0] = cpu_to_le32(plen);
		phdr->pbc[1] = cpu_to_le32(control);
		memcpy(&phdr->hdr, hdr, hdrwords << 2);
		tx->txreq.flags |= QIB_SDMA_TXREQ_F_FREEDESC;
		tx->txreq.sg_count = ndesc;
		tx->txreq.addr = dev->pio_hdrs_phys +
			tx->hdr_inx * sizeof(struct qib_pio_header);
		tx->hdr_dwords = hdrwords + 2; /* add PBC length */
		ret = qib_sdma_verbs_send(ppd, ss, dwords, tx);
		goto bail;
	}

	/* Allocate a buffer and copy the header and payload to it. */
	tx->hdr_dwords = plen + 1;
	phdr = kmalloc(tx->hdr_dwords << 2, GFP_ATOMIC);
	if (!phdr)
		goto err_tx;
	phdr->pbc[0] = cpu_to_le32(plen);
	phdr->pbc[1] = cpu_to_le32(control);
	memcpy(&phdr->hdr, hdr, hdrwords << 2);
	qib_copy_from_sge((u32 *) &phdr->hdr + hdrwords, ss, len);

	tx->txreq.addr = dma_map_single(&dd->pcidev->dev, phdr,
					tx->hdr_dwords << 2, DMA_TO_DEVICE);
	if (dma_mapping_error(&dd->pcidev->dev, tx->txreq.addr))
		goto map_err;
	tx->align_buf = phdr;
	tx->txreq.flags |= QIB_SDMA_TXREQ_F_FREEBUF;
	tx->txreq.sg_count = 1;
	ret = qib_sdma_verbs_send(ppd, NULL, 0, tx);
	goto unaligned;

map_err:
	kfree(phdr);
err_tx:
	qib_put_txreq(tx);
	ret = wait_kmem(dev, qp);
unaligned:
	ibp->rvp.n_unaligned++;
bail:
	return ret;
bail_tx:
	ret = PTR_ERR(tx);
	goto bail;
}

/*
 * If we are now in the error state, return zero to flush the
 * send work request.
 */
static int no_bufs_available(struct rvt_qp *qp)
{
	struct qib_qp_priv *priv = qp->priv;
	struct qib_ibdev *dev = to_idev(qp->ibqp.device);
	struct qib_devdata *dd;
	unsigned long flags;
	int ret = 0;

	/*
	 * Note that as soon as want_buffer() is called and
	 * possibly before it returns, qib_ib_piobufavail()
	 * could be called. Therefore, put QP on the I/O wait list before
	 * enabling the PIO avail interrupt.
	 */
	spin_lock_irqsave(&qp->s_lock, flags);
	if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK) {
		spin_lock(&dev->rdi.pending_lock);
		if (list_empty(&priv->iowait)) {
			dev->n_piowait++;
			qp->s_flags |= QIB_S_WAIT_PIO;
			list_add_tail(&priv->iowait, &dev->piowait);
			dd = dd_from_dev(dev);
			dd->f_wantpiobuf_intr(dd, 1);
		}
		spin_unlock(&dev->rdi.pending_lock);
		qp->s_flags &= ~QIB_S_BUSY;
		ret = -EBUSY;
	}
	spin_unlock_irqrestore(&qp->s_lock, flags);
	return ret;
}

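/*
 * Send a packet by programmed I/O: grab a send buffer from the chip,
 * write the PBC, header and payload, applying write-combining flushes
 * around the trigger word where the hardware requires them, and generate
 * the send completion inline.
 */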
static int qib_verbs_send_pio(struct rvt_qp *qp, struct qib_ib_header *ibhdr,
			      u32 hdrwords, struct rvt_sge_state *ss, u32 len,
			      u32 plen, u32 dwords)
{
	struct qib_devdata *dd = dd_from_ibdev(qp->ibqp.device);
	struct qib_pportdata *ppd = dd->pport + qp->port_num - 1;
	u32 *hdr = (u32 *) ibhdr;
	u32 __iomem *piobuf_orig;
	u32 __iomem *piobuf;
	u64 pbc;
	unsigned long flags;
	unsigned flush_wc;
	u32 control;
	u32 pbufn;

	control = dd->f_setpbc_control(ppd, plen, qp->s_srate,
		be16_to_cpu(ibhdr->lrh[0]) >> 12);
	pbc = ((u64) control << 32) | plen;
	piobuf = dd->f_getsendbuf(ppd, pbc, &pbufn);
	if (unlikely(piobuf == NULL))
		return no_bufs_available(qp);

	/*
	 * Write the pbc.
	 * We have to flush after the PBC for correctness on some cpus
	 * or WC buffer can be written out of order.
	 */
	writeq(pbc, piobuf);
	piobuf_orig = piobuf;
	piobuf += 2;

	flush_wc = dd->flags & QIB_PIO_FLUSH_WC;
	if (len == 0) {
		/*
		 * If there is just the header portion, must flush before
		 * writing last word of header for correctness, and after
		 * the last header word (trigger word).
		 */
		if (flush_wc) {
			qib_flush_wc();
			qib_pio_copy(piobuf, hdr, hdrwords - 1);
			qib_flush_wc();
			__raw_writel(hdr[hdrwords - 1], piobuf + hdrwords - 1);
			qib_flush_wc();
		} else
			qib_pio_copy(piobuf, hdr, hdrwords);
		goto done;
	}

	if (flush_wc)
		qib_flush_wc();
	qib_pio_copy(piobuf, hdr, hdrwords);
	piobuf += hdrwords;

	/* The common case is aligned and contained in one segment. */
	if (likely(ss->num_sge == 1 && len <= ss->sge.length &&
		   !((unsigned long)ss->sge.vaddr & (sizeof(u32) - 1)))) {
		u32 *addr = (u32 *) ss->sge.vaddr;

		/* Update address before sending packet. */
		update_sge(ss, len);
		if (flush_wc) {
			qib_pio_copy(piobuf, addr, dwords - 1);
			/* must flush early everything before trigger word */
			qib_flush_wc();
			__raw_writel(addr[dwords - 1], piobuf + dwords - 1);
			/* be sure trigger word is written */
			qib_flush_wc();
		} else
			qib_pio_copy(piobuf, addr, dwords);
		goto done;
	}
	copy_io(piobuf, ss, len, flush_wc);
done:
	if (dd->flags & QIB_USE_SPCL_TRIG) {
		u32 spcl_off = (pbufn >= dd->piobcnt2k) ? 2047 : 1023;

		qib_flush_wc();
		__raw_writel(0xaebecede, piobuf_orig + spcl_off);
	}
	qib_sendbuf_done(dd, pbufn);
	if (qp->s_rdma_mr) {
		rvt_put_mr(qp->s_rdma_mr);
		qp->s_rdma_mr = NULL;
	}
	if (qp->s_wqe) {
		spin_lock_irqsave(&qp->s_lock, flags);
		qib_send_complete(qp, qp->s_wqe, IB_WC_SUCCESS);
		spin_unlock_irqrestore(&qp->s_lock, flags);
	} else if (qp->ibqp.qp_type == IB_QPT_RC) {
		spin_lock_irqsave(&qp->s_lock, flags);
		qib_rc_send_complete(qp, ibhdr);
		spin_unlock_irqrestore(&qp->s_lock, flags);
	}
	return 0;
}

/**
 * qib_verbs_send - send a packet
 * @qp: the QP to send on
 * @hdr: the packet header
 * @hdrwords: the number of 32-bit words in the header
 * @ss: the SGE to send
 * @len: the length of the packet in bytes
 *
 * Return zero if packet is sent or queued OK.
 * Return non-zero and clear qp->s_flags QIB_S_BUSY otherwise.
 */
int qib_verbs_send(struct rvt_qp *qp, struct qib_ib_header *hdr,
		   u32 hdrwords, struct rvt_sge_state *ss, u32 len)
{
	struct qib_devdata *dd = dd_from_ibdev(qp->ibqp.device);
	u32 plen;
	int ret;
	u32 dwords = (len + 3) >> 2;

	/*
	 * Calculate the send buffer trigger address.
	 * The +1 counts for the pbc control dword following the pbc length.
	 */
	plen = hdrwords + dwords + 1;

	/*
	 * VL15 packets (IB_QPT_SMI) will always use PIO, so we
	 * can defer SDMA restart until link goes ACTIVE without
	 * worrying about just how we got there.
	 */
	if (qp->ibqp.qp_type == IB_QPT_SMI ||
	    !(dd->flags & QIB_HAS_SEND_DMA))
		ret = qib_verbs_send_pio(qp, hdr, hdrwords, ss, len,
					 plen, dwords);
	else
		ret = qib_verbs_send_dma(qp, hdr, hdrwords, ss, len,
					 plen, dwords);

	return ret;
}

int qib_snapshot_counters(struct qib_pportdata *ppd, u64 *swords,
			  u64 *rwords, u64 *spkts, u64 *rpkts,
			  u64 *xmit_wait)
{
	int ret;
	struct qib_devdata *dd = ppd->dd;

	if (!(dd->flags & QIB_PRESENT)) {
		/* no hardware, freeze, etc. */
		ret = -EINVAL;
		goto bail;
	}
	*swords = dd->f_portcntr(ppd, QIBPORTCNTR_WORDSEND);
	*rwords = dd->f_portcntr(ppd, QIBPORTCNTR_WORDRCV);
	*spkts = dd->f_portcntr(ppd, QIBPORTCNTR_PKTSEND);
	*rpkts = dd->f_portcntr(ppd, QIBPORTCNTR_PKTRCV);
	*xmit_wait = dd->f_portcntr(ppd, QIBPORTCNTR_SENDSTALL);

	ret = 0;

bail:
	return ret;
}

/**
 * qib_get_counters - get various chip counters
 * @ppd: the qlogic_ib port
 * @cntrs: counters are placed here
 *
 * Return the counters needed by recv_pma_get_portcounters().
 */
int qib_get_counters(struct qib_pportdata *ppd,
		     struct qib_verbs_counters *cntrs)
{
	int ret;

	if (!(ppd->dd->flags & QIB_PRESENT)) {
		/* no hardware, freeze, etc. */
		ret = -EINVAL;
		goto bail;
	}
	cntrs->symbol_error_counter =
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_IBSYMBOLERR);
	cntrs->link_error_recovery_counter =
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_IBLINKERRRECOV);
	/*
	 * The link downed counter counts when the other side downs the
	 * connection. We add in the number of times we downed the link
	 * due to local link integrity errors to compensate.
	 */
	cntrs->link_downed_counter =
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_IBLINKDOWN);
	cntrs->port_rcv_errors =
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RXDROPPKT) +
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RCVOVFL) +
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERR_RLEN) +
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_INVALIDRLEN) +
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERRLINK) +
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERRICRC) +
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERRVCRC) +
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERRLPCRC) +
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_BADFORMAT);
	cntrs->port_rcv_errors +=
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RXLOCALPHYERR);
	cntrs->port_rcv_errors +=
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RXVLERR);
	cntrs->port_rcv_remphys_errors =
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RCVEBP);
	cntrs->port_xmit_discards =
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_UNSUPVL);
	cntrs->port_xmit_data = ppd->dd->f_portcntr(ppd,
						    QIBPORTCNTR_WORDSEND);
	cntrs->port_rcv_data = ppd->dd->f_portcntr(ppd,
						   QIBPORTCNTR_WORDRCV);
	cntrs->port_xmit_packets = ppd->dd->f_portcntr(ppd,
						       QIBPORTCNTR_PKTSEND);
	cntrs->port_rcv_packets = ppd->dd->f_portcntr(ppd,
						      QIBPORTCNTR_PKTRCV);
	cntrs->local_link_integrity_errors =
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_LLI);
	cntrs->excessive_buffer_overrun_errors =
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_EXCESSBUFOVFL);
	cntrs->vl15_dropped =
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_VL15PKTDROP);

	ret = 0;

bail:
	return ret;
}

1532/**
1533 * qib_ib_piobufavail - callback when a PIO buffer is available
1534 * @dd: the device pointer
1535 *
1536 * This is called from qib_intr() at interrupt level when a PIO buffer is
1537 * available after qib_verbs_send() returned an error that no buffers were
1538 * available. Disable the interrupt if there are no more QPs waiting.
1539 */
1540void qib_ib_piobufavail(struct qib_devdata *dd)
1541{
1542 struct qib_ibdev *dev = &dd->verbs_dev;
1543 struct list_head *list;
Dennis Dalessandro7c2e11f2016-01-22 12:45:59 -08001544 struct rvt_qp *qps[5];
1545 struct rvt_qp *qp;
Ralph Campbellf9315512010-05-23 21:44:54 -07001546 unsigned long flags;
1547 unsigned i, n;
Dennis Dalessandroffc26902016-01-22 12:45:11 -08001548 struct qib_qp_priv *priv;
Ralph Campbellf9315512010-05-23 21:44:54 -07001549
1550 list = &dev->piowait;
1551 n = 0;
1552
1553 /*
1554 * Note: checking that the piowait list is empty and clearing
1555 * the buffer available interrupt needs to be atomic or we
1556 * could end up with QPs on the wait list with the interrupt
1557 * disabled.
1558 */
Harish Chegondicd182012016-01-22 12:56:14 -08001559 spin_lock_irqsave(&dev->rdi.pending_lock, flags);
Ralph Campbellf9315512010-05-23 21:44:54 -07001560 while (!list_empty(list)) {
1561 if (n == ARRAY_SIZE(qps))
1562 goto full;
Dennis Dalessandroffc26902016-01-22 12:45:11 -08001563 priv = list_entry(list->next, struct qib_qp_priv, iowait);
1564 qp = priv->owner;
1565 list_del_init(&priv->iowait);
Ralph Campbellf9315512010-05-23 21:44:54 -07001566 atomic_inc(&qp->refcount);
1567 qps[n++] = qp;
1568 }
1569 dd->f_wantpiobuf_intr(dd, 0);
1570full:
Harish Chegondicd182012016-01-22 12:56:14 -08001571 spin_unlock_irqrestore(&dev->rdi.pending_lock, flags);
Ralph Campbellf9315512010-05-23 21:44:54 -07001572
1573 for (i = 0; i < n; i++) {
1574 qp = qps[i];
1575
1576 spin_lock_irqsave(&qp->s_lock, flags);
1577 if (qp->s_flags & QIB_S_WAIT_PIO) {
1578 qp->s_flags &= ~QIB_S_WAIT_PIO;
1579 qib_schedule_send(qp);
1580 }
1581 spin_unlock_irqrestore(&qp->s_lock, flags);
1582
1583 /* Notify qib_destroy_qp() if it is waiting. */
1584 if (atomic_dec_and_test(&qp->refcount))
1585 wake_up(&qp->wait);
1586 }
1587}
1588
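/**
 * qib_query_device - report device attributes to the verbs core
 * @ibdev: the infiniband device
 * @props: the device attributes to fill in
 * @uhw: user data; must be empty for this driver
 *
 * Most limits reported here come from the ib_qib_max_* module
 * parameters; the MR/FMR limits come from the rdmavt lkey table.
 */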
Matan Barak2528e332015-06-11 16:35:25 +03001589static int qib_query_device(struct ib_device *ibdev, struct ib_device_attr *props,
1590 struct ib_udata *uhw)
Ralph Campbellf9315512010-05-23 21:44:54 -07001591{
1592 struct qib_devdata *dd = dd_from_ibdev(ibdev);
1593 struct qib_ibdev *dev = to_idev(ibdev);
1594
Matan Barak2528e332015-06-11 16:35:25 +03001595 if (uhw->inlen || uhw->outlen)
1596 return -EINVAL;
Ralph Campbellf9315512010-05-23 21:44:54 -07001597 memset(props, 0, sizeof(*props));
1598
1599 props->device_cap_flags = IB_DEVICE_BAD_PKEY_CNTR |
1600 IB_DEVICE_BAD_QKEY_CNTR | IB_DEVICE_SHUTDOWN_PORT |
1601 IB_DEVICE_SYS_IMAGE_GUID | IB_DEVICE_RC_RNR_NAK_GEN |
1602 IB_DEVICE_PORT_ACTIVE_EVENT | IB_DEVICE_SRQ_RESIZE;
1603 props->page_size_cap = PAGE_SIZE;
1604 props->vendor_id =
1605 QIB_SRC_OUI_1 << 16 | QIB_SRC_OUI_2 << 8 | QIB_SRC_OUI_3;
1606 props->vendor_part_id = dd->deviceid;
1607 props->hw_ver = dd->minrev;
1608 props->sys_image_guid = ib_qib_sys_image_guid;
1609 props->max_mr_size = ~0ULL;
1610 props->max_qp = ib_qib_max_qps;
1611 props->max_qp_wr = ib_qib_max_qp_wrs;
1612 props->max_sge = ib_qib_max_sges;
Steve Wiseaaae91f2015-07-27 18:10:07 -05001613 props->max_sge_rd = ib_qib_max_sges;
Ralph Campbellf9315512010-05-23 21:44:54 -07001614 props->max_cq = ib_qib_max_cqs;
1615 props->max_ah = ib_qib_max_ahs;
1616 props->max_cqe = ib_qib_max_cqes;
Dennis Dalessandro7c2e11f2016-01-22 12:45:59 -08001617 props->max_mr = dev->rdi.lkey_table.max;
1618 props->max_fmr = dev->rdi.lkey_table.max;
Ralph Campbellf9315512010-05-23 21:44:54 -07001619 props->max_map_per_fmr = 32767;
Dennis Dalessandrof44728d2016-01-22 12:44:44 -08001620 props->max_pd = dev->rdi.dparms.props.max_pd;
Ralph Campbellf9315512010-05-23 21:44:54 -07001621 props->max_qp_rd_atom = QIB_MAX_RDMA_ATOMIC;
1622 props->max_qp_init_rd_atom = 255;
1623 /* props->max_res_rd_atom */
1624 props->max_srq = ib_qib_max_srqs;
1625 props->max_srq_wr = ib_qib_max_srq_wrs;
1626 props->max_srq_sge = ib_qib_max_srq_sges;
1627 /* props->local_ca_ack_delay */
1628 props->atomic_cap = IB_ATOMIC_GLOB;
1629 props->max_pkeys = qib_get_npkeys(dd);
1630 props->max_mcast_grp = ib_qib_max_mcast_grps;
1631 props->max_mcast_qp_attach = ib_qib_max_mcast_qp_attached;
1632 props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
1633 props->max_mcast_grp;
1634
1635 return 0;
1636}
1637
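/**
 * qib_query_port - report the attributes of a physical port
 * @ibdev: the infiniband device
 * @port: the 1-based port number
 * @props: the port attributes to fill in
 *
 * Link state, width, speed and MTU are read from the per-port hardware
 * state; SM LID, capability flags and violation counters come from the
 * rdmavt per-port data.
 */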
1638static int qib_query_port(struct ib_device *ibdev, u8 port,
1639 struct ib_port_attr *props)
1640{
1641 struct qib_devdata *dd = dd_from_ibdev(ibdev);
1642 struct qib_ibport *ibp = to_iport(ibdev, port);
1643 struct qib_pportdata *ppd = ppd_from_ibp(ibp);
1644 enum ib_mtu mtu;
1645 u16 lid = ppd->lid;
1646
1647 memset(props, 0, sizeof(*props));
1648 props->lid = lid ? lid : be16_to_cpu(IB_LID_PERMISSIVE);
1649 props->lmc = ppd->lmc;
Harish Chegondif24a6d42016-01-22 12:56:02 -08001650 props->sm_lid = ibp->rvp.sm_lid;
1651 props->sm_sl = ibp->rvp.sm_sl;
Ralph Campbellf9315512010-05-23 21:44:54 -07001652 props->state = dd->f_iblink_state(ppd->lastibcstat);
1653 props->phys_state = dd->f_ibphys_portstate(ppd->lastibcstat);
Harish Chegondif24a6d42016-01-22 12:56:02 -08001654 props->port_cap_flags = ibp->rvp.port_cap_flags;
Ralph Campbellf9315512010-05-23 21:44:54 -07001655 props->gid_tbl_len = QIB_GUIDS_PER_PORT;
1656 props->max_msg_sz = 0x80000000;
1657 props->pkey_tbl_len = qib_get_npkeys(dd);
Harish Chegondif24a6d42016-01-22 12:56:02 -08001658 props->bad_pkey_cntr = ibp->rvp.pkey_violations;
1659 props->qkey_viol_cntr = ibp->rvp.qkey_violations;
Ralph Campbellf9315512010-05-23 21:44:54 -07001660 props->active_width = ppd->link_width_active;
1661 /* See rate_show() */
1662 props->active_speed = ppd->link_speed_active;
1663 props->max_vl_num = qib_num_vls(ppd->vls_supported);
1664 props->init_type_reply = 0;
1665
1666 props->max_mtu = qib_ibmtu ? qib_ibmtu : IB_MTU_4096;
1667 switch (ppd->ibmtu) {
1668 case 4096:
1669 mtu = IB_MTU_4096;
1670 break;
1671 case 2048:
1672 mtu = IB_MTU_2048;
1673 break;
1674 case 1024:
1675 mtu = IB_MTU_1024;
1676 break;
1677 case 512:
1678 mtu = IB_MTU_512;
1679 break;
1680 case 256:
1681 mtu = IB_MTU_256;
1682 break;
1683 default:
1684 mtu = IB_MTU_2048;
1685 }
1686 props->active_mtu = mtu;
Harish Chegondif24a6d42016-01-22 12:56:02 -08001687 props->subnet_timeout = ibp->rvp.subnet_timeout;
Ralph Campbellf9315512010-05-23 21:44:54 -07001688
1689 return 0;
1690}
1691
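/**
 * qib_modify_device - change device-wide attributes
 * @device: the infiniband device
 * @device_modify_mask: which attributes to change
 * @device_modify: the new attribute values
 *
 * Only the system image GUID and the node description can be changed;
 * qib_sys_guid_chg() or qib_node_desc_chg() is called for each port so
 * the change can be reported.
 */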
1692static int qib_modify_device(struct ib_device *device,
1693 int device_modify_mask,
1694 struct ib_device_modify *device_modify)
1695{
1696 struct qib_devdata *dd = dd_from_ibdev(device);
1697 unsigned i;
1698 int ret;
1699
1700 if (device_modify_mask & ~(IB_DEVICE_MODIFY_SYS_IMAGE_GUID |
1701 IB_DEVICE_MODIFY_NODE_DESC)) {
1702 ret = -EOPNOTSUPP;
1703 goto bail;
1704 }
1705
1706 if (device_modify_mask & IB_DEVICE_MODIFY_NODE_DESC) {
1707 memcpy(device->node_desc, device_modify->node_desc, 64);
1708 for (i = 0; i < dd->num_pports; i++) {
1709 struct qib_ibport *ibp = &dd->pport[i].ibport_data;
1710
1711 qib_node_desc_chg(ibp);
1712 }
1713 }
1714
1715 if (device_modify_mask & IB_DEVICE_MODIFY_SYS_IMAGE_GUID) {
1716 ib_qib_sys_image_guid =
1717 cpu_to_be64(device_modify->sys_image_guid);
1718 for (i = 0; i < dd->num_pports; i++) {
1719 struct qib_ibport *ibp = &dd->pport[i].ibport_data;
1720
1721 qib_sys_guid_chg(ibp);
1722 }
1723 }
1724
1725 ret = 0;
1726
1727bail:
1728 return ret;
1729}
1730
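/**
 * qib_modify_port - change the attributes of a physical port
 * @ibdev: the infiniband device
 * @port: the 1-based port number
 * @port_modify_mask: which attributes to change
 * @props: the capability mask bits to set and clear
 *
 * Updates the port capability flags, optionally takes the link down and
 * optionally clears the QKEY violation counter.
 */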
1731static int qib_modify_port(struct ib_device *ibdev, u8 port,
1732 int port_modify_mask, struct ib_port_modify *props)
1733{
1734 struct qib_ibport *ibp = to_iport(ibdev, port);
1735 struct qib_pportdata *ppd = ppd_from_ibp(ibp);
1736
Harish Chegondif24a6d42016-01-22 12:56:02 -08001737 ibp->rvp.port_cap_flags |= props->set_port_cap_mask;
1738 ibp->rvp.port_cap_flags &= ~props->clr_port_cap_mask;
Ralph Campbellf9315512010-05-23 21:44:54 -07001739 if (props->set_port_cap_mask || props->clr_port_cap_mask)
1740 qib_cap_mask_chg(ibp);
1741 if (port_modify_mask & IB_PORT_SHUTDOWN)
1742 qib_set_linkstate(ppd, QIB_IB_LINKDOWN);
1743 if (port_modify_mask & IB_PORT_RESET_QKEY_CNTR)
Harish Chegondif24a6d42016-01-22 12:56:02 -08001744 ibp->rvp.qkey_violations = 0;
Ralph Campbellf9315512010-05-23 21:44:54 -07001745 return 0;
1746}
1747
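/**
 * qib_query_gid - return the indexed GID for a port
 * @ibdev: the infiniband device
 * @port: the 1-based port number
 * @index: the index into the port GID table
 * @gid: the GID to fill in
 *
 * The subnet prefix comes from the per-port GID prefix; index 0 returns
 * the port GUID and higher indices return entries from ibp->guids[].
 */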
1748static int qib_query_gid(struct ib_device *ibdev, u8 port,
1749 int index, union ib_gid *gid)
1750{
1751 struct qib_devdata *dd = dd_from_ibdev(ibdev);
1752 int ret = 0;
1753
1754 if (!port || port > dd->num_pports)
1755 ret = -EINVAL;
1756 else {
1757 struct qib_ibport *ibp = to_iport(ibdev, port);
1758 struct qib_pportdata *ppd = ppd_from_ibp(ibp);
1759
Harish Chegondif24a6d42016-01-22 12:56:02 -08001760 gid->global.subnet_prefix = ibp->rvp.gid_prefix;
Ralph Campbellf9315512010-05-23 21:44:54 -07001761 if (index == 0)
1762 gid->global.interface_id = ppd->guid;
1763 else if (index < QIB_GUIDS_PER_PORT)
1764 gid->global.interface_id = ibp->guids[index - 1];
1765 else
1766 ret = -EINVAL;
1767 }
1768
1769 return ret;
1770}
1771
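/**
 * qib_check_ah - rdmavt callback to validate address handle attributes
 * @ibdev: the infiniband device
 * @ah_attr: the address handle attributes to check
 *
 * Only the service level is range checked here (the SL must be 0..15).
 */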
Ralph Campbellf9315512010-05-23 21:44:54 -07001772int qib_check_ah(struct ib_device *ibdev, struct ib_ah_attr *ah_attr)
1773{
Ralph Campbellf9315512010-05-23 21:44:54 -07001774 if (ah_attr->sl > 15)
Dennis Dalessandro96ab1ac2016-01-22 12:46:07 -08001775 return -EINVAL;
1776
Ralph Campbellf9315512010-05-23 21:44:54 -07001777 return 0;
Ralph Campbellf9315512010-05-23 21:44:54 -07001778}
1779
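/**
 * qib_notify_new_ah - rdmavt callback invoked when an address handle is created
 * @ibdev: the infiniband device
 * @ah_attr: the attributes the handle was created with
 * @ah: the partially initialized rvt_ah
 *
 * Fills in the driver-specific VL and log_pmtu fields from the port's
 * SL-to-VL map and MTU.
 */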
Harish Chegondi5418a5a2016-01-22 12:56:08 -08001780static void qib_notify_new_ah(struct ib_device *ibdev,
1781 struct ib_ah_attr *ah_attr,
1782 struct rvt_ah *ah)
1783{
1784 struct qib_ibport *ibp;
1785 struct qib_pportdata *ppd;
1786
1787 /*
1788 * Do not trust reading anything from rvt_ah at this point as it is not
1789 * done being set up. We can, however, modify the fields we need to set.
1790 */
1791
1792 ibp = to_iport(ibdev, ah_attr->port_num);
1793 ppd = ppd_from_ibp(ibp);
1794 ah->vl = ibp->sl_to_vl[ah->attr.sl];
1795 ah->log_pmtu = ilog2(ppd->ibmtu);
1796}
1797
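/**
 * qib_create_qp0_ah - create an address handle on QP0's protection domain
 * @ibp: the IB port to send from
 * @dlid: the destination LID
 *
 * Used by the driver to send packets from QP0 to the given DLID;
 * returns an ERR_PTR() if QP0 has not been created.
 */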
Mike Marciniszyn1fb9fed2012-07-16 17:11:06 +00001798struct ib_ah *qib_create_qp0_ah(struct qib_ibport *ibp, u16 dlid)
1799{
1800 struct ib_ah_attr attr;
1801 struct ib_ah *ah = ERR_PTR(-EINVAL);
Dennis Dalessandro7c2e11f2016-01-22 12:45:59 -08001802 struct rvt_qp *qp0;
Mike Marciniszyn1fb9fed2012-07-16 17:11:06 +00001803
Mike Marciniszyn041af0b2015-01-16 10:50:32 -05001804 memset(&attr, 0, sizeof(attr));
Mike Marciniszyn1fb9fed2012-07-16 17:11:06 +00001805 attr.dlid = dlid;
1806 attr.port_num = ppd_from_ibp(ibp)->port;
1807 rcu_read_lock();
Harish Chegondif24a6d42016-01-22 12:56:02 -08001808 qp0 = rcu_dereference(ibp->rvp.qp[0]);
Mike Marciniszyn1fb9fed2012-07-16 17:11:06 +00001809 if (qp0)
1810 ah = ib_create_ah(qp0->ibqp.pd, &attr);
1811 rcu_read_unlock();
1812 return ah;
1813}
1814
Ralph Campbellf9315512010-05-23 21:44:54 -07001815/**
Ralph Campbellf9315512010-05-23 21:44:54 -07001816 * qib_get_npkeys - return the size of the PKEY table for context 0
1817 * @dd: the qlogic_ib device
1818 */
1819unsigned qib_get_npkeys(struct qib_devdata *dd)
1820{
1821 return ARRAY_SIZE(dd->rcd[0]->pkeys);
1822}
1823
1824/*
1825 * Return the indexed PKEY from the port PKEY table.
1826 * No need to validate rcd[ctxt]; the port is setup if we are here.
1827 */
1828unsigned qib_get_pkey(struct qib_ibport *ibp, unsigned index)
1829{
1830 struct qib_pportdata *ppd = ppd_from_ibp(ibp);
1831 struct qib_devdata *dd = ppd->dd;
1832 unsigned ctxt = ppd->hw_pidx;
1833 unsigned ret;
1834
1835	/* dd->rcd is NULL after mini_init or certain init failures */
1836 if (!dd->rcd || index >= ARRAY_SIZE(dd->rcd[ctxt]->pkeys))
1837 ret = 0;
1838 else
1839 ret = dd->rcd[ctxt]->pkeys[index];
1840
1841 return ret;
1842}
1843
Ralph Campbellf9315512010-05-23 21:44:54 -07001844/**
1845 * qib_alloc_ucontext - allocate a ucontext
1846 * @ibdev: the infiniband device
1847 * @udata: not used by the QLogic_IB driver
1848 */
1849
1850static struct ib_ucontext *qib_alloc_ucontext(struct ib_device *ibdev,
1851 struct ib_udata *udata)
1852{
1853 struct qib_ucontext *context;
1854 struct ib_ucontext *ret;
1855
Mike Marciniszyn041af0b2015-01-16 10:50:32 -05001856 context = kmalloc(sizeof(*context), GFP_KERNEL);
Ralph Campbellf9315512010-05-23 21:44:54 -07001857 if (!context) {
1858 ret = ERR_PTR(-ENOMEM);
1859 goto bail;
1860 }
1861
1862 ret = &context->ibucontext;
1863
1864bail:
1865 return ret;
1866}
1867
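/**
 * qib_dealloc_ucontext - free a user context
 * @context: the context allocated by qib_alloc_ucontext()
 */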
1868static int qib_dealloc_ucontext(struct ib_ucontext *context)
1869{
1870 kfree(to_iucontext(context));
1871 return 0;
1872}
1873
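/**
 * init_ibport - set up the verbs state of one physical port
 * @ppd: the port to initialize
 *
 * Sets the default GID prefix, capability flags and PMA counter selects,
 * and snapshots the hardware counters to use as the "zero" baseline.
 */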
1874static void init_ibport(struct qib_pportdata *ppd)
1875{
1876 struct qib_verbs_counters cntrs;
1877 struct qib_ibport *ibp = &ppd->ibport_data;
1878
Harish Chegondif24a6d42016-01-22 12:56:02 -08001879 spin_lock_init(&ibp->rvp.lock);
Ralph Campbellf9315512010-05-23 21:44:54 -07001880 /* Set the prefix to the default value (see ch. 4.1.1) */
Harish Chegondif24a6d42016-01-22 12:56:02 -08001881 ibp->rvp.gid_prefix = IB_DEFAULT_GID_PREFIX;
1882 ibp->rvp.sm_lid = be16_to_cpu(IB_LID_PERMISSIVE);
1883 ibp->rvp.port_cap_flags = IB_PORT_SYS_IMAGE_GUID_SUP |
Ralph Campbellf9315512010-05-23 21:44:54 -07001884 IB_PORT_CLIENT_REG_SUP | IB_PORT_SL_MAP_SUP |
1885 IB_PORT_TRAP_SUP | IB_PORT_AUTO_MIGR_SUP |
1886 IB_PORT_DR_NOTICE_SUP | IB_PORT_CAP_MASK_NOTICE_SUP |
1887 IB_PORT_OTHER_LOCAL_CHANGES_SUP;
1888 if (ppd->dd->flags & QIB_HAS_LINK_LATENCY)
Harish Chegondif24a6d42016-01-22 12:56:02 -08001889 ibp->rvp.port_cap_flags |= IB_PORT_LINK_LATENCY_SUP;
1890 ibp->rvp.pma_counter_select[0] = IB_PMA_PORT_XMIT_DATA;
1891 ibp->rvp.pma_counter_select[1] = IB_PMA_PORT_RCV_DATA;
1892 ibp->rvp.pma_counter_select[2] = IB_PMA_PORT_XMIT_PKTS;
1893 ibp->rvp.pma_counter_select[3] = IB_PMA_PORT_RCV_PKTS;
1894 ibp->rvp.pma_counter_select[4] = IB_PMA_PORT_XMIT_WAIT;
Ralph Campbellf9315512010-05-23 21:44:54 -07001895
1896 /* Snapshot current HW counters to "clear" them. */
1897 qib_get_counters(ppd, &cntrs);
1898 ibp->z_symbol_error_counter = cntrs.symbol_error_counter;
1899 ibp->z_link_error_recovery_counter =
1900 cntrs.link_error_recovery_counter;
1901 ibp->z_link_downed_counter = cntrs.link_downed_counter;
1902 ibp->z_port_rcv_errors = cntrs.port_rcv_errors;
1903 ibp->z_port_rcv_remphys_errors = cntrs.port_rcv_remphys_errors;
1904 ibp->z_port_xmit_discards = cntrs.port_xmit_discards;
1905 ibp->z_port_xmit_data = cntrs.port_xmit_data;
1906 ibp->z_port_rcv_data = cntrs.port_rcv_data;
1907 ibp->z_port_xmit_packets = cntrs.port_xmit_packets;
1908 ibp->z_port_rcv_packets = cntrs.port_rcv_packets;
1909 ibp->z_local_link_integrity_errors =
1910 cntrs.local_link_integrity_errors;
1911 ibp->z_excessive_buffer_overrun_errors =
1912 cntrs.excessive_buffer_overrun_errors;
1913 ibp->z_vl15_dropped = cntrs.vl15_dropped;
Harish Chegondif24a6d42016-01-22 12:56:02 -08001914 RCU_INIT_POINTER(ibp->rvp.qp[0], NULL);
1915 RCU_INIT_POINTER(ibp->rvp.qp[1], NULL);
Ralph Campbellf9315512010-05-23 21:44:54 -07001916}
1917
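/**
 * qib_port_immutable - report attributes fixed for the port's lifetime
 * @ibdev: the infiniband device
 * @port_num: the 1-based port number
 * @immutable: the structure to fill in
 */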
Ira Weiny77386132015-05-13 20:02:58 -04001918static int qib_port_immutable(struct ib_device *ibdev, u8 port_num,
1919 struct ib_port_immutable *immutable)
1920{
1921 struct ib_port_attr attr;
1922 int err;
1923
1924 err = qib_query_port(ibdev, port_num, &attr);
1925 if (err)
1926 return err;
1927
1928 immutable->pkey_tbl_len = attr.pkey_tbl_len;
1929 immutable->gid_tbl_len = attr.gid_tbl_len;
Ira Weinyf9b22e32015-05-13 20:02:59 -04001930 immutable->core_cap_flags = RDMA_CORE_PORT_IBA_IB;
Ira Weiny337877a2015-06-06 14:38:29 -04001931 immutable->max_mad_size = IB_MGMT_MAD_SIZE;
Ira Weiny77386132015-05-13 20:02:58 -04001932
1933 return 0;
1934}
1935
Ralph Campbellf9315512010-05-23 21:44:54 -07001936/**
1937 * qib_register_ib_device - register our device with the infiniband core
1938 * @dd: the device data structure
1939 * Return zero on success or a negative errno on failure.
1940 */
1941int qib_register_ib_device(struct qib_devdata *dd)
1942{
1943 struct qib_ibdev *dev = &dd->verbs_dev;
Dennis Dalessandro2dc05ab2016-01-22 12:44:29 -08001944 struct ib_device *ibdev = &dev->rdi.ibdev;
Ralph Campbellf9315512010-05-23 21:44:54 -07001945 struct qib_pportdata *ppd = dd->pport;
Harish Chegondi76fec3e2016-01-22 12:56:21 -08001946 unsigned i, ctxt;
Ralph Campbellf9315512010-05-23 21:44:54 -07001947 int ret;
1948
1949 dev->qp_table_size = ib_qib_qp_table_size;
Mike Marciniszynaf061a62011-09-23 13:16:44 -04001950 get_random_bytes(&dev->qp_rnd, sizeof(dev->qp_rnd));
Mike Marciniszyna46a2802015-01-16 10:52:18 -05001951 dev->qp_table = kmalloc_array(
1952 dev->qp_table_size,
1953 sizeof(*dev->qp_table),
Ralph Campbellf9315512010-05-23 21:44:54 -07001954 GFP_KERNEL);
1955 if (!dev->qp_table) {
1956 ret = -ENOMEM;
1957 goto err_qpt;
1958 }
Mike Marciniszynaf061a62011-09-23 13:16:44 -04001959 for (i = 0; i < dev->qp_table_size; i++)
1960 RCU_INIT_POINTER(dev->qp_table[i], NULL);
Ralph Campbellf9315512010-05-23 21:44:54 -07001961
1962 for (i = 0; i < dd->num_pports; i++)
1963 init_ibport(ppd + i);
1964
1965 /* Only need to initialize non-zero fields. */
1966 spin_lock_init(&dev->qpt_lock);
Ralph Campbellf9315512010-05-23 21:44:54 -07001967 spin_lock_init(&dev->n_cqs_lock);
1968 spin_lock_init(&dev->n_qps_lock);
1969 spin_lock_init(&dev->n_srqs_lock);
1970 spin_lock_init(&dev->n_mcast_grps_lock);
1971 init_timer(&dev->mem_timer);
1972 dev->mem_timer.function = mem_timer;
1973 dev->mem_timer.data = (unsigned long) dev;
1974
1975 qib_init_qpn_table(dd, &dev->qpn_table);
1976
Ralph Campbellf9315512010-05-23 21:44:54 -07001977 INIT_LIST_HEAD(&dev->piowait);
1978 INIT_LIST_HEAD(&dev->dmawait);
1979 INIT_LIST_HEAD(&dev->txwait);
1980 INIT_LIST_HEAD(&dev->memwait);
1981 INIT_LIST_HEAD(&dev->txreq_free);
1982
1983 if (ppd->sdma_descq_cnt) {
1984 dev->pio_hdrs = dma_alloc_coherent(&dd->pcidev->dev,
1985 ppd->sdma_descq_cnt *
1986 sizeof(struct qib_pio_header),
1987 &dev->pio_hdrs_phys,
1988 GFP_KERNEL);
1989 if (!dev->pio_hdrs) {
1990 ret = -ENOMEM;
1991 goto err_hdrs;
1992 }
1993 }
1994
1995 for (i = 0; i < ppd->sdma_descq_cnt; i++) {
1996 struct qib_verbs_txreq *tx;
1997
Mike Marciniszyn041af0b2015-01-16 10:50:32 -05001998 tx = kzalloc(sizeof(*tx), GFP_KERNEL);
Ralph Campbellf9315512010-05-23 21:44:54 -07001999 if (!tx) {
2000 ret = -ENOMEM;
2001 goto err_tx;
2002 }
2003 tx->hdr_inx = i;
2004 list_add(&tx->txreq.list, &dev->txreq_free);
2005 }
2006
2007 /*
2008 * The system image GUID is supposed to be the same for all
2009 * IB HCAs in a single system but since there can be other
2010 * device types in the system, we can't be sure this is unique.
2011 */
2012 if (!ib_qib_sys_image_guid)
2013 ib_qib_sys_image_guid = ppd->guid;
2014
2015 strlcpy(ibdev->name, "qib%d", IB_DEVICE_NAME_MAX);
2016 ibdev->owner = THIS_MODULE;
2017 ibdev->node_guid = ppd->guid;
2018 ibdev->uverbs_abi_ver = QIB_UVERBS_ABI_VERSION;
2019 ibdev->uverbs_cmd_mask =
2020 (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
2021 (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
2022 (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
2023 (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
2024 (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
2025 (1ull << IB_USER_VERBS_CMD_CREATE_AH) |
2026 (1ull << IB_USER_VERBS_CMD_MODIFY_AH) |
2027 (1ull << IB_USER_VERBS_CMD_QUERY_AH) |
2028 (1ull << IB_USER_VERBS_CMD_DESTROY_AH) |
2029 (1ull << IB_USER_VERBS_CMD_REG_MR) |
2030 (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
2031 (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
2032 (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
2033 (1ull << IB_USER_VERBS_CMD_RESIZE_CQ) |
2034 (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
2035 (1ull << IB_USER_VERBS_CMD_POLL_CQ) |
2036 (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
2037 (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
2038 (1ull << IB_USER_VERBS_CMD_QUERY_QP) |
2039 (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
2040 (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
2041 (1ull << IB_USER_VERBS_CMD_POST_SEND) |
2042 (1ull << IB_USER_VERBS_CMD_POST_RECV) |
2043 (1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) |
2044 (1ull << IB_USER_VERBS_CMD_DETACH_MCAST) |
2045 (1ull << IB_USER_VERBS_CMD_CREATE_SRQ) |
2046 (1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) |
2047 (1ull << IB_USER_VERBS_CMD_QUERY_SRQ) |
2048 (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ) |
2049 (1ull << IB_USER_VERBS_CMD_POST_SRQ_RECV);
2050 ibdev->node_type = RDMA_NODE_IB_CA;
2051 ibdev->phys_port_cnt = dd->num_pports;
2052 ibdev->num_comp_vectors = 1;
2053 ibdev->dma_device = &dd->pcidev->dev;
2054 ibdev->query_device = qib_query_device;
2055 ibdev->modify_device = qib_modify_device;
2056 ibdev->query_port = qib_query_port;
2057 ibdev->modify_port = qib_modify_port;
Harish Chegondi76fec3e2016-01-22 12:56:21 -08002058 ibdev->query_pkey = NULL;
Ralph Campbellf9315512010-05-23 21:44:54 -07002059 ibdev->query_gid = qib_query_gid;
2060 ibdev->alloc_ucontext = qib_alloc_ucontext;
2061 ibdev->dealloc_ucontext = qib_dealloc_ucontext;
Dennis Dalessandrof44728d2016-01-22 12:44:44 -08002062 ibdev->alloc_pd = NULL;
2063 ibdev->dealloc_pd = NULL;
Dennis Dalessandro96ab1ac2016-01-22 12:46:07 -08002064 ibdev->create_ah = NULL;
2065 ibdev->destroy_ah = NULL;
2066 ibdev->modify_ah = NULL;
2067 ibdev->query_ah = NULL;
Ralph Campbellf9315512010-05-23 21:44:54 -07002068 ibdev->create_srq = qib_create_srq;
2069 ibdev->modify_srq = qib_modify_srq;
2070 ibdev->query_srq = qib_query_srq;
2071 ibdev->destroy_srq = qib_destroy_srq;
2072 ibdev->create_qp = qib_create_qp;
2073 ibdev->modify_qp = qib_modify_qp;
2074 ibdev->query_qp = qib_query_qp;
2075 ibdev->destroy_qp = qib_destroy_qp;
2076 ibdev->post_send = qib_post_send;
2077 ibdev->post_recv = qib_post_receive;
2078 ibdev->post_srq_recv = qib_post_srq_receive;
2079 ibdev->create_cq = qib_create_cq;
2080 ibdev->destroy_cq = qib_destroy_cq;
2081 ibdev->resize_cq = qib_resize_cq;
2082 ibdev->poll_cq = qib_poll_cq;
2083 ibdev->req_notify_cq = qib_req_notify_cq;
Dennis Dalessandro7c2e11f2016-01-22 12:45:59 -08002084 ibdev->get_dma_mr = NULL;
2085 ibdev->reg_user_mr = NULL;
2086 ibdev->dereg_mr = NULL;
2087 ibdev->alloc_mr = NULL;
2088 ibdev->map_mr_sg = NULL;
2089 ibdev->alloc_fmr = NULL;
2090 ibdev->map_phys_fmr = NULL;
2091 ibdev->unmap_fmr = NULL;
2092 ibdev->dealloc_fmr = NULL;
Ralph Campbellf9315512010-05-23 21:44:54 -07002093 ibdev->attach_mcast = qib_multicast_attach;
2094 ibdev->detach_mcast = qib_multicast_detach;
2095 ibdev->process_mad = qib_process_mad;
Harish Chegondicd182012016-01-22 12:56:14 -08002096 ibdev->mmap = NULL;
Dennis Dalessandroeb636ac2016-01-22 12:44:36 -08002097 ibdev->dma_ops = NULL;
Ira Weiny77386132015-05-13 20:02:58 -04002098 ibdev->get_port_immutable = qib_port_immutable;
Ralph Campbellf9315512010-05-23 21:44:54 -07002099
2100 snprintf(ibdev->node_desc, sizeof(ibdev->node_desc),
Vinit Agnihotrie2eed582013-03-14 18:13:41 +00002101 "Intel Infiniband HCA %s", init_utsname()->nodename);
Ralph Campbellf9315512010-05-23 21:44:54 -07002102
Dennis Dalessandro2dc05ab2016-01-22 12:44:29 -08002103 /*
2104 * Fill in rvt info object.
2105 */
2106 dd->verbs_dev.rdi.driver_f.port_callback = qib_create_port_files;
Dennis Dalessandro6a9df402016-01-22 12:45:20 -08002107 dd->verbs_dev.rdi.driver_f.get_card_name = qib_get_card_name;
2108 dd->verbs_dev.rdi.driver_f.get_pci_dev = qib_get_pci_dev;
Dennis Dalessandro96ab1ac2016-01-22 12:46:07 -08002109 dd->verbs_dev.rdi.driver_f.check_ah = qib_check_ah;
Harish Chegondi5418a5a2016-01-22 12:56:08 -08002110 dd->verbs_dev.rdi.driver_f.notify_new_ah = qib_notify_new_ah;
Dennis Dalessandro2dc05ab2016-01-22 12:44:29 -08002111 dd->verbs_dev.rdi.dparms.props.max_pd = ib_qib_max_pds;
Dennis Dalessandro96ab1ac2016-01-22 12:46:07 -08002112 dd->verbs_dev.rdi.dparms.props.max_ah = ib_qib_max_ahs;
Dennis Dalessandro7c2e11f2016-01-22 12:45:59 -08002113 dd->verbs_dev.rdi.flags = (RVT_FLAG_QP_INIT_DRIVER |
Dennis Dalessandro2dc05ab2016-01-22 12:44:29 -08002114 RVT_FLAG_CQ_INIT_DRIVER);
Dennis Dalessandro7c2e11f2016-01-22 12:45:59 -08002115 dd->verbs_dev.rdi.dparms.lkey_table_size = qib_lkey_table_size;
Harish Chegondi76fec3e2016-01-22 12:56:21 -08002116 dd->verbs_dev.rdi.dparms.nports = dd->num_pports;
2117 dd->verbs_dev.rdi.dparms.npkeys = qib_get_npkeys(dd);
2118
2119 ppd = dd->pport;
2120 for (i = 0; i < dd->num_pports; i++, ppd++) {
2121 ctxt = ppd->hw_pidx;
2122 rvt_init_port(&dd->verbs_dev.rdi,
2123 &ppd->ibport_data.rvp,
2124 i,
2125 dd->rcd[ctxt]->pkeys);
2126 }
Dennis Dalessandro2dc05ab2016-01-22 12:44:29 -08002127
2128 ret = rvt_register_device(&dd->verbs_dev.rdi);
Ralph Campbellf9315512010-05-23 21:44:54 -07002129 if (ret)
2130 goto err_reg;
2131
2132 ret = qib_create_agents(dev);
2133 if (ret)
2134 goto err_agents;
2135
Mike Marciniszync9bdad32013-03-28 18:17:20 +00002136 ret = qib_verbs_register_sysfs(dd);
2137 if (ret)
Ralph Campbellf9315512010-05-23 21:44:54 -07002138 goto err_class;
2139
2140 goto bail;
2141
2142err_class:
2143 qib_free_agents(dev);
2144err_agents:
Dennis Dalessandro2dc05ab2016-01-22 12:44:29 -08002145 rvt_unregister_device(&dd->verbs_dev.rdi);
Ralph Campbellf9315512010-05-23 21:44:54 -07002146err_reg:
2147err_tx:
2148 while (!list_empty(&dev->txreq_free)) {
2149 struct list_head *l = dev->txreq_free.next;
2150 struct qib_verbs_txreq *tx;
2151
2152 list_del(l);
2153 tx = list_entry(l, struct qib_verbs_txreq, txreq.list);
2154 kfree(tx);
2155 }
2156 if (ppd->sdma_descq_cnt)
2157 dma_free_coherent(&dd->pcidev->dev,
2158 ppd->sdma_descq_cnt *
2159 sizeof(struct qib_pio_header),
2160 dev->pio_hdrs, dev->pio_hdrs_phys);
2161err_hdrs:
Ralph Campbellf9315512010-05-23 21:44:54 -07002162 kfree(dev->qp_table);
2163err_qpt:
2164 qib_dev_err(dd, "cannot register verbs: %d!\n", -ret);
2165bail:
2166 return ret;
2167}
2168
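/**
 * qib_unregister_ib_device - unregister from the verbs core and clean up
 * @dd: the device data structure
 *
 * Unregisters from rdmavt, warns if any wait lists or QPs are still in
 * use, then frees the PIO headers, tx requests and QP table allocated by
 * qib_register_ib_device().
 */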
2169void qib_unregister_ib_device(struct qib_devdata *dd)
2170{
2171 struct qib_ibdev *dev = &dd->verbs_dev;
Ralph Campbellf9315512010-05-23 21:44:54 -07002172 u32 qps_inuse;
Ralph Campbellf9315512010-05-23 21:44:54 -07002173
2174 qib_verbs_unregister_sysfs(dd);
2175
2176 qib_free_agents(dev);
2177
Dennis Dalessandro2dc05ab2016-01-22 12:44:29 -08002178 rvt_unregister_device(&dd->verbs_dev.rdi);
Ralph Campbellf9315512010-05-23 21:44:54 -07002179
2180 if (!list_empty(&dev->piowait))
2181 qib_dev_err(dd, "piowait list not empty!\n");
2182 if (!list_empty(&dev->dmawait))
2183 qib_dev_err(dd, "dmawait list not empty!\n");
2184 if (!list_empty(&dev->txwait))
2185 qib_dev_err(dd, "txwait list not empty!\n");
2186 if (!list_empty(&dev->memwait))
2187 qib_dev_err(dd, "memwait list not empty!\n");
Ralph Campbellf9315512010-05-23 21:44:54 -07002188
2189 qps_inuse = qib_free_all_qps(dd);
2190 if (qps_inuse)
2191 qib_dev_err(dd, "QP memory leak! %u still in use\n",
2192 qps_inuse);
2193
2194 del_timer_sync(&dev->mem_timer);
2195 qib_free_qpn_table(&dev->qpn_table);
2196 while (!list_empty(&dev->txreq_free)) {
2197 struct list_head *l = dev->txreq_free.next;
2198 struct qib_verbs_txreq *tx;
2199
2200 list_del(l);
2201 tx = list_entry(l, struct qib_verbs_txreq, txreq.list);
2202 kfree(tx);
2203 }
2204 if (dd->pport->sdma_descq_cnt)
2205 dma_free_coherent(&dd->pcidev->dev,
2206 dd->pport->sdma_descq_cnt *
2207 sizeof(struct qib_pio_header),
2208 dev->pio_hdrs, dev->pio_hdrs_phys);
Ralph Campbellf9315512010-05-23 21:44:54 -07002209 kfree(dev->qp_table);
2210}
Mike Marciniszyn551ace12012-07-19 13:03:56 +00002211
2212/*
2213 * This must be called with s_lock held.
2214 */
Dennis Dalessandro7c2e11f2016-01-22 12:45:59 -08002215void qib_schedule_send(struct rvt_qp *qp)
Mike Marciniszyn551ace12012-07-19 13:03:56 +00002216{
Dennis Dalessandroffc26902016-01-22 12:45:11 -08002217 struct qib_qp_priv *priv = qp->priv;
Mike Marciniszyn551ace12012-07-19 13:03:56 +00002218 if (qib_send_ok(qp)) {
2219 struct qib_ibport *ibp =
2220 to_iport(qp->ibqp.device, qp->port_num);
2221 struct qib_pportdata *ppd = ppd_from_ibp(ibp);
2222
Dennis Dalessandroffc26902016-01-22 12:45:11 -08002223 queue_work(ppd->qib_wq, &priv->s_work);
Mike Marciniszyn551ace12012-07-19 13:03:56 +00002224 }
2225}