Ralph Campbellf9315512010-05-23 21:44:54 -07001/*
Vinit Agnihotrie2eed582013-03-14 18:13:41 +00002 * Copyright (c) 2012, 2013 Intel Corporation. All rights reserved.
Mike Marciniszyn1fb9fed2012-07-16 17:11:06 +00003 * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
Ralph Campbellf9315512010-05-23 21:44:54 -07004 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 */
34
35#include <rdma/ib_mad.h>
36#include <rdma/ib_user_verbs.h>
37#include <linux/io.h>
Paul Gortmakere4dd23d2011-05-27 15:35:46 -040038#include <linux/module.h>
Ralph Campbellf9315512010-05-23 21:44:54 -070039#include <linux/utsname.h>
40#include <linux/rculist.h>
41#include <linux/mm.h>
Mike Marciniszynaf061a62011-09-23 13:16:44 -040042#include <linux/random.h>
Mike Marciniszynd6f1c172015-07-21 08:36:07 -040043#include <linux/vmalloc.h>
Dennis Dalessandroeb636ac2016-01-22 12:44:36 -080044#include <rdma/rdma_vt.h>
Ralph Campbellf9315512010-05-23 21:44:54 -070045
46#include "qib.h"
47#include "qib_common.h"
48
Mike Marciniszynaf061a62011-09-23 13:16:44 -040049static unsigned int ib_qib_qp_table_size = 256;
Ralph Campbellf9315512010-05-23 21:44:54 -070050module_param_named(qp_table_size, ib_qib_qp_table_size, uint, S_IRUGO);
51MODULE_PARM_DESC(qp_table_size, "QP table size");
52
Dennis Dalessandro7c2e11f2016-01-22 12:45:59 -080053static unsigned int qib_lkey_table_size = 16;
54module_param_named(lkey_table_size, qib_lkey_table_size, uint,
Ralph Campbellf9315512010-05-23 21:44:54 -070055 S_IRUGO);
56MODULE_PARM_DESC(lkey_table_size,
57 "LKEY table size in bits (2^n, 1 <= n <= 23)");
58
59static unsigned int ib_qib_max_pds = 0xFFFF;
60module_param_named(max_pds, ib_qib_max_pds, uint, S_IRUGO);
61MODULE_PARM_DESC(max_pds,
62 "Maximum number of protection domains to support");
63
64static unsigned int ib_qib_max_ahs = 0xFFFF;
65module_param_named(max_ahs, ib_qib_max_ahs, uint, S_IRUGO);
66MODULE_PARM_DESC(max_ahs, "Maximum number of address handles to support");
67
68unsigned int ib_qib_max_cqes = 0x2FFFF;
69module_param_named(max_cqes, ib_qib_max_cqes, uint, S_IRUGO);
70MODULE_PARM_DESC(max_cqes,
71 "Maximum number of completion queue entries to support");
72
73unsigned int ib_qib_max_cqs = 0x1FFFF;
74module_param_named(max_cqs, ib_qib_max_cqs, uint, S_IRUGO);
75MODULE_PARM_DESC(max_cqs, "Maximum number of completion queues to support");
76
77unsigned int ib_qib_max_qp_wrs = 0x3FFF;
78module_param_named(max_qp_wrs, ib_qib_max_qp_wrs, uint, S_IRUGO);
79MODULE_PARM_DESC(max_qp_wrs, "Maximum number of QP WRs to support");
80
81unsigned int ib_qib_max_qps = 16384;
82module_param_named(max_qps, ib_qib_max_qps, uint, S_IRUGO);
83MODULE_PARM_DESC(max_qps, "Maximum number of QPs to support");
84
85unsigned int ib_qib_max_sges = 0x60;
86module_param_named(max_sges, ib_qib_max_sges, uint, S_IRUGO);
87MODULE_PARM_DESC(max_sges, "Maximum number of SGEs to support");
88
89unsigned int ib_qib_max_mcast_grps = 16384;
90module_param_named(max_mcast_grps, ib_qib_max_mcast_grps, uint, S_IRUGO);
91MODULE_PARM_DESC(max_mcast_grps,
92 "Maximum number of multicast groups to support");
93
94unsigned int ib_qib_max_mcast_qp_attached = 16;
95module_param_named(max_mcast_qp_attached, ib_qib_max_mcast_qp_attached,
96 uint, S_IRUGO);
97MODULE_PARM_DESC(max_mcast_qp_attached,
98 "Maximum number of attached QPs to support");
99
100unsigned int ib_qib_max_srqs = 1024;
101module_param_named(max_srqs, ib_qib_max_srqs, uint, S_IRUGO);
102MODULE_PARM_DESC(max_srqs, "Maximum number of SRQs to support");
103
104unsigned int ib_qib_max_srq_sges = 128;
105module_param_named(max_srq_sges, ib_qib_max_srq_sges, uint, S_IRUGO);
106MODULE_PARM_DESC(max_srq_sges, "Maximum number of SRQ SGEs to support");
107
108unsigned int ib_qib_max_srq_wrs = 0x1FFFF;
109module_param_named(max_srq_wrs, ib_qib_max_srq_wrs, uint, S_IRUGO);
 110MODULE_PARM_DESC(max_srq_wrs, "Maximum number of SRQ WRs to support");
111
112static unsigned int ib_qib_disable_sma;
113module_param_named(disable_sma, ib_qib_disable_sma, uint, S_IWUSR | S_IRUGO);
114MODULE_PARM_DESC(disable_sma, "Disable the SMA");
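/*
 * These limits are normally tuned at module load time, e.g. (assuming the
 * driver module is named ib_qib; the values below are only illustrative):
 *
 *	modprobe ib_qib qp_table_size=1024 lkey_table_size=17
 */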
115
116/*
Ralph Campbellf9315512010-05-23 21:44:54 -0700117 * Translate ib_wr_opcode into ib_wc_opcode.
118 */
119const enum ib_wc_opcode ib_qib_wc_opcode[] = {
120 [IB_WR_RDMA_WRITE] = IB_WC_RDMA_WRITE,
121 [IB_WR_RDMA_WRITE_WITH_IMM] = IB_WC_RDMA_WRITE,
122 [IB_WR_SEND] = IB_WC_SEND,
123 [IB_WR_SEND_WITH_IMM] = IB_WC_SEND,
124 [IB_WR_RDMA_READ] = IB_WC_RDMA_READ,
125 [IB_WR_ATOMIC_CMP_AND_SWP] = IB_WC_COMP_SWAP,
126 [IB_WR_ATOMIC_FETCH_AND_ADD] = IB_WC_FETCH_ADD
127};
128
129/*
130 * System image GUID.
131 */
132__be64 ib_qib_sys_image_guid;
133
134/**
135 * qib_copy_sge - copy data to SGE memory
136 * @ss: the SGE state
137 * @data: the data to copy
138 * @length: the length of the data
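 * @release: if true, drop the MR reference once each SGE is fully consumed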
139 */
Dennis Dalessandro7c2e11f2016-01-22 12:45:59 -0800140void qib_copy_sge(struct rvt_sge_state *ss, void *data, u32 length, int release)
Ralph Campbellf9315512010-05-23 21:44:54 -0700141{
Dennis Dalessandro7c2e11f2016-01-22 12:45:59 -0800142 struct rvt_sge *sge = &ss->sge;
Ralph Campbellf9315512010-05-23 21:44:54 -0700143
144 while (length) {
145 u32 len = sge->length;
146
147 if (len > length)
148 len = length;
149 if (len > sge->sge_length)
150 len = sge->sge_length;
151 BUG_ON(len == 0);
152 memcpy(sge->vaddr, data, len);
153 sge->vaddr += len;
154 sge->length -= len;
155 sge->sge_length -= len;
156 if (sge->sge_length == 0) {
157 if (release)
Dennis Dalessandro7c2e11f2016-01-22 12:45:59 -0800158 rvt_put_mr(sge->mr);
Ralph Campbellf9315512010-05-23 21:44:54 -0700159 if (--ss->num_sge)
160 *sge = *ss->sg_list++;
161 } else if (sge->length == 0 && sge->mr->lkey) {
Dennis Dalessandro7c2e11f2016-01-22 12:45:59 -0800162 if (++sge->n >= RVT_SEGSZ) {
Ralph Campbellf9315512010-05-23 21:44:54 -0700163 if (++sge->m >= sge->mr->mapsz)
164 break;
165 sge->n = 0;
166 }
167 sge->vaddr =
168 sge->mr->map[sge->m]->segs[sge->n].vaddr;
169 sge->length =
170 sge->mr->map[sge->m]->segs[sge->n].length;
171 }
172 data += len;
173 length -= len;
174 }
175}
176
177/**
178 * qib_skip_sge - skip over SGE memory - XXX almost dup of prev func
179 * @ss: the SGE state
180 * @length: the number of bytes to skip
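 * @release: if true, drop the MR reference once each SGE is fully consumed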
181 */
Dennis Dalessandro7c2e11f2016-01-22 12:45:59 -0800182void qib_skip_sge(struct rvt_sge_state *ss, u32 length, int release)
Ralph Campbellf9315512010-05-23 21:44:54 -0700183{
Dennis Dalessandro7c2e11f2016-01-22 12:45:59 -0800184 struct rvt_sge *sge = &ss->sge;
Ralph Campbellf9315512010-05-23 21:44:54 -0700185
186 while (length) {
187 u32 len = sge->length;
188
189 if (len > length)
190 len = length;
191 if (len > sge->sge_length)
192 len = sge->sge_length;
193 BUG_ON(len == 0);
194 sge->vaddr += len;
195 sge->length -= len;
196 sge->sge_length -= len;
197 if (sge->sge_length == 0) {
198 if (release)
Dennis Dalessandro7c2e11f2016-01-22 12:45:59 -0800199 rvt_put_mr(sge->mr);
Ralph Campbellf9315512010-05-23 21:44:54 -0700200 if (--ss->num_sge)
201 *sge = *ss->sg_list++;
202 } else if (sge->length == 0 && sge->mr->lkey) {
Dennis Dalessandro7c2e11f2016-01-22 12:45:59 -0800203 if (++sge->n >= RVT_SEGSZ) {
Ralph Campbellf9315512010-05-23 21:44:54 -0700204 if (++sge->m >= sge->mr->mapsz)
205 break;
206 sge->n = 0;
207 }
208 sge->vaddr =
209 sge->mr->map[sge->m]->segs[sge->n].vaddr;
210 sge->length =
211 sge->mr->map[sge->m]->segs[sge->n].length;
212 }
213 length -= len;
214 }
215}
216
217/*
218 * Count the number of DMA descriptors needed to send length bytes of data.
219 * Don't modify the qib_sge_state to get the count.
220 * Return zero if any of the segments is not aligned.
221 */
Dennis Dalessandro7c2e11f2016-01-22 12:45:59 -0800222static u32 qib_count_sge(struct rvt_sge_state *ss, u32 length)
Ralph Campbellf9315512010-05-23 21:44:54 -0700223{
Dennis Dalessandro7c2e11f2016-01-22 12:45:59 -0800224 struct rvt_sge *sg_list = ss->sg_list;
225 struct rvt_sge sge = ss->sge;
Ralph Campbellf9315512010-05-23 21:44:54 -0700226 u8 num_sge = ss->num_sge;
227 u32 ndesc = 1; /* count the header */
228
229 while (length) {
230 u32 len = sge.length;
231
232 if (len > length)
233 len = length;
234 if (len > sge.sge_length)
235 len = sge.sge_length;
236 BUG_ON(len == 0);
237 if (((long) sge.vaddr & (sizeof(u32) - 1)) ||
238 (len != length && (len & (sizeof(u32) - 1)))) {
239 ndesc = 0;
240 break;
241 }
242 ndesc++;
243 sge.vaddr += len;
244 sge.length -= len;
245 sge.sge_length -= len;
246 if (sge.sge_length == 0) {
247 if (--num_sge)
248 sge = *sg_list++;
249 } else if (sge.length == 0 && sge.mr->lkey) {
Dennis Dalessandro7c2e11f2016-01-22 12:45:59 -0800250 if (++sge.n >= RVT_SEGSZ) {
Ralph Campbellf9315512010-05-23 21:44:54 -0700251 if (++sge.m >= sge.mr->mapsz)
252 break;
253 sge.n = 0;
254 }
255 sge.vaddr =
256 sge.mr->map[sge.m]->segs[sge.n].vaddr;
257 sge.length =
258 sge.mr->map[sge.m]->segs[sge.n].length;
259 }
260 length -= len;
261 }
262 return ndesc;
263}
264
265/*
266 * Copy from the SGEs to the data buffer.
267 */
Dennis Dalessandro7c2e11f2016-01-22 12:45:59 -0800268static void qib_copy_from_sge(void *data, struct rvt_sge_state *ss, u32 length)
Ralph Campbellf9315512010-05-23 21:44:54 -0700269{
Dennis Dalessandro7c2e11f2016-01-22 12:45:59 -0800270 struct rvt_sge *sge = &ss->sge;
Ralph Campbellf9315512010-05-23 21:44:54 -0700271
272 while (length) {
273 u32 len = sge->length;
274
275 if (len > length)
276 len = length;
277 if (len > sge->sge_length)
278 len = sge->sge_length;
279 BUG_ON(len == 0);
280 memcpy(data, sge->vaddr, len);
281 sge->vaddr += len;
282 sge->length -= len;
283 sge->sge_length -= len;
284 if (sge->sge_length == 0) {
285 if (--ss->num_sge)
286 *sge = *ss->sg_list++;
287 } else if (sge->length == 0 && sge->mr->lkey) {
Dennis Dalessandro7c2e11f2016-01-22 12:45:59 -0800288 if (++sge->n >= RVT_SEGSZ) {
Ralph Campbellf9315512010-05-23 21:44:54 -0700289 if (++sge->m >= sge->mr->mapsz)
290 break;
291 sge->n = 0;
292 }
293 sge->vaddr =
294 sge->mr->map[sge->m]->segs[sge->n].vaddr;
295 sge->length =
296 sge->mr->map[sge->m]->segs[sge->n].length;
297 }
298 data += len;
299 length -= len;
300 }
301}
302
303/**
Ralph Campbellf9315512010-05-23 21:44:54 -0700304 * qib_qp_rcv - processing an incoming packet on a QP
305 * @rcd: the context pointer
306 * @hdr: the packet header
307 * @has_grh: true if the packet has a GRH
308 * @data: the packet data
309 * @tlen: the packet length
310 * @qp: the QP the packet came on
311 *
312 * This is called from qib_ib_rcv() to process an incoming packet
313 * for the given QP.
314 * Called at interrupt level.
315 */
Mike Marciniszyn261a4352016-09-06 04:35:05 -0700316static void qib_qp_rcv(struct qib_ctxtdata *rcd, struct ib_header *hdr,
Dennis Dalessandro7c2e11f2016-01-22 12:45:59 -0800317 int has_grh, void *data, u32 tlen, struct rvt_qp *qp)
Ralph Campbellf9315512010-05-23 21:44:54 -0700318{
319 struct qib_ibport *ibp = &rcd->ppd->ibport_data;
320
Ralph Campbella5210c12010-08-02 22:39:30 +0000321 spin_lock(&qp->r_lock);
322
Ralph Campbellf9315512010-05-23 21:44:54 -0700323 /* Check for valid receive state. */
Harish Chegondidb3ef0e2016-01-22 13:07:42 -0800324 if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) {
Harish Chegondif24a6d42016-01-22 12:56:02 -0800325 ibp->rvp.n_pkt_drops++;
Ralph Campbella5210c12010-08-02 22:39:30 +0000326 goto unlock;
Ralph Campbellf9315512010-05-23 21:44:54 -0700327 }
328
329 switch (qp->ibqp.qp_type) {
330 case IB_QPT_SMI:
331 case IB_QPT_GSI:
332 if (ib_qib_disable_sma)
333 break;
334 /* FALLTHROUGH */
335 case IB_QPT_UD:
336 qib_ud_rcv(ibp, hdr, has_grh, data, tlen, qp);
337 break;
338
339 case IB_QPT_RC:
340 qib_rc_rcv(rcd, hdr, has_grh, data, tlen, qp);
341 break;
342
343 case IB_QPT_UC:
344 qib_uc_rcv(ibp, hdr, has_grh, data, tlen, qp);
345 break;
346
347 default:
348 break;
349 }
Ralph Campbella5210c12010-08-02 22:39:30 +0000350
351unlock:
352 spin_unlock(&qp->r_lock);
Ralph Campbellf9315512010-05-23 21:44:54 -0700353}
354
355/**
356 * qib_ib_rcv - process an incoming packet
357 * @rcd: the context pointer
358 * @rhdr: the header of the packet
359 * @data: the packet payload
360 * @tlen: the packet length
361 *
362 * This is called from qib_kreceive() to process an incoming packet at
363 * interrupt level. Tlen is the length of the header + data + CRC in bytes.
364 */
365void qib_ib_rcv(struct qib_ctxtdata *rcd, void *rhdr, void *data, u32 tlen)
366{
367 struct qib_pportdata *ppd = rcd->ppd;
368 struct qib_ibport *ibp = &ppd->ibport_data;
Mike Marciniszyn261a4352016-09-06 04:35:05 -0700369 struct ib_header *hdr = rhdr;
Harish Chegondi1cefc2c2016-02-03 14:20:19 -0800370 struct qib_devdata *dd = ppd->dd;
371 struct rvt_dev_info *rdi = &dd->verbs_dev.rdi;
Mike Marciniszyn261a4352016-09-06 04:35:05 -0700372 struct ib_other_headers *ohdr;
Dennis Dalessandro7c2e11f2016-01-22 12:45:59 -0800373 struct rvt_qp *qp;
Ralph Campbellf9315512010-05-23 21:44:54 -0700374 u32 qp_num;
375 int lnh;
376 u8 opcode;
377 u16 lid;
378
379 /* 24 == LRH+BTH+CRC */
380 if (unlikely(tlen < 24))
381 goto drop;
382
383 /* Check for a valid destination LID (see ch. 7.11.1). */
384 lid = be16_to_cpu(hdr->lrh[1]);
Dennis Dalessandro9ff198f2016-01-22 12:44:53 -0800385 if (lid < be16_to_cpu(IB_MULTICAST_LID_BASE)) {
Ralph Campbellf9315512010-05-23 21:44:54 -0700386 lid &= ~((1 << ppd->lmc) - 1);
387 if (unlikely(lid != ppd->lid))
388 goto drop;
389 }
390
391 /* Check for GRH */
392 lnh = be16_to_cpu(hdr->lrh[0]) & 3;
393 if (lnh == QIB_LRH_BTH)
394 ohdr = &hdr->u.oth;
395 else if (lnh == QIB_LRH_GRH) {
396 u32 vtf;
397
398 ohdr = &hdr->u.l.oth;
399 if (hdr->u.l.grh.next_hdr != IB_GRH_NEXT_HDR)
400 goto drop;
401 vtf = be32_to_cpu(hdr->u.l.grh.version_tclass_flow);
402 if ((vtf >> IB_GRH_VERSION_SHIFT) != IB_GRH_VERSION)
403 goto drop;
404 } else
405 goto drop;
406
Mike Marciniszynddb88762013-06-15 17:07:03 -0400407 opcode = (be32_to_cpu(ohdr->bth[0]) >> 24) & 0x7f;
408#ifdef CONFIG_DEBUG_FS
409 rcd->opstats->stats[opcode].n_bytes += tlen;
410 rcd->opstats->stats[opcode].n_packets++;
411#endif
Ralph Campbellf9315512010-05-23 21:44:54 -0700412
413 /* Get the destination QP number. */
Harish Chegondi70696ea2016-02-03 14:20:27 -0800414 qp_num = be32_to_cpu(ohdr->bth[1]) & RVT_QPN_MASK;
Ralph Campbellf9315512010-05-23 21:44:54 -0700415 if (qp_num == QIB_MULTICAST_QPN) {
Harish Chegondi18f6c582016-01-22 13:07:55 -0800416 struct rvt_mcast *mcast;
417 struct rvt_mcast_qp *p;
Ralph Campbellf9315512010-05-23 21:44:54 -0700418
419 if (lnh != QIB_LRH_GRH)
420 goto drop;
Harish Chegondi18f6c582016-01-22 13:07:55 -0800421 mcast = rvt_mcast_find(&ibp->rvp, &hdr->u.l.grh.dgid);
Ralph Campbellf9315512010-05-23 21:44:54 -0700422 if (mcast == NULL)
423 goto drop;
Mike Marciniszyn7d7632a2014-03-07 08:40:55 -0500424 this_cpu_inc(ibp->pmastats->n_multicast_rcv);
Ralph Campbellf9315512010-05-23 21:44:54 -0700425 list_for_each_entry_rcu(p, &mcast->qp_list, list)
426 qib_qp_rcv(rcd, hdr, 1, data, tlen, p->qp);
427 /*
Harish Chegondi18f6c582016-01-22 13:07:55 -0800428 * Notify rvt_multicast_detach() if it is waiting for us
Ralph Campbellf9315512010-05-23 21:44:54 -0700429 * to finish.
430 */
431 if (atomic_dec_return(&mcast->refcount) <= 1)
432 wake_up(&mcast->wait);
433 } else {
Harish Chegondi1cefc2c2016-02-03 14:20:19 -0800434 rcu_read_lock();
435 qp = rvt_lookup_qpn(rdi, &ibp->rvp, qp_num);
436 if (!qp) {
437 rcu_read_unlock();
438 goto drop;
Mike Marciniszynaf061a62011-09-23 13:16:44 -0400439 }
Mike Marciniszyn7d7632a2014-03-07 08:40:55 -0500440 this_cpu_inc(ibp->pmastats->n_unicast_rcv);
Ralph Campbellf9315512010-05-23 21:44:54 -0700441 qib_qp_rcv(rcd, hdr, lnh == QIB_LRH_GRH, data, tlen, qp);
Harish Chegondi1cefc2c2016-02-03 14:20:19 -0800442 rcu_read_unlock();
Ralph Campbellf9315512010-05-23 21:44:54 -0700443 }
444 return;
445
446drop:
Harish Chegondif24a6d42016-01-22 12:56:02 -0800447 ibp->rvp.n_pkt_drops++;
Ralph Campbellf9315512010-05-23 21:44:54 -0700448}
449
450/*
451 * This is called from a timer to check for QPs
452 * which need kernel memory in order to send a packet.
453 */
454static void mem_timer(unsigned long data)
455{
456 struct qib_ibdev *dev = (struct qib_ibdev *) data;
457 struct list_head *list = &dev->memwait;
Dennis Dalessandro7c2e11f2016-01-22 12:45:59 -0800458 struct rvt_qp *qp = NULL;
Dennis Dalessandroffc26902016-01-22 12:45:11 -0800459 struct qib_qp_priv *priv = NULL;
Ralph Campbellf9315512010-05-23 21:44:54 -0700460 unsigned long flags;
461
Harish Chegondicd182012016-01-22 12:56:14 -0800462 spin_lock_irqsave(&dev->rdi.pending_lock, flags);
Ralph Campbellf9315512010-05-23 21:44:54 -0700463 if (!list_empty(list)) {
Dennis Dalessandroffc26902016-01-22 12:45:11 -0800464 priv = list_entry(list->next, struct qib_qp_priv, iowait);
465 qp = priv->owner;
466 list_del_init(&priv->iowait);
Sebastian Sanchez238b1862016-12-07 19:34:00 -0800467 rvt_get_qp(qp);
Ralph Campbellf9315512010-05-23 21:44:54 -0700468 if (!list_empty(list))
469 mod_timer(&dev->mem_timer, jiffies + 1);
470 }
Harish Chegondicd182012016-01-22 12:56:14 -0800471 spin_unlock_irqrestore(&dev->rdi.pending_lock, flags);
Ralph Campbellf9315512010-05-23 21:44:54 -0700472
473 if (qp) {
474 spin_lock_irqsave(&qp->s_lock, flags);
Harish Chegondi01ba79d2016-01-22 12:56:46 -0800475 if (qp->s_flags & RVT_S_WAIT_KMEM) {
476 qp->s_flags &= ~RVT_S_WAIT_KMEM;
Ralph Campbellf9315512010-05-23 21:44:54 -0700477 qib_schedule_send(qp);
478 }
479 spin_unlock_irqrestore(&qp->s_lock, flags);
Sebastian Sanchez238b1862016-12-07 19:34:00 -0800480 rvt_put_qp(qp);
Ralph Campbellf9315512010-05-23 21:44:54 -0700481 }
482}
483
Dennis Dalessandro7c2e11f2016-01-22 12:45:59 -0800484static void update_sge(struct rvt_sge_state *ss, u32 length)
Ralph Campbellf9315512010-05-23 21:44:54 -0700485{
Dennis Dalessandro7c2e11f2016-01-22 12:45:59 -0800486 struct rvt_sge *sge = &ss->sge;
Ralph Campbellf9315512010-05-23 21:44:54 -0700487
488 sge->vaddr += length;
489 sge->length -= length;
490 sge->sge_length -= length;
491 if (sge->sge_length == 0) {
492 if (--ss->num_sge)
493 *sge = *ss->sg_list++;
494 } else if (sge->length == 0 && sge->mr->lkey) {
Dennis Dalessandro7c2e11f2016-01-22 12:45:59 -0800495 if (++sge->n >= RVT_SEGSZ) {
Ralph Campbellf9315512010-05-23 21:44:54 -0700496 if (++sge->m >= sge->mr->mapsz)
497 return;
498 sge->n = 0;
499 }
500 sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr;
501 sge->length = sge->mr->map[sge->m]->segs[sge->n].length;
502 }
503}
504
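/*
 * Byte-order helpers used by copy_io() below to pack unaligned payload
 * bytes into the 32-bit words written to the PIO buffer.
 */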
505#ifdef __LITTLE_ENDIAN
506static inline u32 get_upper_bits(u32 data, u32 shift)
507{
508 return data >> shift;
509}
510
511static inline u32 set_upper_bits(u32 data, u32 shift)
512{
513 return data << shift;
514}
515
516static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
517{
518 data <<= ((sizeof(u32) - n) * BITS_PER_BYTE);
519 data >>= ((sizeof(u32) - n - off) * BITS_PER_BYTE);
520 return data;
521}
522#else
523static inline u32 get_upper_bits(u32 data, u32 shift)
524{
525 return data << shift;
526}
527
528static inline u32 set_upper_bits(u32 data, u32 shift)
529{
530 return data >> shift;
531}
532
533static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
534{
535 data >>= ((sizeof(u32) - n) * BITS_PER_BYTE);
536 data <<= ((sizeof(u32) - n - off) * BITS_PER_BYTE);
537 return data;
538}
539#endif
540
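/*
 * Copy an SGE payload into a write-combining PIO buffer one 32-bit word
 * at a time, coping with unaligned source addresses.  The final word is
 * held back and written last as the "trigger" word, with flushes as
 * required by the hardware.
 */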
Dennis Dalessandro7c2e11f2016-01-22 12:45:59 -0800541static void copy_io(u32 __iomem *piobuf, struct rvt_sge_state *ss,
Ralph Campbellf9315512010-05-23 21:44:54 -0700542 u32 length, unsigned flush_wc)
543{
544 u32 extra = 0;
545 u32 data = 0;
546 u32 last;
547
548 while (1) {
549 u32 len = ss->sge.length;
550 u32 off;
551
552 if (len > length)
553 len = length;
554 if (len > ss->sge.sge_length)
555 len = ss->sge.sge_length;
556 BUG_ON(len == 0);
557 /* If the source address is not aligned, try to align it. */
558 off = (unsigned long)ss->sge.vaddr & (sizeof(u32) - 1);
559 if (off) {
560 u32 *addr = (u32 *)((unsigned long)ss->sge.vaddr &
561 ~(sizeof(u32) - 1));
562 u32 v = get_upper_bits(*addr, off * BITS_PER_BYTE);
563 u32 y;
564
565 y = sizeof(u32) - off;
566 if (len > y)
567 len = y;
568 if (len + extra >= sizeof(u32)) {
569 data |= set_upper_bits(v, extra *
570 BITS_PER_BYTE);
571 len = sizeof(u32) - extra;
572 if (len == length) {
573 last = data;
574 break;
575 }
576 __raw_writel(data, piobuf);
577 piobuf++;
578 extra = 0;
579 data = 0;
580 } else {
581 /* Clear unused upper bytes */
582 data |= clear_upper_bytes(v, len, extra);
583 if (len == length) {
584 last = data;
585 break;
586 }
587 extra += len;
588 }
589 } else if (extra) {
590 /* Source address is aligned. */
591 u32 *addr = (u32 *) ss->sge.vaddr;
592 int shift = extra * BITS_PER_BYTE;
593 int ushift = 32 - shift;
594 u32 l = len;
595
596 while (l >= sizeof(u32)) {
597 u32 v = *addr;
598
599 data |= set_upper_bits(v, shift);
600 __raw_writel(data, piobuf);
601 data = get_upper_bits(v, ushift);
602 piobuf++;
603 addr++;
604 l -= sizeof(u32);
605 }
606 /*
607 * We still have 'extra' number of bytes leftover.
608 */
609 if (l) {
610 u32 v = *addr;
611
612 if (l + extra >= sizeof(u32)) {
613 data |= set_upper_bits(v, shift);
614 len -= l + extra - sizeof(u32);
615 if (len == length) {
616 last = data;
617 break;
618 }
619 __raw_writel(data, piobuf);
620 piobuf++;
621 extra = 0;
622 data = 0;
623 } else {
624 /* Clear unused upper bytes */
625 data |= clear_upper_bytes(v, l, extra);
626 if (len == length) {
627 last = data;
628 break;
629 }
630 extra += l;
631 }
632 } else if (len == length) {
633 last = data;
634 break;
635 }
636 } else if (len == length) {
637 u32 w;
638
639 /*
640 * Need to round up for the last dword in the
641 * packet.
642 */
643 w = (len + 3) >> 2;
644 qib_pio_copy(piobuf, ss->sge.vaddr, w - 1);
645 piobuf += w - 1;
646 last = ((u32 *) ss->sge.vaddr)[w - 1];
647 break;
648 } else {
649 u32 w = len >> 2;
650
651 qib_pio_copy(piobuf, ss->sge.vaddr, w);
652 piobuf += w;
653
654 extra = len & (sizeof(u32) - 1);
655 if (extra) {
656 u32 v = ((u32 *) ss->sge.vaddr)[w];
657
658 /* Clear unused upper bytes */
659 data = clear_upper_bytes(v, extra, 0);
660 }
661 }
662 update_sge(ss, len);
663 length -= len;
664 }
665 /* Update address before sending packet. */
666 update_sge(ss, length);
667 if (flush_wc) {
668 /* must flush early everything before trigger word */
669 qib_flush_wc();
670 __raw_writel(last, piobuf);
671 /* be sure trigger word is written */
672 qib_flush_wc();
673 } else
674 __raw_writel(last, piobuf);
675}
676
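/*
 * Slow path for allocating a verbs txreq: retry under qp->s_lock and, if
 * the free list is still empty, queue the QP on the txwait list and
 * return -EBUSY so the send is retried when a txreq is freed.
 */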
Mike Marciniszyn48947102011-12-23 08:03:41 -0500677static noinline struct qib_verbs_txreq *__get_txreq(struct qib_ibdev *dev,
Dennis Dalessandro7c2e11f2016-01-22 12:45:59 -0800678 struct rvt_qp *qp)
Ralph Campbellf9315512010-05-23 21:44:54 -0700679{
Dennis Dalessandroffc26902016-01-22 12:45:11 -0800680 struct qib_qp_priv *priv = qp->priv;
Ralph Campbellf9315512010-05-23 21:44:54 -0700681 struct qib_verbs_txreq *tx;
682 unsigned long flags;
683
684 spin_lock_irqsave(&qp->s_lock, flags);
Harish Chegondicd182012016-01-22 12:56:14 -0800685 spin_lock(&dev->rdi.pending_lock);
Ralph Campbellf9315512010-05-23 21:44:54 -0700686
687 if (!list_empty(&dev->txreq_free)) {
688 struct list_head *l = dev->txreq_free.next;
689
690 list_del(l);
Harish Chegondicd182012016-01-22 12:56:14 -0800691 spin_unlock(&dev->rdi.pending_lock);
Mike Marciniszyn48947102011-12-23 08:03:41 -0500692 spin_unlock_irqrestore(&qp->s_lock, flags);
Ralph Campbellf9315512010-05-23 21:44:54 -0700693 tx = list_entry(l, struct qib_verbs_txreq, txreq.list);
Ralph Campbellf9315512010-05-23 21:44:54 -0700694 } else {
Harish Chegondidb3ef0e2016-01-22 13:07:42 -0800695 if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK &&
Dennis Dalessandroffc26902016-01-22 12:45:11 -0800696 list_empty(&priv->iowait)) {
Ralph Campbellf9315512010-05-23 21:44:54 -0700697 dev->n_txwait++;
Harish Chegondi01ba79d2016-01-22 12:56:46 -0800698 qp->s_flags |= RVT_S_WAIT_TX;
Dennis Dalessandroffc26902016-01-22 12:45:11 -0800699 list_add_tail(&priv->iowait, &dev->txwait);
Ralph Campbellf9315512010-05-23 21:44:54 -0700700 }
Harish Chegondi01ba79d2016-01-22 12:56:46 -0800701 qp->s_flags &= ~RVT_S_BUSY;
Harish Chegondicd182012016-01-22 12:56:14 -0800702 spin_unlock(&dev->rdi.pending_lock);
Mike Marciniszyn48947102011-12-23 08:03:41 -0500703 spin_unlock_irqrestore(&qp->s_lock, flags);
704 tx = ERR_PTR(-EBUSY);
Ralph Campbellf9315512010-05-23 21:44:54 -0700705 }
Mike Marciniszyn48947102011-12-23 08:03:41 -0500706 return tx;
707}
Ralph Campbellf9315512010-05-23 21:44:54 -0700708
Mike Marciniszyn48947102011-12-23 08:03:41 -0500709static inline struct qib_verbs_txreq *get_txreq(struct qib_ibdev *dev,
Dennis Dalessandro7c2e11f2016-01-22 12:45:59 -0800710 struct rvt_qp *qp)
Mike Marciniszyn48947102011-12-23 08:03:41 -0500711{
712 struct qib_verbs_txreq *tx;
713 unsigned long flags;
Ralph Campbellf9315512010-05-23 21:44:54 -0700714
Harish Chegondicd182012016-01-22 12:56:14 -0800715 spin_lock_irqsave(&dev->rdi.pending_lock, flags);
Mike Marciniszyn48947102011-12-23 08:03:41 -0500716 /* assume the list non empty */
717 if (likely(!list_empty(&dev->txreq_free))) {
718 struct list_head *l = dev->txreq_free.next;
719
720 list_del(l);
Harish Chegondicd182012016-01-22 12:56:14 -0800721 spin_unlock_irqrestore(&dev->rdi.pending_lock, flags);
Mike Marciniszyn48947102011-12-23 08:03:41 -0500722 tx = list_entry(l, struct qib_verbs_txreq, txreq.list);
723 } else {
724 /* call slow path to get the extra lock */
Harish Chegondicd182012016-01-22 12:56:14 -0800725 spin_unlock_irqrestore(&dev->rdi.pending_lock, flags);
Mike Marciniszyn48947102011-12-23 08:03:41 -0500726 tx = __get_txreq(dev, qp);
727 }
Ralph Campbellf9315512010-05-23 21:44:54 -0700728 return tx;
729}
730
731void qib_put_txreq(struct qib_verbs_txreq *tx)
732{
733 struct qib_ibdev *dev;
Dennis Dalessandro7c2e11f2016-01-22 12:45:59 -0800734 struct rvt_qp *qp;
Dennis Dalessandroffc26902016-01-22 12:45:11 -0800735 struct qib_qp_priv *priv;
Ralph Campbellf9315512010-05-23 21:44:54 -0700736 unsigned long flags;
737
738 qp = tx->qp;
739 dev = to_idev(qp->ibqp.device);
740
Ralph Campbellf9315512010-05-23 21:44:54 -0700741 if (tx->mr) {
Dennis Dalessandro7c2e11f2016-01-22 12:45:59 -0800742 rvt_put_mr(tx->mr);
Ralph Campbellf9315512010-05-23 21:44:54 -0700743 tx->mr = NULL;
744 }
745 if (tx->txreq.flags & QIB_SDMA_TXREQ_F_FREEBUF) {
746 tx->txreq.flags &= ~QIB_SDMA_TXREQ_F_FREEBUF;
747 dma_unmap_single(&dd_from_dev(dev)->pcidev->dev,
748 tx->txreq.addr, tx->hdr_dwords << 2,
749 DMA_TO_DEVICE);
750 kfree(tx->align_buf);
751 }
752
Harish Chegondicd182012016-01-22 12:56:14 -0800753 spin_lock_irqsave(&dev->rdi.pending_lock, flags);
Ralph Campbellf9315512010-05-23 21:44:54 -0700754
755 /* Put struct back on free list */
756 list_add(&tx->txreq.list, &dev->txreq_free);
757
758 if (!list_empty(&dev->txwait)) {
759 /* Wake up first QP wanting a free struct */
Dennis Dalessandroffc26902016-01-22 12:45:11 -0800760 priv = list_entry(dev->txwait.next, struct qib_qp_priv,
761 iowait);
762 qp = priv->owner;
763 list_del_init(&priv->iowait);
Sebastian Sanchez238b1862016-12-07 19:34:00 -0800764 rvt_get_qp(qp);
Harish Chegondicd182012016-01-22 12:56:14 -0800765 spin_unlock_irqrestore(&dev->rdi.pending_lock, flags);
Ralph Campbellf9315512010-05-23 21:44:54 -0700766
767 spin_lock_irqsave(&qp->s_lock, flags);
Harish Chegondi01ba79d2016-01-22 12:56:46 -0800768 if (qp->s_flags & RVT_S_WAIT_TX) {
769 qp->s_flags &= ~RVT_S_WAIT_TX;
Ralph Campbellf9315512010-05-23 21:44:54 -0700770 qib_schedule_send(qp);
771 }
772 spin_unlock_irqrestore(&qp->s_lock, flags);
773
Sebastian Sanchez238b1862016-12-07 19:34:00 -0800774 rvt_put_qp(qp);
Ralph Campbellf9315512010-05-23 21:44:54 -0700775 } else
Harish Chegondicd182012016-01-22 12:56:14 -0800776 spin_unlock_irqrestore(&dev->rdi.pending_lock, flags);
Ralph Campbellf9315512010-05-23 21:44:54 -0700777}
778
779/*
780 * This is called when there are send DMA descriptors that might be
781 * available.
782 *
783 * This is called with ppd->sdma_lock held.
784 */
785void qib_verbs_sdma_desc_avail(struct qib_pportdata *ppd, unsigned avail)
786{
Dennis Dalessandro7c2e11f2016-01-22 12:45:59 -0800787 struct rvt_qp *qp, *nqp;
Dennis Dalessandroffc26902016-01-22 12:45:11 -0800788 struct qib_qp_priv *qpp, *nqpp;
Dennis Dalessandro7c2e11f2016-01-22 12:45:59 -0800789 struct rvt_qp *qps[20];
Ralph Campbellf9315512010-05-23 21:44:54 -0700790 struct qib_ibdev *dev;
791 unsigned i, n;
792
793 n = 0;
794 dev = &ppd->dd->verbs_dev;
Harish Chegondicd182012016-01-22 12:56:14 -0800795 spin_lock(&dev->rdi.pending_lock);
Ralph Campbellf9315512010-05-23 21:44:54 -0700796
797 /* Search wait list for first QP wanting DMA descriptors. */
Dennis Dalessandroffc26902016-01-22 12:45:11 -0800798 list_for_each_entry_safe(qpp, nqpp, &dev->dmawait, iowait) {
799 qp = qpp->owner;
800 nqp = nqpp->owner;
Ralph Campbellf9315512010-05-23 21:44:54 -0700801 if (qp->port_num != ppd->port)
802 continue;
803 if (n == ARRAY_SIZE(qps))
804 break;
Dennis Dalessandroffc26902016-01-22 12:45:11 -0800805 if (qpp->s_tx->txreq.sg_count > avail)
Ralph Campbellf9315512010-05-23 21:44:54 -0700806 break;
Dennis Dalessandroffc26902016-01-22 12:45:11 -0800807 avail -= qpp->s_tx->txreq.sg_count;
808 list_del_init(&qpp->iowait);
Sebastian Sanchez238b1862016-12-07 19:34:00 -0800809 rvt_get_qp(qp);
Ralph Campbellf9315512010-05-23 21:44:54 -0700810 qps[n++] = qp;
811 }
812
Harish Chegondicd182012016-01-22 12:56:14 -0800813 spin_unlock(&dev->rdi.pending_lock);
Ralph Campbellf9315512010-05-23 21:44:54 -0700814
815 for (i = 0; i < n; i++) {
816 qp = qps[i];
817 spin_lock(&qp->s_lock);
Harish Chegondi01ba79d2016-01-22 12:56:46 -0800818 if (qp->s_flags & RVT_S_WAIT_DMA_DESC) {
819 qp->s_flags &= ~RVT_S_WAIT_DMA_DESC;
Ralph Campbellf9315512010-05-23 21:44:54 -0700820 qib_schedule_send(qp);
821 }
822 spin_unlock(&qp->s_lock);
Sebastian Sanchez238b1862016-12-07 19:34:00 -0800823 rvt_put_qp(qp);
Ralph Campbellf9315512010-05-23 21:44:54 -0700824 }
825}
826
827/*
828 * This is called with ppd->sdma_lock held.
829 */
830static void sdma_complete(struct qib_sdma_txreq *cookie, int status)
831{
832 struct qib_verbs_txreq *tx =
833 container_of(cookie, struct qib_verbs_txreq, txreq);
Dennis Dalessandro7c2e11f2016-01-22 12:45:59 -0800834 struct rvt_qp *qp = tx->qp;
Dennis Dalessandroffc26902016-01-22 12:45:11 -0800835 struct qib_qp_priv *priv = qp->priv;
Ralph Campbellf9315512010-05-23 21:44:54 -0700836
837 spin_lock(&qp->s_lock);
838 if (tx->wqe)
839 qib_send_complete(qp, tx->wqe, IB_WC_SUCCESS);
840 else if (qp->ibqp.qp_type == IB_QPT_RC) {
Mike Marciniszyn261a4352016-09-06 04:35:05 -0700841 struct ib_header *hdr;
Ralph Campbellf9315512010-05-23 21:44:54 -0700842
843 if (tx->txreq.flags & QIB_SDMA_TXREQ_F_FREEBUF)
844 hdr = &tx->align_buf->hdr;
845 else {
846 struct qib_ibdev *dev = to_idev(qp->ibqp.device);
847
848 hdr = &dev->pio_hdrs[tx->hdr_inx].hdr;
849 }
850 qib_rc_send_complete(qp, hdr);
851 }
Dennis Dalessandroffc26902016-01-22 12:45:11 -0800852 if (atomic_dec_and_test(&priv->s_dma_busy)) {
Ralph Campbellf9315512010-05-23 21:44:54 -0700853 if (qp->state == IB_QPS_RESET)
Dennis Dalessandroffc26902016-01-22 12:45:11 -0800854 wake_up(&priv->wait_dma);
Harish Chegondi01ba79d2016-01-22 12:56:46 -0800855 else if (qp->s_flags & RVT_S_WAIT_DMA) {
856 qp->s_flags &= ~RVT_S_WAIT_DMA;
Ralph Campbellf9315512010-05-23 21:44:54 -0700857 qib_schedule_send(qp);
858 }
859 }
860 spin_unlock(&qp->s_lock);
861
862 qib_put_txreq(tx);
863}
864
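/*
 * No kernel memory was available for the packet: queue the QP on the
 * memwait list (arming mem_timer if needed) and return -EBUSY so the
 * send is retried later.
 */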
Dennis Dalessandro7c2e11f2016-01-22 12:45:59 -0800865static int wait_kmem(struct qib_ibdev *dev, struct rvt_qp *qp)
Ralph Campbellf9315512010-05-23 21:44:54 -0700866{
Dennis Dalessandroffc26902016-01-22 12:45:11 -0800867 struct qib_qp_priv *priv = qp->priv;
Ralph Campbellf9315512010-05-23 21:44:54 -0700868 unsigned long flags;
869 int ret = 0;
870
871 spin_lock_irqsave(&qp->s_lock, flags);
Harish Chegondidb3ef0e2016-01-22 13:07:42 -0800872 if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) {
Harish Chegondicd182012016-01-22 12:56:14 -0800873 spin_lock(&dev->rdi.pending_lock);
Dennis Dalessandroffc26902016-01-22 12:45:11 -0800874 if (list_empty(&priv->iowait)) {
Ralph Campbellf9315512010-05-23 21:44:54 -0700875 if (list_empty(&dev->memwait))
876 mod_timer(&dev->mem_timer, jiffies + 1);
Harish Chegondi01ba79d2016-01-22 12:56:46 -0800877 qp->s_flags |= RVT_S_WAIT_KMEM;
Dennis Dalessandroffc26902016-01-22 12:45:11 -0800878 list_add_tail(&priv->iowait, &dev->memwait);
Ralph Campbellf9315512010-05-23 21:44:54 -0700879 }
Harish Chegondicd182012016-01-22 12:56:14 -0800880 spin_unlock(&dev->rdi.pending_lock);
Harish Chegondi01ba79d2016-01-22 12:56:46 -0800881 qp->s_flags &= ~RVT_S_BUSY;
Ralph Campbellf9315512010-05-23 21:44:54 -0700882 ret = -EBUSY;
883 }
884 spin_unlock_irqrestore(&qp->s_lock, flags);
885
886 return ret;
887}
888
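/*
 * Send a packet using the send DMA engine: build (or re-use) a verbs
 * txreq describing the header and payload, falling back to a kmalloc'ed
 * bounce buffer when the payload cannot be described by aligned
 * descriptors.
 */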
Mike Marciniszyn261a4352016-09-06 04:35:05 -0700889static int qib_verbs_send_dma(struct rvt_qp *qp, struct ib_header *hdr,
Dennis Dalessandro7c2e11f2016-01-22 12:45:59 -0800890 u32 hdrwords, struct rvt_sge_state *ss, u32 len,
Ralph Campbellf9315512010-05-23 21:44:54 -0700891 u32 plen, u32 dwords)
892{
Dennis Dalessandroffc26902016-01-22 12:45:11 -0800893 struct qib_qp_priv *priv = qp->priv;
Ralph Campbellf9315512010-05-23 21:44:54 -0700894 struct qib_ibdev *dev = to_idev(qp->ibqp.device);
895 struct qib_devdata *dd = dd_from_dev(dev);
896 struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
897 struct qib_pportdata *ppd = ppd_from_ibp(ibp);
898 struct qib_verbs_txreq *tx;
899 struct qib_pio_header *phdr;
900 u32 control;
901 u32 ndesc;
902 int ret;
903
Dennis Dalessandroffc26902016-01-22 12:45:11 -0800904 tx = priv->s_tx;
Ralph Campbellf9315512010-05-23 21:44:54 -0700905 if (tx) {
Dennis Dalessandroffc26902016-01-22 12:45:11 -0800906 priv->s_tx = NULL;
Ralph Campbellf9315512010-05-23 21:44:54 -0700907 /* resend previously constructed packet */
908 ret = qib_sdma_verbs_send(ppd, tx->ss, tx->dwords, tx);
909 goto bail;
910 }
911
Mike Marciniszyn48947102011-12-23 08:03:41 -0500912 tx = get_txreq(dev, qp);
913 if (IS_ERR(tx))
914 goto bail_tx;
Ralph Campbellf9315512010-05-23 21:44:54 -0700915
916 control = dd->f_setpbc_control(ppd, plen, qp->s_srate,
917 be16_to_cpu(hdr->lrh[0]) >> 12);
918 tx->qp = qp;
Ralph Campbellf9315512010-05-23 21:44:54 -0700919 tx->wqe = qp->s_wqe;
920 tx->mr = qp->s_rdma_mr;
921 if (qp->s_rdma_mr)
922 qp->s_rdma_mr = NULL;
923 tx->txreq.callback = sdma_complete;
924 if (dd->flags & QIB_HAS_SDMA_TIMEOUT)
925 tx->txreq.flags = QIB_SDMA_TXREQ_F_HEADTOHOST;
926 else
927 tx->txreq.flags = QIB_SDMA_TXREQ_F_INTREQ;
928 if (plen + 1 > dd->piosize2kmax_dwords)
929 tx->txreq.flags |= QIB_SDMA_TXREQ_F_USELARGEBUF;
930
931 if (len) {
932 /*
933 * Don't try to DMA if it takes more descriptors than
934 * the queue holds.
935 */
936 ndesc = qib_count_sge(ss, len);
937 if (ndesc >= ppd->sdma_descq_cnt)
938 ndesc = 0;
939 } else
940 ndesc = 1;
941 if (ndesc) {
942 phdr = &dev->pio_hdrs[tx->hdr_inx];
943 phdr->pbc[0] = cpu_to_le32(plen);
944 phdr->pbc[1] = cpu_to_le32(control);
945 memcpy(&phdr->hdr, hdr, hdrwords << 2);
946 tx->txreq.flags |= QIB_SDMA_TXREQ_F_FREEDESC;
947 tx->txreq.sg_count = ndesc;
948 tx->txreq.addr = dev->pio_hdrs_phys +
949 tx->hdr_inx * sizeof(struct qib_pio_header);
950 tx->hdr_dwords = hdrwords + 2; /* add PBC length */
951 ret = qib_sdma_verbs_send(ppd, ss, dwords, tx);
952 goto bail;
953 }
954
955 /* Allocate a buffer and copy the header and payload to it. */
956 tx->hdr_dwords = plen + 1;
957 phdr = kmalloc(tx->hdr_dwords << 2, GFP_ATOMIC);
958 if (!phdr)
959 goto err_tx;
960 phdr->pbc[0] = cpu_to_le32(plen);
961 phdr->pbc[1] = cpu_to_le32(control);
962 memcpy(&phdr->hdr, hdr, hdrwords << 2);
963 qib_copy_from_sge((u32 *) &phdr->hdr + hdrwords, ss, len);
964
965 tx->txreq.addr = dma_map_single(&dd->pcidev->dev, phdr,
966 tx->hdr_dwords << 2, DMA_TO_DEVICE);
967 if (dma_mapping_error(&dd->pcidev->dev, tx->txreq.addr))
968 goto map_err;
969 tx->align_buf = phdr;
970 tx->txreq.flags |= QIB_SDMA_TXREQ_F_FREEBUF;
971 tx->txreq.sg_count = 1;
972 ret = qib_sdma_verbs_send(ppd, NULL, 0, tx);
973 goto unaligned;
974
975map_err:
976 kfree(phdr);
977err_tx:
978 qib_put_txreq(tx);
979 ret = wait_kmem(dev, qp);
980unaligned:
Harish Chegondif24a6d42016-01-22 12:56:02 -0800981 ibp->rvp.n_unaligned++;
Ralph Campbellf9315512010-05-23 21:44:54 -0700982bail:
983 return ret;
Mike Marciniszyn48947102011-12-23 08:03:41 -0500984bail_tx:
985 ret = PTR_ERR(tx);
986 goto bail;
Ralph Campbellf9315512010-05-23 21:44:54 -0700987}
988
989/*
990 * If we are now in the error state, return zero to flush the
991 * send work request.
992 */
Dennis Dalessandro7c2e11f2016-01-22 12:45:59 -0800993static int no_bufs_available(struct rvt_qp *qp)
Ralph Campbellf9315512010-05-23 21:44:54 -0700994{
Dennis Dalessandroffc26902016-01-22 12:45:11 -0800995 struct qib_qp_priv *priv = qp->priv;
Ralph Campbellf9315512010-05-23 21:44:54 -0700996 struct qib_ibdev *dev = to_idev(qp->ibqp.device);
997 struct qib_devdata *dd;
998 unsigned long flags;
999 int ret = 0;
1000
1001 /*
1002 * Note that as soon as want_buffer() is called and
1003 * possibly before it returns, qib_ib_piobufavail()
1004 * could be called. Therefore, put QP on the I/O wait list before
1005 * enabling the PIO avail interrupt.
1006 */
1007 spin_lock_irqsave(&qp->s_lock, flags);
Harish Chegondidb3ef0e2016-01-22 13:07:42 -08001008 if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) {
Harish Chegondicd182012016-01-22 12:56:14 -08001009 spin_lock(&dev->rdi.pending_lock);
Dennis Dalessandroffc26902016-01-22 12:45:11 -08001010 if (list_empty(&priv->iowait)) {
Ralph Campbellf9315512010-05-23 21:44:54 -07001011 dev->n_piowait++;
Harish Chegondi01ba79d2016-01-22 12:56:46 -08001012 qp->s_flags |= RVT_S_WAIT_PIO;
Dennis Dalessandroffc26902016-01-22 12:45:11 -08001013 list_add_tail(&priv->iowait, &dev->piowait);
Ralph Campbellf9315512010-05-23 21:44:54 -07001014 dd = dd_from_dev(dev);
1015 dd->f_wantpiobuf_intr(dd, 1);
1016 }
Harish Chegondicd182012016-01-22 12:56:14 -08001017 spin_unlock(&dev->rdi.pending_lock);
Harish Chegondi01ba79d2016-01-22 12:56:46 -08001018 qp->s_flags &= ~RVT_S_BUSY;
Ralph Campbellf9315512010-05-23 21:44:54 -07001019 ret = -EBUSY;
1020 }
1021 spin_unlock_irqrestore(&qp->s_lock, flags);
1022 return ret;
1023}
1024
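/*
 * Send a packet by programmed I/O: write the PBC, header and payload
 * directly into a chip send buffer, with write-combining flushes around
 * the trigger words where the hardware requires them.
 */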
Mike Marciniszyn261a4352016-09-06 04:35:05 -07001025static int qib_verbs_send_pio(struct rvt_qp *qp, struct ib_header *ibhdr,
Dennis Dalessandro7c2e11f2016-01-22 12:45:59 -08001026 u32 hdrwords, struct rvt_sge_state *ss, u32 len,
Ralph Campbellf9315512010-05-23 21:44:54 -07001027 u32 plen, u32 dwords)
1028{
1029 struct qib_devdata *dd = dd_from_ibdev(qp->ibqp.device);
1030 struct qib_pportdata *ppd = dd->pport + qp->port_num - 1;
1031 u32 *hdr = (u32 *) ibhdr;
1032 u32 __iomem *piobuf_orig;
1033 u32 __iomem *piobuf;
1034 u64 pbc;
1035 unsigned long flags;
1036 unsigned flush_wc;
1037 u32 control;
1038 u32 pbufn;
1039
1040 control = dd->f_setpbc_control(ppd, plen, qp->s_srate,
1041 be16_to_cpu(ibhdr->lrh[0]) >> 12);
1042 pbc = ((u64) control << 32) | plen;
1043 piobuf = dd->f_getsendbuf(ppd, pbc, &pbufn);
1044 if (unlikely(piobuf == NULL))
1045 return no_bufs_available(qp);
1046
1047 /*
1048 * Write the pbc.
1049 * We have to flush after the PBC for correctness on some cpus
1050 * or WC buffer can be written out of order.
1051 */
1052 writeq(pbc, piobuf);
1053 piobuf_orig = piobuf;
1054 piobuf += 2;
1055
1056 flush_wc = dd->flags & QIB_PIO_FLUSH_WC;
1057 if (len == 0) {
1058 /*
1059 * If there is just the header portion, must flush before
1060 * writing last word of header for correctness, and after
1061 * the last header word (trigger word).
1062 */
1063 if (flush_wc) {
1064 qib_flush_wc();
1065 qib_pio_copy(piobuf, hdr, hdrwords - 1);
1066 qib_flush_wc();
1067 __raw_writel(hdr[hdrwords - 1], piobuf + hdrwords - 1);
1068 qib_flush_wc();
1069 } else
1070 qib_pio_copy(piobuf, hdr, hdrwords);
1071 goto done;
1072 }
1073
1074 if (flush_wc)
1075 qib_flush_wc();
1076 qib_pio_copy(piobuf, hdr, hdrwords);
1077 piobuf += hdrwords;
1078
1079 /* The common case is aligned and contained in one segment. */
1080 if (likely(ss->num_sge == 1 && len <= ss->sge.length &&
1081 !((unsigned long)ss->sge.vaddr & (sizeof(u32) - 1)))) {
1082 u32 *addr = (u32 *) ss->sge.vaddr;
1083
1084 /* Update address before sending packet. */
1085 update_sge(ss, len);
1086 if (flush_wc) {
1087 qib_pio_copy(piobuf, addr, dwords - 1);
1088 /* must flush early everything before trigger word */
1089 qib_flush_wc();
1090 __raw_writel(addr[dwords - 1], piobuf + dwords - 1);
1091 /* be sure trigger word is written */
1092 qib_flush_wc();
1093 } else
1094 qib_pio_copy(piobuf, addr, dwords);
1095 goto done;
1096 }
1097 copy_io(piobuf, ss, len, flush_wc);
1098done:
1099 if (dd->flags & QIB_USE_SPCL_TRIG) {
1100 u32 spcl_off = (pbufn >= dd->piobcnt2k) ? 2047 : 1023;
Mike Marciniszynda12c1f2015-01-16 11:23:31 -05001101
Ralph Campbellf9315512010-05-23 21:44:54 -07001102 qib_flush_wc();
1103 __raw_writel(0xaebecede, piobuf_orig + spcl_off);
1104 }
1105 qib_sendbuf_done(dd, pbufn);
1106 if (qp->s_rdma_mr) {
Dennis Dalessandro7c2e11f2016-01-22 12:45:59 -08001107 rvt_put_mr(qp->s_rdma_mr);
Ralph Campbellf9315512010-05-23 21:44:54 -07001108 qp->s_rdma_mr = NULL;
1109 }
1110 if (qp->s_wqe) {
1111 spin_lock_irqsave(&qp->s_lock, flags);
1112 qib_send_complete(qp, qp->s_wqe, IB_WC_SUCCESS);
1113 spin_unlock_irqrestore(&qp->s_lock, flags);
1114 } else if (qp->ibqp.qp_type == IB_QPT_RC) {
1115 spin_lock_irqsave(&qp->s_lock, flags);
1116 qib_rc_send_complete(qp, ibhdr);
1117 spin_unlock_irqrestore(&qp->s_lock, flags);
1118 }
1119 return 0;
1120}
1121
1122/**
1123 * qib_verbs_send - send a packet
1124 * @qp: the QP to send on
1125 * @hdr: the packet header
1126 * @hdrwords: the number of 32-bit words in the header
1127 * @ss: the SGE to send
1128 * @len: the length of the packet in bytes
1129 *
1130 * Return zero if packet is sent or queued OK.
Harish Chegondi01ba79d2016-01-22 12:56:46 -08001131 * Return non-zero and clear qp->s_flags RVT_S_BUSY otherwise.
Ralph Campbellf9315512010-05-23 21:44:54 -07001132 */
Mike Marciniszyn261a4352016-09-06 04:35:05 -07001133int qib_verbs_send(struct rvt_qp *qp, struct ib_header *hdr,
Dennis Dalessandro7c2e11f2016-01-22 12:45:59 -08001134 u32 hdrwords, struct rvt_sge_state *ss, u32 len)
Ralph Campbellf9315512010-05-23 21:44:54 -07001135{
1136 struct qib_devdata *dd = dd_from_ibdev(qp->ibqp.device);
1137 u32 plen;
1138 int ret;
1139 u32 dwords = (len + 3) >> 2;
1140
1141 /*
1142 * Calculate the send buffer trigger address.
1143 * The +1 counts for the pbc control dword following the pbc length.
1144 */
1145 plen = hdrwords + dwords + 1;
1146
1147 /*
1148 * VL15 packets (IB_QPT_SMI) will always use PIO, so we
1149 * can defer SDMA restart until link goes ACTIVE without
1150 * worrying about just how we got there.
1151 */
1152 if (qp->ibqp.qp_type == IB_QPT_SMI ||
1153 !(dd->flags & QIB_HAS_SEND_DMA))
1154 ret = qib_verbs_send_pio(qp, hdr, hdrwords, ss, len,
1155 plen, dwords);
1156 else
1157 ret = qib_verbs_send_dma(qp, hdr, hdrwords, ss, len,
1158 plen, dwords);
1159
1160 return ret;
1161}
1162
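/*
 * Snapshot the per-port send/receive word and packet counters plus the
 * transmit-wait counter; returns -EINVAL if the hardware is not present.
 */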
1163int qib_snapshot_counters(struct qib_pportdata *ppd, u64 *swords,
1164 u64 *rwords, u64 *spkts, u64 *rpkts,
1165 u64 *xmit_wait)
1166{
1167 int ret;
1168 struct qib_devdata *dd = ppd->dd;
1169
1170 if (!(dd->flags & QIB_PRESENT)) {
1171 /* no hardware, freeze, etc. */
1172 ret = -EINVAL;
1173 goto bail;
1174 }
1175 *swords = dd->f_portcntr(ppd, QIBPORTCNTR_WORDSEND);
1176 *rwords = dd->f_portcntr(ppd, QIBPORTCNTR_WORDRCV);
1177 *spkts = dd->f_portcntr(ppd, QIBPORTCNTR_PKTSEND);
1178 *rpkts = dd->f_portcntr(ppd, QIBPORTCNTR_PKTRCV);
1179 *xmit_wait = dd->f_portcntr(ppd, QIBPORTCNTR_SENDSTALL);
1180
1181 ret = 0;
1182
1183bail:
1184 return ret;
1185}
1186
1187/**
1188 * qib_get_counters - get various chip counters
 1189 * @ppd: the qlogic_ib per-port data
1190 * @cntrs: counters are placed here
1191 *
1192 * Return the counters needed by recv_pma_get_portcounters().
1193 */
1194int qib_get_counters(struct qib_pportdata *ppd,
1195 struct qib_verbs_counters *cntrs)
1196{
1197 int ret;
1198
1199 if (!(ppd->dd->flags & QIB_PRESENT)) {
1200 /* no hardware, freeze, etc. */
1201 ret = -EINVAL;
1202 goto bail;
1203 }
1204 cntrs->symbol_error_counter =
1205 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_IBSYMBOLERR);
1206 cntrs->link_error_recovery_counter =
1207 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_IBLINKERRRECOV);
1208 /*
1209 * The link downed counter counts when the other side downs the
1210 * connection. We add in the number of times we downed the link
1211 * due to local link integrity errors to compensate.
1212 */
1213 cntrs->link_downed_counter =
1214 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_IBLINKDOWN);
1215 cntrs->port_rcv_errors =
1216 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RXDROPPKT) +
1217 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RCVOVFL) +
1218 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERR_RLEN) +
1219 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_INVALIDRLEN) +
1220 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERRLINK) +
1221 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERRICRC) +
1222 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERRVCRC) +
1223 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERRLPCRC) +
1224 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_BADFORMAT);
1225 cntrs->port_rcv_errors +=
1226 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RXLOCALPHYERR);
1227 cntrs->port_rcv_errors +=
1228 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RXVLERR);
1229 cntrs->port_rcv_remphys_errors =
1230 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RCVEBP);
1231 cntrs->port_xmit_discards =
1232 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_UNSUPVL);
1233 cntrs->port_xmit_data = ppd->dd->f_portcntr(ppd,
1234 QIBPORTCNTR_WORDSEND);
1235 cntrs->port_rcv_data = ppd->dd->f_portcntr(ppd,
1236 QIBPORTCNTR_WORDRCV);
1237 cntrs->port_xmit_packets = ppd->dd->f_portcntr(ppd,
1238 QIBPORTCNTR_PKTSEND);
1239 cntrs->port_rcv_packets = ppd->dd->f_portcntr(ppd,
1240 QIBPORTCNTR_PKTRCV);
1241 cntrs->local_link_integrity_errors =
1242 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_LLI);
1243 cntrs->excessive_buffer_overrun_errors =
1244 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_EXCESSBUFOVFL);
1245 cntrs->vl15_dropped =
1246 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_VL15PKTDROP);
1247
1248 ret = 0;
1249
1250bail:
1251 return ret;
1252}
1253
1254/**
1255 * qib_ib_piobufavail - callback when a PIO buffer is available
1256 * @dd: the device pointer
1257 *
1258 * This is called from qib_intr() at interrupt level when a PIO buffer is
1259 * available after qib_verbs_send() returned an error that no buffers were
1260 * available. Disable the interrupt if there are no more QPs waiting.
1261 */
1262void qib_ib_piobufavail(struct qib_devdata *dd)
1263{
1264 struct qib_ibdev *dev = &dd->verbs_dev;
1265 struct list_head *list;
Dennis Dalessandro7c2e11f2016-01-22 12:45:59 -08001266 struct rvt_qp *qps[5];
1267 struct rvt_qp *qp;
Ralph Campbellf9315512010-05-23 21:44:54 -07001268 unsigned long flags;
1269 unsigned i, n;
Dennis Dalessandroffc26902016-01-22 12:45:11 -08001270 struct qib_qp_priv *priv;
Ralph Campbellf9315512010-05-23 21:44:54 -07001271
1272 list = &dev->piowait;
1273 n = 0;
1274
1275 /*
1276 * Note: checking that the piowait list is empty and clearing
1277 * the buffer available interrupt needs to be atomic or we
1278 * could end up with QPs on the wait list with the interrupt
1279 * disabled.
1280 */
Harish Chegondicd182012016-01-22 12:56:14 -08001281 spin_lock_irqsave(&dev->rdi.pending_lock, flags);
Ralph Campbellf9315512010-05-23 21:44:54 -07001282 while (!list_empty(list)) {
1283 if (n == ARRAY_SIZE(qps))
1284 goto full;
Dennis Dalessandroffc26902016-01-22 12:45:11 -08001285 priv = list_entry(list->next, struct qib_qp_priv, iowait);
1286 qp = priv->owner;
1287 list_del_init(&priv->iowait);
Sebastian Sanchez238b1862016-12-07 19:34:00 -08001288 rvt_get_qp(qp);
Ralph Campbellf9315512010-05-23 21:44:54 -07001289 qps[n++] = qp;
1290 }
1291 dd->f_wantpiobuf_intr(dd, 0);
1292full:
Harish Chegondicd182012016-01-22 12:56:14 -08001293 spin_unlock_irqrestore(&dev->rdi.pending_lock, flags);
Ralph Campbellf9315512010-05-23 21:44:54 -07001294
1295 for (i = 0; i < n; i++) {
1296 qp = qps[i];
1297
1298 spin_lock_irqsave(&qp->s_lock, flags);
Harish Chegondi01ba79d2016-01-22 12:56:46 -08001299 if (qp->s_flags & RVT_S_WAIT_PIO) {
1300 qp->s_flags &= ~RVT_S_WAIT_PIO;
Ralph Campbellf9315512010-05-23 21:44:54 -07001301 qib_schedule_send(qp);
1302 }
1303 spin_unlock_irqrestore(&qp->s_lock, flags);
1304
1305 /* Notify qib_destroy_qp() if it is waiting. */
Sebastian Sanchez238b1862016-12-07 19:34:00 -08001306 rvt_put_qp(qp);
Ralph Campbellf9315512010-05-23 21:44:54 -07001307 }
1308}
1309
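/*
 * Per-port query callback invoked through rdmavt: report LID, LMC, link
 * state, MTU and active width/speed from the per-port data.
 */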
Harish Chegondi530a5d82016-02-03 14:20:52 -08001310static int qib_query_port(struct rvt_dev_info *rdi, u8 port_num,
Ralph Campbellf9315512010-05-23 21:44:54 -07001311 struct ib_port_attr *props)
1312{
Harish Chegondi530a5d82016-02-03 14:20:52 -08001313 struct qib_ibdev *ibdev = container_of(rdi, struct qib_ibdev, rdi);
1314 struct qib_devdata *dd = dd_from_dev(ibdev);
1315 struct qib_pportdata *ppd = &dd->pport[port_num - 1];
Ralph Campbellf9315512010-05-23 21:44:54 -07001316 enum ib_mtu mtu;
1317 u16 lid = ppd->lid;
1318
Ralph Campbellf9315512010-05-23 21:44:54 -07001319 props->lid = lid ? lid : be16_to_cpu(IB_LID_PERMISSIVE);
1320 props->lmc = ppd->lmc;
Ralph Campbellf9315512010-05-23 21:44:54 -07001321 props->state = dd->f_iblink_state(ppd->lastibcstat);
1322 props->phys_state = dd->f_ibphys_portstate(ppd->lastibcstat);
Ralph Campbellf9315512010-05-23 21:44:54 -07001323 props->gid_tbl_len = QIB_GUIDS_PER_PORT;
Ralph Campbellf9315512010-05-23 21:44:54 -07001324 props->active_width = ppd->link_width_active;
1325 /* See rate_show() */
1326 props->active_speed = ppd->link_speed_active;
1327 props->max_vl_num = qib_num_vls(ppd->vls_supported);
Ralph Campbellf9315512010-05-23 21:44:54 -07001328
1329 props->max_mtu = qib_ibmtu ? qib_ibmtu : IB_MTU_4096;
1330 switch (ppd->ibmtu) {
1331 case 4096:
1332 mtu = IB_MTU_4096;
1333 break;
1334 case 2048:
1335 mtu = IB_MTU_2048;
1336 break;
1337 case 1024:
1338 mtu = IB_MTU_1024;
1339 break;
1340 case 512:
1341 mtu = IB_MTU_512;
1342 break;
1343 case 256:
1344 mtu = IB_MTU_256;
1345 break;
1346 default:
1347 mtu = IB_MTU_2048;
1348 }
1349 props->active_mtu = mtu;
Ralph Campbellf9315512010-05-23 21:44:54 -07001350
1351 return 0;
1352}
1353
1354static int qib_modify_device(struct ib_device *device,
1355 int device_modify_mask,
1356 struct ib_device_modify *device_modify)
1357{
1358 struct qib_devdata *dd = dd_from_ibdev(device);
1359 unsigned i;
1360 int ret;
1361
1362 if (device_modify_mask & ~(IB_DEVICE_MODIFY_SYS_IMAGE_GUID |
1363 IB_DEVICE_MODIFY_NODE_DESC)) {
1364 ret = -EOPNOTSUPP;
1365 goto bail;
1366 }
1367
1368 if (device_modify_mask & IB_DEVICE_MODIFY_NODE_DESC) {
Yuval Shaiabd99fde2016-08-25 10:57:07 -07001369 memcpy(device->node_desc, device_modify->node_desc,
1370 IB_DEVICE_NODE_DESC_MAX);
Ralph Campbellf9315512010-05-23 21:44:54 -07001371 for (i = 0; i < dd->num_pports; i++) {
1372 struct qib_ibport *ibp = &dd->pport[i].ibport_data;
1373
1374 qib_node_desc_chg(ibp);
1375 }
1376 }
1377
1378 if (device_modify_mask & IB_DEVICE_MODIFY_SYS_IMAGE_GUID) {
1379 ib_qib_sys_image_guid =
1380 cpu_to_be64(device_modify->sys_image_guid);
1381 for (i = 0; i < dd->num_pports; i++) {
1382 struct qib_ibport *ibp = &dd->pport[i].ibport_data;
1383
1384 qib_sys_guid_chg(ibp);
1385 }
1386 }
1387
1388 ret = 0;
1389
1390bail:
1391 return ret;
1392}
1393
Harish Chegondi20f333b2016-02-14 12:09:55 -08001394static int qib_shut_down_port(struct rvt_dev_info *rdi, u8 port_num)
Ralph Campbellf9315512010-05-23 21:44:54 -07001395{
Harish Chegondi530a5d82016-02-03 14:20:52 -08001396 struct qib_ibdev *ibdev = container_of(rdi, struct qib_ibdev, rdi);
1397 struct qib_devdata *dd = dd_from_dev(ibdev);
1398 struct qib_pportdata *ppd = &dd->pport[port_num - 1];
Ralph Campbellf9315512010-05-23 21:44:54 -07001399
Harish Chegondi530a5d82016-02-03 14:20:52 -08001400 qib_set_linkstate(ppd, QIB_IB_LINKDOWN);
1401
Ralph Campbellf9315512010-05-23 21:44:54 -07001402 return 0;
1403}
1404
Dennis Dalessandro23667542016-02-03 14:20:44 -08001405static int qib_get_guid_be(struct rvt_dev_info *rdi, struct rvt_ibport *rvp,
1406 int guid_index, __be64 *guid)
Ralph Campbellf9315512010-05-23 21:44:54 -07001407{
Dennis Dalessandro23667542016-02-03 14:20:44 -08001408 struct qib_ibport *ibp = container_of(rvp, struct qib_ibport, rvp);
1409 struct qib_pportdata *ppd = ppd_from_ibp(ibp);
Ralph Campbellf9315512010-05-23 21:44:54 -07001410
Dennis Dalessandro23667542016-02-03 14:20:44 -08001411 if (guid_index == 0)
1412 *guid = ppd->guid;
1413 else if (guid_index < QIB_GUIDS_PER_PORT)
1414 *guid = ibp->guids[guid_index - 1];
1415 else
1416 return -EINVAL;
Ralph Campbellf9315512010-05-23 21:44:54 -07001417
Dennis Dalessandro23667542016-02-03 14:20:44 -08001418 return 0;
Ralph Campbellf9315512010-05-23 21:44:54 -07001419}
1420
Ralph Campbellf9315512010-05-23 21:44:54 -07001421int qib_check_ah(struct ib_device *ibdev, struct ib_ah_attr *ah_attr)
1422{
Ralph Campbellf9315512010-05-23 21:44:54 -07001423 if (ah_attr->sl > 15)
Dennis Dalessandro96ab1ac2016-01-22 12:46:07 -08001424 return -EINVAL;
1425
Ralph Campbellf9315512010-05-23 21:44:54 -07001426 return 0;
Ralph Campbellf9315512010-05-23 21:44:54 -07001427}
1428
Harish Chegondi5418a5a2016-01-22 12:56:08 -08001429static void qib_notify_new_ah(struct ib_device *ibdev,
1430 struct ib_ah_attr *ah_attr,
1431 struct rvt_ah *ah)
1432{
1433 struct qib_ibport *ibp;
1434 struct qib_pportdata *ppd;
1435
1436 /*
1437 * Do not trust reading anything from rvt_ah at this point as it is not
1438 * done being setup. We can however modify things which we need to set.
1439 */
1440
1441 ibp = to_iport(ibdev, ah_attr->port_num);
1442 ppd = ppd_from_ibp(ibp);
1443 ah->vl = ibp->sl_to_vl[ah->attr.sl];
1444 ah->log_pmtu = ilog2(ppd->ibmtu);
1445}
1446
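/*
 * Create an address handle on QP0's PD for the given DLID, used when the
 * driver itself needs to send subnet-management traffic (such as traps)
 * on this port.
 */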
Mike Marciniszyn1fb9fed2012-07-16 17:11:06 +00001447struct ib_ah *qib_create_qp0_ah(struct qib_ibport *ibp, u16 dlid)
1448{
1449 struct ib_ah_attr attr;
1450 struct ib_ah *ah = ERR_PTR(-EINVAL);
Dennis Dalessandro7c2e11f2016-01-22 12:45:59 -08001451 struct rvt_qp *qp0;
Mike Marciniszyn1fb9fed2012-07-16 17:11:06 +00001452
Mike Marciniszyn041af0b2015-01-16 10:50:32 -05001453 memset(&attr, 0, sizeof(attr));
Mike Marciniszyn1fb9fed2012-07-16 17:11:06 +00001454 attr.dlid = dlid;
1455 attr.port_num = ppd_from_ibp(ibp)->port;
1456 rcu_read_lock();
Harish Chegondif24a6d42016-01-22 12:56:02 -08001457 qp0 = rcu_dereference(ibp->rvp.qp[0]);
Mike Marciniszyn1fb9fed2012-07-16 17:11:06 +00001458 if (qp0)
1459 ah = ib_create_ah(qp0->ibqp.pd, &attr);
1460 rcu_read_unlock();
1461 return ah;
1462}
1463
Ralph Campbellf9315512010-05-23 21:44:54 -07001464/**
Ralph Campbellf9315512010-05-23 21:44:54 -07001465 * qib_get_npkeys - return the size of the PKEY table for context 0
1466 * @dd: the qlogic_ib device
1467 */
1468unsigned qib_get_npkeys(struct qib_devdata *dd)
1469{
1470 return ARRAY_SIZE(dd->rcd[0]->pkeys);
1471}
1472
/*
 * Return the indexed PKEY from the port PKEY table.
 * No need to validate rcd[ctxt]; the port is set up if we are here.
 */
unsigned qib_get_pkey(struct qib_ibport *ibp, unsigned index)
{
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	struct qib_devdata *dd = ppd->dd;
	unsigned ctxt = ppd->hw_pidx;
	unsigned ret;

	/* dd->rcd is NULL for mini_init or after some init failures */
	if (!dd->rcd || index >= ARRAY_SIZE(dd->rcd[ctxt]->pkeys))
		ret = 0;
	else
		ret = dd->rcd[ctxt]->pkeys[index];

	return ret;
}

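/**
 * init_ibport - initialize the verbs state for one IB port
 * @ppd: the physical port data
 *
 * Sets the default GID prefix, SM LID, capability mask and PMA
 * counter selects, and snapshots the current hardware counters so
 * later reads are reported relative to this baseline.
 */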
static void init_ibport(struct qib_pportdata *ppd)
{
	struct qib_verbs_counters cntrs;
	struct qib_ibport *ibp = &ppd->ibport_data;

	spin_lock_init(&ibp->rvp.lock);
	/* Set the prefix to the default value (see ch. 4.1.1) */
	ibp->rvp.gid_prefix = IB_DEFAULT_GID_PREFIX;
	ibp->rvp.sm_lid = be16_to_cpu(IB_LID_PERMISSIVE);
	ibp->rvp.port_cap_flags = IB_PORT_SYS_IMAGE_GUID_SUP |
		IB_PORT_CLIENT_REG_SUP | IB_PORT_SL_MAP_SUP |
		IB_PORT_TRAP_SUP | IB_PORT_AUTO_MIGR_SUP |
		IB_PORT_DR_NOTICE_SUP | IB_PORT_CAP_MASK_NOTICE_SUP |
		IB_PORT_OTHER_LOCAL_CHANGES_SUP;
	if (ppd->dd->flags & QIB_HAS_LINK_LATENCY)
		ibp->rvp.port_cap_flags |= IB_PORT_LINK_LATENCY_SUP;
	ibp->rvp.pma_counter_select[0] = IB_PMA_PORT_XMIT_DATA;
	ibp->rvp.pma_counter_select[1] = IB_PMA_PORT_RCV_DATA;
	ibp->rvp.pma_counter_select[2] = IB_PMA_PORT_XMIT_PKTS;
	ibp->rvp.pma_counter_select[3] = IB_PMA_PORT_RCV_PKTS;
	ibp->rvp.pma_counter_select[4] = IB_PMA_PORT_XMIT_WAIT;

	/* Snapshot current HW counters to "clear" them. */
	qib_get_counters(ppd, &cntrs);
	ibp->z_symbol_error_counter = cntrs.symbol_error_counter;
	ibp->z_link_error_recovery_counter =
		cntrs.link_error_recovery_counter;
	ibp->z_link_downed_counter = cntrs.link_downed_counter;
	ibp->z_port_rcv_errors = cntrs.port_rcv_errors;
	ibp->z_port_rcv_remphys_errors = cntrs.port_rcv_remphys_errors;
	ibp->z_port_xmit_discards = cntrs.port_xmit_discards;
	ibp->z_port_xmit_data = cntrs.port_xmit_data;
	ibp->z_port_rcv_data = cntrs.port_rcv_data;
	ibp->z_port_xmit_packets = cntrs.port_xmit_packets;
	ibp->z_port_rcv_packets = cntrs.port_rcv_packets;
	ibp->z_local_link_integrity_errors =
		cntrs.local_link_integrity_errors;
	ibp->z_excessive_buffer_overrun_errors =
		cntrs.excessive_buffer_overrun_errors;
	ibp->z_vl15_dropped = cntrs.vl15_dropped;
	RCU_INIT_POINTER(ibp->rvp.qp[0], NULL);
	RCU_INIT_POINTER(ibp->rvp.qp[1], NULL);
}

/**
 * qib_fill_device_attr - Fill in rvt dev info device attributes.
 * @dd: the device data structure
 */
static void qib_fill_device_attr(struct qib_devdata *dd)
{
	struct rvt_dev_info *rdi = &dd->verbs_dev.rdi;

	memset(&rdi->dparms.props, 0, sizeof(rdi->dparms.props));

	rdi->dparms.props.max_pd = ib_qib_max_pds;
	rdi->dparms.props.max_ah = ib_qib_max_ahs;
	rdi->dparms.props.device_cap_flags = IB_DEVICE_BAD_PKEY_CNTR |
		IB_DEVICE_BAD_QKEY_CNTR | IB_DEVICE_SHUTDOWN_PORT |
		IB_DEVICE_SYS_IMAGE_GUID | IB_DEVICE_RC_RNR_NAK_GEN |
		IB_DEVICE_PORT_ACTIVE_EVENT | IB_DEVICE_SRQ_RESIZE;
	rdi->dparms.props.page_size_cap = PAGE_SIZE;
	rdi->dparms.props.vendor_id =
		QIB_SRC_OUI_1 << 16 | QIB_SRC_OUI_2 << 8 | QIB_SRC_OUI_3;
	rdi->dparms.props.vendor_part_id = dd->deviceid;
	rdi->dparms.props.hw_ver = dd->minrev;
	rdi->dparms.props.sys_image_guid = ib_qib_sys_image_guid;
	rdi->dparms.props.max_mr_size = ~0ULL;
	rdi->dparms.props.max_qp = ib_qib_max_qps;
	rdi->dparms.props.max_qp_wr = ib_qib_max_qp_wrs;
	rdi->dparms.props.max_sge = ib_qib_max_sges;
	rdi->dparms.props.max_sge_rd = ib_qib_max_sges;
	rdi->dparms.props.max_cq = ib_qib_max_cqs;
	rdi->dparms.props.max_cqe = ib_qib_max_cqes;
	rdi->dparms.props.max_ah = ib_qib_max_ahs;
	rdi->dparms.props.max_mr = rdi->lkey_table.max;
	rdi->dparms.props.max_fmr = rdi->lkey_table.max;
	rdi->dparms.props.max_map_per_fmr = 32767;
	rdi->dparms.props.max_qp_rd_atom = QIB_MAX_RDMA_ATOMIC;
	rdi->dparms.props.max_qp_init_rd_atom = 255;
	rdi->dparms.props.max_srq = ib_qib_max_srqs;
	rdi->dparms.props.max_srq_wr = ib_qib_max_srq_wrs;
	rdi->dparms.props.max_srq_sge = ib_qib_max_srq_sges;
	rdi->dparms.props.atomic_cap = IB_ATOMIC_GLOB;
	rdi->dparms.props.max_pkeys = qib_get_npkeys(dd);
	rdi->dparms.props.max_mcast_grp = ib_qib_max_mcast_grps;
	rdi->dparms.props.max_mcast_qp_attach = ib_qib_max_mcast_qp_attached;
	rdi->dparms.props.max_total_mcast_qp_attach =
		rdi->dparms.props.max_mcast_qp_attach *
		rdi->dparms.props.max_mcast_grp;
	/* post send table */
	dd->verbs_dev.rdi.post_parms = qib_post_parms;
}

/**
 * qib_register_ib_device - register our device with the infiniband core
 * @dd: the device data structure
 *
 * Return 0 on success or a negative errno on failure.
 */
int qib_register_ib_device(struct qib_devdata *dd)
{
	struct qib_ibdev *dev = &dd->verbs_dev;
	struct ib_device *ibdev = &dev->rdi.ibdev;
	struct qib_pportdata *ppd = dd->pport;
	unsigned i, ctxt;
	int ret;

	get_random_bytes(&dev->qp_rnd, sizeof(dev->qp_rnd));
	for (i = 0; i < dd->num_pports; i++)
		init_ibport(ppd + i);

	/* Only need to initialize non-zero fields. */
	setup_timer(&dev->mem_timer, mem_timer, (unsigned long)dev);

	INIT_LIST_HEAD(&dev->piowait);
	INIT_LIST_HEAD(&dev->dmawait);
	INIT_LIST_HEAD(&dev->txwait);
	INIT_LIST_HEAD(&dev->memwait);
	INIT_LIST_HEAD(&dev->txreq_free);

	if (ppd->sdma_descq_cnt) {
		dev->pio_hdrs = dma_alloc_coherent(&dd->pcidev->dev,
						   ppd->sdma_descq_cnt *
						   sizeof(struct qib_pio_header),
						   &dev->pio_hdrs_phys,
						   GFP_KERNEL);
		if (!dev->pio_hdrs) {
			ret = -ENOMEM;
			goto err_hdrs;
		}
	}

	for (i = 0; i < ppd->sdma_descq_cnt; i++) {
		struct qib_verbs_txreq *tx;

		tx = kzalloc(sizeof(*tx), GFP_KERNEL);
		if (!tx) {
			ret = -ENOMEM;
			goto err_tx;
		}
		tx->hdr_inx = i;
		list_add(&tx->txreq.list, &dev->txreq_free);
	}

	/*
	 * The system image GUID is supposed to be the same for all
	 * IB HCAs in a single system but since there can be other
	 * device types in the system, we can't be sure this is unique.
	 */
	if (!ib_qib_sys_image_guid)
		ib_qib_sys_image_guid = ppd->guid;

	strlcpy(ibdev->name, "qib%d", IB_DEVICE_NAME_MAX);
	ibdev->owner = THIS_MODULE;
	ibdev->node_guid = ppd->guid;
	ibdev->phys_port_cnt = dd->num_pports;
	ibdev->dma_device = &dd->pcidev->dev;
	ibdev->modify_device = qib_modify_device;
	ibdev->process_mad = qib_process_mad;

	snprintf(ibdev->node_desc, sizeof(ibdev->node_desc),
		 "Intel Infiniband HCA %s", init_utsname()->nodename);

	/*
	 * Fill in rvt info object.
	 */
	dd->verbs_dev.rdi.driver_f.port_callback = qib_create_port_files;
	dd->verbs_dev.rdi.driver_f.get_card_name = qib_get_card_name;
	dd->verbs_dev.rdi.driver_f.get_pci_dev = qib_get_pci_dev;
	dd->verbs_dev.rdi.driver_f.check_ah = qib_check_ah;
	dd->verbs_dev.rdi.driver_f.check_send_wqe = qib_check_send_wqe;
	dd->verbs_dev.rdi.driver_f.notify_new_ah = qib_notify_new_ah;
	dd->verbs_dev.rdi.driver_f.alloc_qpn = qib_alloc_qpn;
	dd->verbs_dev.rdi.driver_f.qp_priv_alloc = qib_qp_priv_alloc;
	dd->verbs_dev.rdi.driver_f.qp_priv_free = qib_qp_priv_free;
	dd->verbs_dev.rdi.driver_f.free_all_qps = qib_free_all_qps;
	dd->verbs_dev.rdi.driver_f.notify_qp_reset = qib_notify_qp_reset;
	dd->verbs_dev.rdi.driver_f.do_send = qib_do_send;
	dd->verbs_dev.rdi.driver_f.schedule_send = qib_schedule_send;
	dd->verbs_dev.rdi.driver_f.quiesce_qp = qib_quiesce_qp;
	dd->verbs_dev.rdi.driver_f.stop_send_queue = qib_stop_send_queue;
	dd->verbs_dev.rdi.driver_f.flush_qp_waiters = qib_flush_qp_waiters;
	dd->verbs_dev.rdi.driver_f.notify_error_qp = qib_notify_error_qp;
	dd->verbs_dev.rdi.driver_f.mtu_to_path_mtu = qib_mtu_to_path_mtu;
	dd->verbs_dev.rdi.driver_f.mtu_from_qp = qib_mtu_from_qp;
	dd->verbs_dev.rdi.driver_f.get_pmtu_from_attr = qib_get_pmtu_from_attr;
	dd->verbs_dev.rdi.driver_f.schedule_send_no_lock = _qib_schedule_send;
	dd->verbs_dev.rdi.driver_f.query_port_state = qib_query_port;
	dd->verbs_dev.rdi.driver_f.shut_down_port = qib_shut_down_port;
	dd->verbs_dev.rdi.driver_f.cap_mask_chg = qib_cap_mask_chg;
	dd->verbs_dev.rdi.driver_f.notify_create_mad_agent =
					qib_notify_create_mad_agent;
	dd->verbs_dev.rdi.driver_f.notify_free_mad_agent =
					qib_notify_free_mad_agent;

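	/*
	 * rdmavt driver parameters: table sizes, the QPN allocation range,
	 * PSN handling and per-port values used by the common verbs layer.
	 */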
	dd->verbs_dev.rdi.dparms.max_rdma_atomic = QIB_MAX_RDMA_ATOMIC;
	dd->verbs_dev.rdi.driver_f.get_guid_be = qib_get_guid_be;
	dd->verbs_dev.rdi.dparms.lkey_table_size = qib_lkey_table_size;
	dd->verbs_dev.rdi.dparms.qp_table_size = ib_qib_qp_table_size;
	dd->verbs_dev.rdi.dparms.qpn_start = 1;
	dd->verbs_dev.rdi.dparms.qpn_res_start = QIB_KD_QP;
	dd->verbs_dev.rdi.dparms.qpn_res_end = QIB_KD_QP; /* Reserve one QP */
	dd->verbs_dev.rdi.dparms.qpn_inc = 1;
	dd->verbs_dev.rdi.dparms.qos_shift = 1;
	dd->verbs_dev.rdi.dparms.psn_mask = QIB_PSN_MASK;
	dd->verbs_dev.rdi.dparms.psn_shift = QIB_PSN_SHIFT;
	dd->verbs_dev.rdi.dparms.psn_modify_mask = QIB_PSN_MASK;
	dd->verbs_dev.rdi.dparms.nports = dd->num_pports;
	dd->verbs_dev.rdi.dparms.npkeys = qib_get_npkeys(dd);
	dd->verbs_dev.rdi.dparms.node = dd->assigned_node_id;
	dd->verbs_dev.rdi.dparms.core_cap_flags = RDMA_CORE_PORT_IBA_IB;
	dd->verbs_dev.rdi.dparms.max_mad_size = IB_MGMT_MAD_SIZE;

	snprintf(dd->verbs_dev.rdi.dparms.cq_name,
		 sizeof(dd->verbs_dev.rdi.dparms.cq_name),
		 "qib_cq%d", dd->unit);

	qib_fill_device_attr(dd);

	ppd = dd->pport;
	for (i = 0; i < dd->num_pports; i++, ppd++) {
		ctxt = ppd->hw_pidx;
		rvt_init_port(&dd->verbs_dev.rdi,
			      &ppd->ibport_data.rvp,
			      i,
			      dd->rcd[ctxt]->pkeys);
	}

	ret = rvt_register_device(&dd->verbs_dev.rdi);
	if (ret)
		goto err_tx;

	ret = qib_verbs_register_sysfs(dd);
	if (ret)
		goto err_class;

	return ret;

err_class:
	rvt_unregister_device(&dd->verbs_dev.rdi);
err_tx:
	while (!list_empty(&dev->txreq_free)) {
		struct list_head *l = dev->txreq_free.next;
		struct qib_verbs_txreq *tx;

		list_del(l);
		tx = list_entry(l, struct qib_verbs_txreq, txreq.list);
		kfree(tx);
	}
	if (ppd->sdma_descq_cnt)
		dma_free_coherent(&dd->pcidev->dev,
				  ppd->sdma_descq_cnt *
				  sizeof(struct qib_pio_header),
				  dev->pio_hdrs, dev->pio_hdrs_phys);
err_hdrs:
	qib_dev_err(dd, "cannot register verbs: %d!\n", -ret);
	return ret;
}

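/**
 * qib_unregister_ib_device - detach from the verbs core and rdmavt
 * @dd: the device data structure
 *
 * Undoes qib_register_ib_device(): removes the sysfs files,
 * unregisters from rdmavt, warns if anything is still waiting on
 * the progress lists, and frees the PIO header and txreq memory.
 */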
void qib_unregister_ib_device(struct qib_devdata *dd)
{
	struct qib_ibdev *dev = &dd->verbs_dev;

	qib_verbs_unregister_sysfs(dd);

	rvt_unregister_device(&dd->verbs_dev.rdi);

	if (!list_empty(&dev->piowait))
		qib_dev_err(dd, "piowait list not empty!\n");
	if (!list_empty(&dev->dmawait))
		qib_dev_err(dd, "dmawait list not empty!\n");
	if (!list_empty(&dev->txwait))
		qib_dev_err(dd, "txwait list not empty!\n");
	if (!list_empty(&dev->memwait))
		qib_dev_err(dd, "memwait list not empty!\n");

	del_timer_sync(&dev->mem_timer);
	while (!list_empty(&dev->txreq_free)) {
		struct list_head *l = dev->txreq_free.next;
		struct qib_verbs_txreq *tx;

		list_del(l);
		tx = list_entry(l, struct qib_verbs_txreq, txreq.list);
		kfree(tx);
	}
	if (dd->pport->sdma_descq_cnt)
		dma_free_coherent(&dd->pcidev->dev,
				  dd->pport->sdma_descq_cnt *
				  sizeof(struct qib_pio_header),
				  dev->pio_hdrs, dev->pio_hdrs_phys);
}

/**
 * _qib_schedule_send - schedule progress
 * @qp: the QP
 *
 * This schedules QP progress without regard to the s_flags.
 *
 * It is only used in the post send path, which does not hold
 * the s_lock.
 */
void _qib_schedule_send(struct rvt_qp *qp)
{
	struct qib_ibport *ibp =
		to_iport(qp->ibqp.device, qp->port_num);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	struct qib_qp_priv *priv = qp->priv;

	queue_work(ppd->qib_wq, &priv->s_work);
}

/**
 * qib_schedule_send - schedule progress
 * @qp: the QP
 *
 * This schedules QP progress; the caller should hold the s_lock.
 */
void qib_schedule_send(struct rvt_qp *qp)
{
	if (qib_send_ok(qp))
		_qib_schedule_send(qp);
}