/*
 * Copyright(c) 2015 - 2018 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <rdma/ib_mad.h>
#include <rdma/ib_user_verbs.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/utsname.h>
#include <linux/rculist.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <rdma/opa_addr.h>
#include <linux/nospec.h>

#include "hfi.h"
#include "common.h"
#include "device.h"
#include "trace.h"
#include "qp.h"
#include "verbs_txreq.h"
#include "debugfs.h"
#include "vnic.h"
#include "fault.h"
#include "affinity.h"

static unsigned int hfi1_lkey_table_size = 16;
module_param_named(lkey_table_size, hfi1_lkey_table_size, uint,
		   S_IRUGO);
MODULE_PARM_DESC(lkey_table_size,
		 "LKEY table size in bits (2^n, 1 <= n <= 23)");

static unsigned int hfi1_max_pds = 0xFFFF;
module_param_named(max_pds, hfi1_max_pds, uint, S_IRUGO);
MODULE_PARM_DESC(max_pds,
		 "Maximum number of protection domains to support");

static unsigned int hfi1_max_ahs = 0xFFFF;
module_param_named(max_ahs, hfi1_max_ahs, uint, S_IRUGO);
MODULE_PARM_DESC(max_ahs, "Maximum number of address handles to support");

unsigned int hfi1_max_cqes = 0x2FFFFF;
module_param_named(max_cqes, hfi1_max_cqes, uint, S_IRUGO);
MODULE_PARM_DESC(max_cqes,
		 "Maximum number of completion queue entries to support");

unsigned int hfi1_max_cqs = 0x1FFFF;
module_param_named(max_cqs, hfi1_max_cqs, uint, S_IRUGO);
MODULE_PARM_DESC(max_cqs, "Maximum number of completion queues to support");

unsigned int hfi1_max_qp_wrs = 0x3FFF;
module_param_named(max_qp_wrs, hfi1_max_qp_wrs, uint, S_IRUGO);
MODULE_PARM_DESC(max_qp_wrs, "Maximum number of QP WRs to support");

unsigned int hfi1_max_qps = 32768;
module_param_named(max_qps, hfi1_max_qps, uint, S_IRUGO);
MODULE_PARM_DESC(max_qps, "Maximum number of QPs to support");

unsigned int hfi1_max_sges = 0x60;
module_param_named(max_sges, hfi1_max_sges, uint, S_IRUGO);
MODULE_PARM_DESC(max_sges, "Maximum number of SGEs to support");

unsigned int hfi1_max_mcast_grps = 16384;
module_param_named(max_mcast_grps, hfi1_max_mcast_grps, uint, S_IRUGO);
MODULE_PARM_DESC(max_mcast_grps,
		 "Maximum number of multicast groups to support");

unsigned int hfi1_max_mcast_qp_attached = 16;
module_param_named(max_mcast_qp_attached, hfi1_max_mcast_qp_attached,
		   uint, S_IRUGO);
MODULE_PARM_DESC(max_mcast_qp_attached,
		 "Maximum number of attached QPs to support");

unsigned int hfi1_max_srqs = 1024;
module_param_named(max_srqs, hfi1_max_srqs, uint, S_IRUGO);
MODULE_PARM_DESC(max_srqs, "Maximum number of SRQs to support");

unsigned int hfi1_max_srq_sges = 128;
module_param_named(max_srq_sges, hfi1_max_srq_sges, uint, S_IRUGO);
MODULE_PARM_DESC(max_srq_sges, "Maximum number of SRQ SGEs to support");

unsigned int hfi1_max_srq_wrs = 0x1FFFF;
module_param_named(max_srq_wrs, hfi1_max_srq_wrs, uint, S_IRUGO);
MODULE_PARM_DESC(max_srq_wrs, "Maximum number of SRQ WRs to support");

unsigned short piothreshold = 256;
module_param(piothreshold, ushort, S_IRUGO);
MODULE_PARM_DESC(piothreshold, "size used to determine sdma vs. pio");

static unsigned int sge_copy_mode;
module_param(sge_copy_mode, uint, S_IRUGO);
MODULE_PARM_DESC(sge_copy_mode,
		 "Verbs copy mode: 0 use memcpy, 1 use cacheless copy, 2 adapt based on WSS");

static void verbs_sdma_complete(
	struct sdma_txreq *cookie,
	int status);

static int pio_wait(struct rvt_qp *qp,
		    struct send_context *sc,
		    struct hfi1_pkt_state *ps,
		    u32 flag);

/* Length of buffer to create verbs txreq cache name */
#define TXREQ_NAME_LEN 24

static uint wss_threshold = 80;
module_param(wss_threshold, uint, S_IRUGO);
MODULE_PARM_DESC(wss_threshold, "Percentage (1-100) of LLC to use as a threshold for a cacheless copy");
static uint wss_clean_period = 256;
module_param(wss_clean_period, uint, S_IRUGO);
MODULE_PARM_DESC(wss_clean_period, "Count of verbs copies before an entry in the page copy table is cleaned");

/*
 * Translate ib_wr_opcode into ib_wc_opcode.
 */
const enum ib_wc_opcode ib_hfi1_wc_opcode[] = {
	[IB_WR_RDMA_WRITE] = IB_WC_RDMA_WRITE,
	[IB_WR_TID_RDMA_WRITE] = IB_WC_RDMA_WRITE,
	[IB_WR_RDMA_WRITE_WITH_IMM] = IB_WC_RDMA_WRITE,
	[IB_WR_SEND] = IB_WC_SEND,
	[IB_WR_SEND_WITH_IMM] = IB_WC_SEND,
	[IB_WR_RDMA_READ] = IB_WC_RDMA_READ,
	[IB_WR_TID_RDMA_READ] = IB_WC_RDMA_READ,
	[IB_WR_ATOMIC_CMP_AND_SWP] = IB_WC_COMP_SWAP,
	[IB_WR_ATOMIC_FETCH_AND_ADD] = IB_WC_FETCH_ADD,
	[IB_WR_SEND_WITH_INV] = IB_WC_SEND,
	[IB_WR_LOCAL_INV] = IB_WC_LOCAL_INV,
	[IB_WR_REG_MR] = IB_WC_REG_MR
};

/*
 * Length of header by opcode, 0 --> not supported
 */
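/* Each entry is LRH (8) + BTH (12) plus any extension headers, in bytes. */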
const u8 hdr_len_by_opcode[256] = {
	/* RC */
	[IB_OPCODE_RC_SEND_FIRST] = 12 + 8,
	[IB_OPCODE_RC_SEND_MIDDLE] = 12 + 8,
	[IB_OPCODE_RC_SEND_LAST] = 12 + 8,
	[IB_OPCODE_RC_SEND_LAST_WITH_IMMEDIATE] = 12 + 8 + 4,
	[IB_OPCODE_RC_SEND_ONLY] = 12 + 8,
	[IB_OPCODE_RC_SEND_ONLY_WITH_IMMEDIATE] = 12 + 8 + 4,
	[IB_OPCODE_RC_RDMA_WRITE_FIRST] = 12 + 8 + 16,
	[IB_OPCODE_RC_RDMA_WRITE_MIDDLE] = 12 + 8,
	[IB_OPCODE_RC_RDMA_WRITE_LAST] = 12 + 8,
	[IB_OPCODE_RC_RDMA_WRITE_LAST_WITH_IMMEDIATE] = 12 + 8 + 4,
	[IB_OPCODE_RC_RDMA_WRITE_ONLY] = 12 + 8 + 16,
	[IB_OPCODE_RC_RDMA_WRITE_ONLY_WITH_IMMEDIATE] = 12 + 8 + 20,
	[IB_OPCODE_RC_RDMA_READ_REQUEST] = 12 + 8 + 16,
	[IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST] = 12 + 8 + 4,
	[IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE] = 12 + 8,
	[IB_OPCODE_RC_RDMA_READ_RESPONSE_LAST] = 12 + 8 + 4,
	[IB_OPCODE_RC_RDMA_READ_RESPONSE_ONLY] = 12 + 8 + 4,
	[IB_OPCODE_RC_ACKNOWLEDGE] = 12 + 8 + 4,
	[IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE] = 12 + 8 + 4 + 8,
	[IB_OPCODE_RC_COMPARE_SWAP] = 12 + 8 + 28,
	[IB_OPCODE_RC_FETCH_ADD] = 12 + 8 + 28,
	[IB_OPCODE_RC_SEND_LAST_WITH_INVALIDATE] = 12 + 8 + 4,
	[IB_OPCODE_RC_SEND_ONLY_WITH_INVALIDATE] = 12 + 8 + 4,
	[IB_OPCODE_TID_RDMA_READ_REQ] = 12 + 8 + 36,
	[IB_OPCODE_TID_RDMA_READ_RESP] = 12 + 8 + 36,
	[IB_OPCODE_TID_RDMA_WRITE_REQ] = 12 + 8 + 36,
	[IB_OPCODE_TID_RDMA_WRITE_RESP] = 12 + 8 + 36,
	[IB_OPCODE_TID_RDMA_WRITE_DATA] = 12 + 8 + 36,
	[IB_OPCODE_TID_RDMA_WRITE_DATA_LAST] = 12 + 8 + 36,
	[IB_OPCODE_TID_RDMA_ACK] = 12 + 8 + 36,
	[IB_OPCODE_TID_RDMA_RESYNC] = 12 + 8 + 36,
	/* UC */
	[IB_OPCODE_UC_SEND_FIRST] = 12 + 8,
	[IB_OPCODE_UC_SEND_MIDDLE] = 12 + 8,
	[IB_OPCODE_UC_SEND_LAST] = 12 + 8,
	[IB_OPCODE_UC_SEND_LAST_WITH_IMMEDIATE] = 12 + 8 + 4,
	[IB_OPCODE_UC_SEND_ONLY] = 12 + 8,
	[IB_OPCODE_UC_SEND_ONLY_WITH_IMMEDIATE] = 12 + 8 + 4,
	[IB_OPCODE_UC_RDMA_WRITE_FIRST] = 12 + 8 + 16,
	[IB_OPCODE_UC_RDMA_WRITE_MIDDLE] = 12 + 8,
	[IB_OPCODE_UC_RDMA_WRITE_LAST] = 12 + 8,
	[IB_OPCODE_UC_RDMA_WRITE_LAST_WITH_IMMEDIATE] = 12 + 8 + 4,
	[IB_OPCODE_UC_RDMA_WRITE_ONLY] = 12 + 8 + 16,
	[IB_OPCODE_UC_RDMA_WRITE_ONLY_WITH_IMMEDIATE] = 12 + 8 + 20,
	/* UD */
	[IB_OPCODE_UD_SEND_ONLY] = 12 + 8 + 8,
	[IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE] = 12 + 8 + 12
};

static const opcode_handler opcode_handler_tbl[256] = {
	/* RC */
	[IB_OPCODE_RC_SEND_FIRST] = &hfi1_rc_rcv,
	[IB_OPCODE_RC_SEND_MIDDLE] = &hfi1_rc_rcv,
	[IB_OPCODE_RC_SEND_LAST] = &hfi1_rc_rcv,
	[IB_OPCODE_RC_SEND_LAST_WITH_IMMEDIATE] = &hfi1_rc_rcv,
	[IB_OPCODE_RC_SEND_ONLY] = &hfi1_rc_rcv,
	[IB_OPCODE_RC_SEND_ONLY_WITH_IMMEDIATE] = &hfi1_rc_rcv,
	[IB_OPCODE_RC_RDMA_WRITE_FIRST] = &hfi1_rc_rcv,
	[IB_OPCODE_RC_RDMA_WRITE_MIDDLE] = &hfi1_rc_rcv,
	[IB_OPCODE_RC_RDMA_WRITE_LAST] = &hfi1_rc_rcv,
	[IB_OPCODE_RC_RDMA_WRITE_LAST_WITH_IMMEDIATE] = &hfi1_rc_rcv,
	[IB_OPCODE_RC_RDMA_WRITE_ONLY] = &hfi1_rc_rcv,
	[IB_OPCODE_RC_RDMA_WRITE_ONLY_WITH_IMMEDIATE] = &hfi1_rc_rcv,
	[IB_OPCODE_RC_RDMA_READ_REQUEST] = &hfi1_rc_rcv,
	[IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST] = &hfi1_rc_rcv,
	[IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE] = &hfi1_rc_rcv,
	[IB_OPCODE_RC_RDMA_READ_RESPONSE_LAST] = &hfi1_rc_rcv,
	[IB_OPCODE_RC_RDMA_READ_RESPONSE_ONLY] = &hfi1_rc_rcv,
	[IB_OPCODE_RC_ACKNOWLEDGE] = &hfi1_rc_rcv,
	[IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE] = &hfi1_rc_rcv,
	[IB_OPCODE_RC_COMPARE_SWAP] = &hfi1_rc_rcv,
	[IB_OPCODE_RC_FETCH_ADD] = &hfi1_rc_rcv,
	[IB_OPCODE_RC_SEND_LAST_WITH_INVALIDATE] = &hfi1_rc_rcv,
	[IB_OPCODE_RC_SEND_ONLY_WITH_INVALIDATE] = &hfi1_rc_rcv,

	/* TID RDMA has separate handlers for different opcodes.*/
	[IB_OPCODE_TID_RDMA_WRITE_REQ] = &hfi1_rc_rcv_tid_rdma_write_req,
	[IB_OPCODE_TID_RDMA_WRITE_RESP] = &hfi1_rc_rcv_tid_rdma_write_resp,
	[IB_OPCODE_TID_RDMA_WRITE_DATA] = &hfi1_rc_rcv_tid_rdma_write_data,
	[IB_OPCODE_TID_RDMA_WRITE_DATA_LAST] = &hfi1_rc_rcv_tid_rdma_write_data,
	[IB_OPCODE_TID_RDMA_READ_REQ] = &hfi1_rc_rcv_tid_rdma_read_req,
	[IB_OPCODE_TID_RDMA_READ_RESP] = &hfi1_rc_rcv_tid_rdma_read_resp,
	[IB_OPCODE_TID_RDMA_RESYNC] = &hfi1_rc_rcv_tid_rdma_resync,
	[IB_OPCODE_TID_RDMA_ACK] = &hfi1_rc_rcv_tid_rdma_ack,

	/* UC */
	[IB_OPCODE_UC_SEND_FIRST] = &hfi1_uc_rcv,
	[IB_OPCODE_UC_SEND_MIDDLE] = &hfi1_uc_rcv,
	[IB_OPCODE_UC_SEND_LAST] = &hfi1_uc_rcv,
	[IB_OPCODE_UC_SEND_LAST_WITH_IMMEDIATE] = &hfi1_uc_rcv,
	[IB_OPCODE_UC_SEND_ONLY] = &hfi1_uc_rcv,
	[IB_OPCODE_UC_SEND_ONLY_WITH_IMMEDIATE] = &hfi1_uc_rcv,
	[IB_OPCODE_UC_RDMA_WRITE_FIRST] = &hfi1_uc_rcv,
	[IB_OPCODE_UC_RDMA_WRITE_MIDDLE] = &hfi1_uc_rcv,
	[IB_OPCODE_UC_RDMA_WRITE_LAST] = &hfi1_uc_rcv,
	[IB_OPCODE_UC_RDMA_WRITE_LAST_WITH_IMMEDIATE] = &hfi1_uc_rcv,
	[IB_OPCODE_UC_RDMA_WRITE_ONLY] = &hfi1_uc_rcv,
	[IB_OPCODE_UC_RDMA_WRITE_ONLY_WITH_IMMEDIATE] = &hfi1_uc_rcv,
	/* UD */
	[IB_OPCODE_UD_SEND_ONLY] = &hfi1_ud_rcv,
	[IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE] = &hfi1_ud_rcv,
	/* CNP */
	[IB_OPCODE_CNP] = &hfi1_cnp_rcv
};

#define OPMASK 0x1f

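/*
 * Bitmask of opcodes (low five bits) eligible for PIO send, indexed by
 * the opcode major group (upper three bits).  Only single-packet
 * opcodes and RC acknowledge/read/atomic requests are listed.
 */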
static const u32 pio_opmask[BIT(3)] = {
	/* RC */
	[IB_OPCODE_RC >> 5] =
		BIT(RC_OP(SEND_ONLY) & OPMASK) |
		BIT(RC_OP(SEND_ONLY_WITH_IMMEDIATE) & OPMASK) |
		BIT(RC_OP(RDMA_WRITE_ONLY) & OPMASK) |
		BIT(RC_OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE) & OPMASK) |
		BIT(RC_OP(RDMA_READ_REQUEST) & OPMASK) |
		BIT(RC_OP(ACKNOWLEDGE) & OPMASK) |
		BIT(RC_OP(ATOMIC_ACKNOWLEDGE) & OPMASK) |
		BIT(RC_OP(COMPARE_SWAP) & OPMASK) |
		BIT(RC_OP(FETCH_ADD) & OPMASK),
	/* UC */
	[IB_OPCODE_UC >> 5] =
		BIT(UC_OP(SEND_ONLY) & OPMASK) |
		BIT(UC_OP(SEND_ONLY_WITH_IMMEDIATE) & OPMASK) |
		BIT(UC_OP(RDMA_WRITE_ONLY) & OPMASK) |
		BIT(UC_OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE) & OPMASK),
};

/*
 * System image GUID.
 */
__be64 ib_hfi1_sys_image_guid;

/*
 * Make sure the QP is ready and able to accept the given opcode.
 */
static inline opcode_handler qp_ok(struct hfi1_packet *packet)
{
	if (!(ib_rvt_state_ops[packet->qp->state] & RVT_PROCESS_RECV_OK))
		return NULL;
	if (((packet->opcode & RVT_OPCODE_QP_MASK) ==
	     packet->qp->allowed_ops) ||
	    (packet->opcode == IB_OPCODE_CNP))
		return opcode_handler_tbl[packet->opcode];

	return NULL;
}

static u64 hfi1_fault_tx(struct rvt_qp *qp, u8 opcode, u64 pbc)
{
#ifdef CONFIG_FAULT_INJECTION
	if ((opcode & IB_OPCODE_MSP) == IB_OPCODE_MSP) {
		/*
		 * In order to drop non-IB traffic we
		 * set PbcInsertHrc to NONE (0x2).
		 * The packet will still be delivered
		 * to the receiving node but a
		 * KHdrHCRCErr (KDETH packet with a bad
		 * HCRC) will be triggered and the
		 * packet will not be delivered to the
		 * correct context.
		 */
		pbc &= ~PBC_INSERT_HCRC_SMASK;
		pbc |= (u64)PBC_IHCRC_NONE << PBC_INSERT_HCRC_SHIFT;
	} else {
		/*
		 * In order to drop regular verbs
		 * traffic we set the PbcTestEbp
		 * flag. The packet will still be
		 * delivered to the receiving node but
		 * a 'late ebp error' will be
		 * triggered and will be dropped.
		 */
		pbc |= PBC_TEST_EBP;
	}
#endif
	return pbc;
}

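/*
 * Make sure the QP is an RC QP that is ready to accept the given
 * TID RDMA opcode.
 */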
static opcode_handler tid_qp_ok(int opcode, struct hfi1_packet *packet)
{
	if (packet->qp->ibqp.qp_type != IB_QPT_RC ||
	    !(ib_rvt_state_ops[packet->qp->state] & RVT_PROCESS_RECV_OK))
		return NULL;
	if ((opcode & RVT_OPCODE_QP_MASK) == IB_OPCODE_TID_RDMA)
		return opcode_handler_tbl[opcode];
	return NULL;
}

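/*
 * hfi1_kdeth_eager_rcv - process a TID RDMA packet landed in an eager
 * buffer.  The verbs QP number is recovered from the TID RDMA header
 * and the packet is dispatched to the matching TID RDMA opcode handler
 * under the QP's r_lock.
 */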
void hfi1_kdeth_eager_rcv(struct hfi1_packet *packet)
{
	struct hfi1_ctxtdata *rcd = packet->rcd;
	struct ib_header *hdr = packet->hdr;
	u32 tlen = packet->tlen;
	struct hfi1_pportdata *ppd = rcd->ppd;
	struct hfi1_ibport *ibp = &ppd->ibport_data;
	struct rvt_dev_info *rdi = &ppd->dd->verbs_dev.rdi;
	opcode_handler opcode_handler;
	unsigned long flags;
	u32 qp_num;
	int lnh;
	u8 opcode;

	/* DW == LRH (2) + BTH (3) + KDETH (9) + CRC (1) */
	if (unlikely(tlen < 15 * sizeof(u32)))
		goto drop;

	lnh = be16_to_cpu(hdr->lrh[0]) & 3;
	if (lnh != HFI1_LRH_BTH)
		goto drop;

	packet->ohdr = &hdr->u.oth;
	trace_input_ibhdr(rcd->dd, packet, !!(rhf_dc_info(packet->rhf)));

	opcode = (be32_to_cpu(packet->ohdr->bth[0]) >> 24);
	inc_opstats(tlen, &rcd->opstats->stats[opcode]);

	/* verbs_qp can be picked up from any tid_rdma header struct */
	qp_num = be32_to_cpu(packet->ohdr->u.tid_rdma.r_req.verbs_qp) &
		RVT_QPN_MASK;

	rcu_read_lock();
	packet->qp = rvt_lookup_qpn(rdi, &ibp->rvp, qp_num);
	if (!packet->qp)
		goto drop_rcu;
	spin_lock_irqsave(&packet->qp->r_lock, flags);
	opcode_handler = tid_qp_ok(opcode, packet);
	if (likely(opcode_handler))
		opcode_handler(packet);
	else
		goto drop_unlock;
	spin_unlock_irqrestore(&packet->qp->r_lock, flags);
	rcu_read_unlock();

	return;
drop_unlock:
	spin_unlock_irqrestore(&packet->qp->r_lock, flags);
drop_rcu:
	rcu_read_unlock();
drop:
	ibp->rvp.n_pkt_drops++;
}

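/*
 * hfi1_kdeth_expected_rcv - process a TID RDMA packet landed in an
 * expected (TID) buffer.  Same flow as the eager path; the verbs QP
 * number is read from the r_rsp variant of the TID RDMA header.
 */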
void hfi1_kdeth_expected_rcv(struct hfi1_packet *packet)
{
	struct hfi1_ctxtdata *rcd = packet->rcd;
	struct ib_header *hdr = packet->hdr;
	u32 tlen = packet->tlen;
	struct hfi1_pportdata *ppd = rcd->ppd;
	struct hfi1_ibport *ibp = &ppd->ibport_data;
	struct rvt_dev_info *rdi = &ppd->dd->verbs_dev.rdi;
	opcode_handler opcode_handler;
	unsigned long flags;
	u32 qp_num;
	int lnh;
	u8 opcode;

	/* DW == LRH (2) + BTH (3) + KDETH (9) + CRC (1) */
	if (unlikely(tlen < 15 * sizeof(u32)))
		goto drop;

	lnh = be16_to_cpu(hdr->lrh[0]) & 3;
	if (lnh != HFI1_LRH_BTH)
		goto drop;

	packet->ohdr = &hdr->u.oth;
	trace_input_ibhdr(rcd->dd, packet, !!(rhf_dc_info(packet->rhf)));

	opcode = (be32_to_cpu(packet->ohdr->bth[0]) >> 24);
	inc_opstats(tlen, &rcd->opstats->stats[opcode]);

	/* verbs_qp can be picked up from any tid_rdma header struct */
	qp_num = be32_to_cpu(packet->ohdr->u.tid_rdma.r_rsp.verbs_qp) &
		RVT_QPN_MASK;

	rcu_read_lock();
	packet->qp = rvt_lookup_qpn(rdi, &ibp->rvp, qp_num);
	if (!packet->qp)
		goto drop_rcu;
	spin_lock_irqsave(&packet->qp->r_lock, flags);
	opcode_handler = tid_qp_ok(opcode, packet);
	if (likely(opcode_handler))
		opcode_handler(packet);
	else
		goto drop_unlock;
	spin_unlock_irqrestore(&packet->qp->r_lock, flags);
	rcu_read_unlock();

	return;
drop_unlock:
	spin_unlock_irqrestore(&packet->qp->r_lock, flags);
drop_rcu:
	rcu_read_unlock();
drop:
	ibp->rvp.n_pkt_drops++;
}

static int hfi1_do_pkey_check(struct hfi1_packet *packet)
{
	struct hfi1_ctxtdata *rcd = packet->rcd;
	struct hfi1_pportdata *ppd = rcd->ppd;
	struct hfi1_16b_header *hdr = packet->hdr;
	u16 pkey;

	/* Pkey check needed only for bypass packets */
	if (packet->etype != RHF_RCV_TYPE_BYPASS)
		return 0;

	/* Perform pkey check */
	pkey = hfi1_16B_get_pkey(hdr);
	return ingress_pkey_check(ppd, pkey, packet->sc,
				  packet->qp->s_pkey_index,
				  packet->slid, true);
}

static inline void hfi1_handle_packet(struct hfi1_packet *packet,
				      bool is_mcast)
{
	u32 qp_num;
	struct hfi1_ctxtdata *rcd = packet->rcd;
	struct hfi1_pportdata *ppd = rcd->ppd;
	struct hfi1_ibport *ibp = rcd_to_iport(rcd);
	struct rvt_dev_info *rdi = &ppd->dd->verbs_dev.rdi;
	opcode_handler packet_handler;
	unsigned long flags;

	inc_opstats(packet->tlen, &rcd->opstats->stats[packet->opcode]);

	if (unlikely(is_mcast)) {
		struct rvt_mcast *mcast;
		struct rvt_mcast_qp *p;

		if (!packet->grh)
			goto drop;
		mcast = rvt_mcast_find(&ibp->rvp,
				       &packet->grh->dgid,
				       opa_get_lid(packet->dlid, 9B));
		if (!mcast)
			goto drop;
		rcu_read_lock();
		list_for_each_entry_rcu(p, &mcast->qp_list, list) {
			packet->qp = p->qp;
			if (hfi1_do_pkey_check(packet))
				goto unlock_drop;
			spin_lock_irqsave(&packet->qp->r_lock, flags);
			packet_handler = qp_ok(packet);
			if (likely(packet_handler))
				packet_handler(packet);
			else
				ibp->rvp.n_pkt_drops++;
			spin_unlock_irqrestore(&packet->qp->r_lock, flags);
		}
		rcu_read_unlock();
		/*
		 * Notify rvt_multicast_detach() if it is waiting for us
		 * to finish.
		 */
		if (atomic_dec_return(&mcast->refcount) <= 1)
			wake_up(&mcast->wait);
	} else {
		/* Get the destination QP number. */
		if (packet->etype == RHF_RCV_TYPE_BYPASS &&
		    hfi1_16B_get_l4(packet->hdr) == OPA_16B_L4_FM)
			qp_num = hfi1_16B_get_dest_qpn(packet->mgmt);
		else
			qp_num = ib_bth_get_qpn(packet->ohdr);

		rcu_read_lock();
		packet->qp = rvt_lookup_qpn(rdi, &ibp->rvp, qp_num);
		if (!packet->qp)
			goto unlock_drop;

		if (hfi1_do_pkey_check(packet))
			goto unlock_drop;

		spin_lock_irqsave(&packet->qp->r_lock, flags);
		packet_handler = qp_ok(packet);
		if (likely(packet_handler))
			packet_handler(packet);
		else
			ibp->rvp.n_pkt_drops++;
		spin_unlock_irqrestore(&packet->qp->r_lock, flags);
		rcu_read_unlock();
	}
	return;
unlock_drop:
	rcu_read_unlock();
drop:
	ibp->rvp.n_pkt_drops++;
}

/**
 * hfi1_ib_rcv - process an incoming packet
 * @packet: data packet information
 *
 * This is called to process an incoming packet at interrupt level.
 */
void hfi1_ib_rcv(struct hfi1_packet *packet)
{
	struct hfi1_ctxtdata *rcd = packet->rcd;

	trace_input_ibhdr(rcd->dd, packet, !!(rhf_dc_info(packet->rhf)));
	hfi1_handle_packet(packet, hfi1_check_mcast(packet->dlid));
}

void hfi1_16B_rcv(struct hfi1_packet *packet)
{
	struct hfi1_ctxtdata *rcd = packet->rcd;

	trace_input_ibhdr(rcd->dd, packet, false);
	hfi1_handle_packet(packet, hfi1_check_mcast(packet->dlid));
}

/*
 * This is called from a timer to check for QPs
 * which need kernel memory in order to send a packet.
 */
static void mem_timer(struct timer_list *t)
{
	struct hfi1_ibdev *dev = from_timer(dev, t, mem_timer);
	struct list_head *list = &dev->memwait;
	struct rvt_qp *qp = NULL;
	struct iowait *wait;
	unsigned long flags;
	struct hfi1_qp_priv *priv;

	write_seqlock_irqsave(&dev->iowait_lock, flags);
	if (!list_empty(list)) {
		wait = list_first_entry(list, struct iowait, list);
		qp = iowait_to_qp(wait);
		priv = qp->priv;
		list_del_init(&priv->s_iowait.list);
		priv->s_iowait.lock = NULL;
		/* refcount held until actual wake up */
		if (!list_empty(list))
			mod_timer(&dev->mem_timer, jiffies + 1);
	}
	write_sequnlock_irqrestore(&dev->iowait_lock, flags);

	if (qp)
		hfi1_qp_wakeup(qp, RVT_S_WAIT_KMEM);
}

/*
 * This is called with progress side lock held.
 */
/* New API */
static void verbs_sdma_complete(
	struct sdma_txreq *cookie,
	int status)
{
	struct verbs_txreq *tx =
		container_of(cookie, struct verbs_txreq, txreq);
	struct rvt_qp *qp = tx->qp;

	spin_lock(&qp->s_lock);
	if (tx->wqe) {
		rvt_send_complete(qp, tx->wqe, IB_WC_SUCCESS);
	} else if (qp->ibqp.qp_type == IB_QPT_RC) {
		struct hfi1_opa_header *hdr;

		hdr = &tx->phdr.hdr;
		if (unlikely(status == SDMA_TXREQ_S_ABORTED))
			hfi1_rc_verbs_aborted(qp, hdr);
		hfi1_rc_send_complete(qp, hdr);
	}
	spin_unlock(&qp->s_lock);

	hfi1_put_txreq(tx);
}

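/*
 * hfi1_wait_kmem - queue the QP on the device memwait list and arm the
 * mem_timer so the send can be retried later.
 */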
void hfi1_wait_kmem(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;
	struct ib_qp *ibqp = &qp->ibqp;
	struct ib_device *ibdev = ibqp->device;
	struct hfi1_ibdev *dev = to_idev(ibdev);

	if (list_empty(&priv->s_iowait.list)) {
		if (list_empty(&dev->memwait))
			mod_timer(&dev->mem_timer, jiffies + 1);
		qp->s_flags |= RVT_S_WAIT_KMEM;
		list_add_tail(&priv->s_iowait.list, &dev->memwait);
		priv->s_iowait.lock = &dev->iowait_lock;
		trace_hfi1_qpsleep(qp, RVT_S_WAIT_KMEM);
		rvt_get_qp(qp);
	}
}

static int wait_kmem(struct hfi1_ibdev *dev,
		     struct rvt_qp *qp,
		     struct hfi1_pkt_state *ps)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&qp->s_lock, flags);
	if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) {
		write_seqlock(&dev->iowait_lock);
		list_add_tail(&ps->s_txreq->txreq.list,
			      &ps->wait->tx_head);
		hfi1_wait_kmem(qp);
		write_sequnlock(&dev->iowait_lock);
		hfi1_qp_unbusy(qp, ps->wait);
		ret = -EBUSY;
	}
	spin_unlock_irqrestore(&qp->s_lock, flags);

	return ret;
}

/*
 * This routine calls txadds for each sg entry.
 *
 * Add failures will revert the sge cursor
 */
static noinline int build_verbs_ulp_payload(
	struct sdma_engine *sde,
	u32 length,
	struct verbs_txreq *tx)
{
	struct rvt_sge_state *ss = tx->ss;
	struct rvt_sge *sg_list = ss->sg_list;
	struct rvt_sge sge = ss->sge;
	u8 num_sge = ss->num_sge;
	u32 len;
	int ret = 0;

	while (length) {
		len = rvt_get_sge_length(&ss->sge, length);
		WARN_ON_ONCE(len == 0);
		ret = sdma_txadd_kvaddr(
			sde->dd,
			&tx->txreq,
			ss->sge.vaddr,
			len);
		if (ret)
			goto bail_txadd;
		rvt_update_sge(ss, len, false);
		length -= len;
	}
	return ret;
bail_txadd:
	/* unwind cursor */
	ss->sge = sge;
	ss->num_sge = num_sge;
	ss->sg_list = sg_list;
	return ret;
}

/**
 * update_tx_opstats - record stats by opcode
 * @qp: the qp
 * @ps: transmit packet state
 * @plen: the plen in dwords
 *
 * This is a routine to record the tx opstats after a
 * packet has been presented to the egress mechanism.
 */
static void update_tx_opstats(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
			      u32 plen)
{
#ifdef CONFIG_DEBUG_FS
	struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);
	struct hfi1_opcode_stats_perctx *s = get_cpu_ptr(dd->tx_opstats);

	inc_opstats(plen * 4, &s->stats[ps->opcode]);
	put_cpu_ptr(s);
#endif
}

/*
 * Build the number of DMA descriptors needed to send length bytes of data.
 *
 * NOTE: DMA mapping is held in the tx until completed in the ring or
 *       the tx desc is freed without having been submitted to the ring
 *
 * This routine ensures all the helper routine calls succeed.
 */
/* New API */
static int build_verbs_tx_desc(
	struct sdma_engine *sde,
	u32 length,
	struct verbs_txreq *tx,
	struct hfi1_ahg_info *ahg_info,
	u64 pbc)
{
	int ret = 0;
	struct hfi1_sdma_header *phdr = &tx->phdr;
	u16 hdrbytes = (tx->hdr_dwords + sizeof(pbc) / 4) << 2;
	u8 extra_bytes = 0;

	if (tx->phdr.hdr.hdr_type) {
		/*
		 * hdrbytes accounts for PBC. Need to subtract 8 bytes
		 * before calculating padding.
		 */
		extra_bytes = hfi1_get_16b_padding(hdrbytes - 8, length) +
			      (SIZE_OF_CRC << 2) + SIZE_OF_LT;
	}
	if (!ahg_info->ahgcount) {
		ret = sdma_txinit_ahg(
			&tx->txreq,
			ahg_info->tx_flags,
			hdrbytes + length +
			extra_bytes,
			ahg_info->ahgidx,
			0,
			NULL,
			0,
			verbs_sdma_complete);
		if (ret)
			goto bail_txadd;
		phdr->pbc = cpu_to_le64(pbc);
		ret = sdma_txadd_kvaddr(
			sde->dd,
			&tx->txreq,
			phdr,
			hdrbytes);
		if (ret)
			goto bail_txadd;
	} else {
		ret = sdma_txinit_ahg(
			&tx->txreq,
			ahg_info->tx_flags,
			length,
			ahg_info->ahgidx,
			ahg_info->ahgcount,
			ahg_info->ahgdesc,
			hdrbytes,
			verbs_sdma_complete);
		if (ret)
			goto bail_txadd;
	}
	/* add the ulp payload - if any. tx->ss can be NULL for acks */
	if (tx->ss) {
		ret = build_verbs_ulp_payload(sde, length, tx);
		if (ret)
			goto bail_txadd;
	}

	/* add icrc, lt byte, and padding to flit */
	if (extra_bytes)
		ret = sdma_txadd_daddr(sde->dd, &tx->txreq,
				       sde->dd->sdma_pad_phys, extra_bytes);

bail_txadd:
	return ret;
}

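/*
 * For TID RDMA (KDETH) opcodes, have the hardware insert the KDETH
 * HCRC; all other opcodes leave the PBC unchanged.
 */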
static u64 update_hcrc(u8 opcode, u64 pbc)
{
	if ((opcode & IB_OPCODE_TID_RDMA) == IB_OPCODE_TID_RDMA) {
		pbc &= ~PBC_INSERT_HCRC_SMASK;
		pbc |= (u64)PBC_IHCRC_LKDETH << PBC_INSERT_HCRC_SHIFT;
	}
	return pbc;
}

int hfi1_verbs_send_dma(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
			u64 pbc)
{
	struct hfi1_qp_priv *priv = qp->priv;
	struct hfi1_ahg_info *ahg_info = priv->s_ahg;
	u32 hdrwords = ps->s_txreq->hdr_dwords;
	u32 len = ps->s_txreq->s_cur_size;
	u32 plen;
	struct hfi1_ibdev *dev = ps->dev;
	struct hfi1_pportdata *ppd = ps->ppd;
	struct verbs_txreq *tx;
	u8 sc5 = priv->s_sc;
	int ret;
	u32 dwords;

	if (ps->s_txreq->phdr.hdr.hdr_type) {
		u8 extra_bytes = hfi1_get_16b_padding((hdrwords << 2), len);

		dwords = (len + extra_bytes + (SIZE_OF_CRC << 2) +
			  SIZE_OF_LT) >> 2;
	} else {
		dwords = (len + 3) >> 2;
	}
	plen = hdrwords + dwords + sizeof(pbc) / 4;

	tx = ps->s_txreq;
	if (!sdma_txreq_built(&tx->txreq)) {
		if (likely(pbc == 0)) {
			u32 vl = sc_to_vlt(dd_from_ibdev(qp->ibqp.device), sc5);

			/* No vl15 here */
			/* set PBC_DC_INFO bit (aka SC[4]) in pbc */
			if (ps->s_txreq->phdr.hdr.hdr_type)
				pbc |= PBC_PACKET_BYPASS |
				       PBC_INSERT_BYPASS_ICRC;
			else
				pbc |= (ib_is_sc5(sc5) << PBC_DC_INFO_SHIFT);

			pbc = create_pbc(ppd,
					 pbc,
					 qp->srate_mbps,
					 vl,
					 plen);

			if (unlikely(hfi1_dbg_should_fault_tx(qp, ps->opcode)))
				pbc = hfi1_fault_tx(qp, ps->opcode, pbc);
			else
				/* Update HCRC based on packet opcode */
				pbc = update_hcrc(ps->opcode, pbc);
		}
		tx->wqe = qp->s_wqe;
		ret = build_verbs_tx_desc(tx->sde, len, tx, ahg_info, pbc);
		if (unlikely(ret))
			goto bail_build;
	}
	ret = sdma_send_txreq(tx->sde, ps->wait, &tx->txreq, ps->pkts_sent);
	if (unlikely(ret < 0)) {
		if (ret == -ECOMM)
			goto bail_ecomm;
		return ret;
	}

	update_tx_opstats(qp, ps, plen);
	trace_sdma_output_ibhdr(dd_from_ibdev(qp->ibqp.device),
				&ps->s_txreq->phdr.hdr, ib_is_sc5(sc5));
	return ret;

bail_ecomm:
	/* The current one got "sent" */
	return 0;
bail_build:
	ret = wait_kmem(dev, qp, ps);
	if (!ret) {
		/* free txreq - bad state */
		hfi1_put_txreq(ps->s_txreq);
		ps->s_txreq = NULL;
	}
	return ret;
}

/*
 * If we are now in the error state, return zero to flush the
 * send work request.
 */
static int pio_wait(struct rvt_qp *qp,
		    struct send_context *sc,
		    struct hfi1_pkt_state *ps,
		    u32 flag)
{
	struct hfi1_qp_priv *priv = qp->priv;
	struct hfi1_devdata *dd = sc->dd;
	unsigned long flags;
	int ret = 0;

	/*
	 * Note that as soon as want_buffer() is called and
	 * possibly before it returns, sc_piobufavail()
	 * could be called. Therefore, put QP on the I/O wait list before
	 * enabling the PIO avail interrupt.
	 */
	spin_lock_irqsave(&qp->s_lock, flags);
	if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) {
		write_seqlock(&sc->waitlock);
		list_add_tail(&ps->s_txreq->txreq.list,
			      &ps->wait->tx_head);
		if (list_empty(&priv->s_iowait.list)) {
			struct hfi1_ibdev *dev = &dd->verbs_dev;
			int was_empty;

			dev->n_piowait += !!(flag & RVT_S_WAIT_PIO);
			dev->n_piodrain += !!(flag & HFI1_S_WAIT_PIO_DRAIN);
			qp->s_flags |= flag;
			was_empty = list_empty(&sc->piowait);
			iowait_get_priority(&priv->s_iowait);
			iowait_queue(ps->pkts_sent, &priv->s_iowait,
				     &sc->piowait);
			priv->s_iowait.lock = &sc->waitlock;
			trace_hfi1_qpsleep(qp, RVT_S_WAIT_PIO);
			rvt_get_qp(qp);
			/* counting: only call wantpiobuf_intr if first user */
			if (was_empty)
				hfi1_sc_wantpiobuf_intr(sc, 1);
		}
		write_sequnlock(&sc->waitlock);
		hfi1_qp_unbusy(qp, ps->wait);
		ret = -EBUSY;
	}
	spin_unlock_irqrestore(&qp->s_lock, flags);
	return ret;
}

static void verbs_pio_complete(void *arg, int code)
{
	struct rvt_qp *qp = (struct rvt_qp *)arg;
	struct hfi1_qp_priv *priv = qp->priv;

	if (iowait_pio_dec(&priv->s_iowait))
		iowait_drain_wakeup(&priv->s_iowait);
}

int hfi1_verbs_send_pio(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
			u64 pbc)
{
	struct hfi1_qp_priv *priv = qp->priv;
	u32 hdrwords = ps->s_txreq->hdr_dwords;
	struct rvt_sge_state *ss = ps->s_txreq->ss;
	u32 len = ps->s_txreq->s_cur_size;
	u32 dwords;
	u32 plen;
	struct hfi1_pportdata *ppd = ps->ppd;
	u32 *hdr;
	u8 sc5;
	unsigned long flags = 0;
	struct send_context *sc;
	struct pio_buf *pbuf;
	int wc_status = IB_WC_SUCCESS;
	int ret = 0;
	pio_release_cb cb = NULL;
	u8 extra_bytes = 0;

	if (ps->s_txreq->phdr.hdr.hdr_type) {
		u8 pad_size = hfi1_get_16b_padding((hdrwords << 2), len);

		extra_bytes = pad_size + (SIZE_OF_CRC << 2) + SIZE_OF_LT;
		dwords = (len + extra_bytes) >> 2;
		hdr = (u32 *)&ps->s_txreq->phdr.hdr.opah;
	} else {
		dwords = (len + 3) >> 2;
		hdr = (u32 *)&ps->s_txreq->phdr.hdr.ibh;
	}
	plen = hdrwords + dwords + sizeof(pbc) / 4;

	/* only RC/UC use complete */
	switch (qp->ibqp.qp_type) {
	case IB_QPT_RC:
	case IB_QPT_UC:
		cb = verbs_pio_complete;
		break;
	default:
		break;
	}

	/* vl15 special case taken care of in ud.c */
	sc5 = priv->s_sc;
	sc = ps->s_txreq->psc;

	if (likely(pbc == 0)) {
		u8 vl = sc_to_vlt(dd_from_ibdev(qp->ibqp.device), sc5);

		/* set PBC_DC_INFO bit (aka SC[4]) in pbc */
		if (ps->s_txreq->phdr.hdr.hdr_type)
			pbc |= PBC_PACKET_BYPASS | PBC_INSERT_BYPASS_ICRC;
		else
			pbc |= (ib_is_sc5(sc5) << PBC_DC_INFO_SHIFT);

		pbc = create_pbc(ppd, pbc, qp->srate_mbps, vl, plen);
		if (unlikely(hfi1_dbg_should_fault_tx(qp, ps->opcode)))
			pbc = hfi1_fault_tx(qp, ps->opcode, pbc);
		else
			/* Update HCRC based on packet opcode */
			pbc = update_hcrc(ps->opcode, pbc);
	}
	if (cb)
		iowait_pio_inc(&priv->s_iowait);
	pbuf = sc_buffer_alloc(sc, plen, cb, qp);
	if (IS_ERR_OR_NULL(pbuf)) {
		if (cb)
			verbs_pio_complete(qp, 0);
		if (IS_ERR(pbuf)) {
			/*
			 * If we have filled the PIO buffers to capacity and are
			 * not in an active state this request is not going to
			 * go out, so just complete it with an error or else a
			 * ULP or the core may be stuck waiting.
			 */
			hfi1_cdbg(
				PIO,
				"alloc failed. state not active, completing");
			wc_status = IB_WC_GENERAL_ERR;
			goto pio_bail;
		} else {
			/*
			 * This is a normal occurrence. The PIO buffs are full
			 * up but we are still happily sending, well we could be
			 * so let's continue to queue the request.
			 */
			hfi1_cdbg(PIO, "alloc failed. state active, queuing");
			ret = pio_wait(qp, sc, ps, RVT_S_WAIT_PIO);
			if (!ret)
				/* txreq not queued - free */
				goto bail;
			/* tx consumed in wait */
			return ret;
		}
	}

	if (dwords == 0) {
		pio_copy(ppd->dd, pbuf, pbc, hdr, hdrwords);
	} else {
		seg_pio_copy_start(pbuf, pbc,
				   hdr, hdrwords * 4);
		if (ss) {
			while (len) {
				void *addr = ss->sge.vaddr;
				u32 slen = rvt_get_sge_length(&ss->sge, len);

				rvt_update_sge(ss, slen, false);
				seg_pio_copy_mid(pbuf, addr, slen);
				len -= slen;
			}
		}
		/* add icrc, lt byte, and padding to flit */
		if (extra_bytes)
			seg_pio_copy_mid(pbuf, ppd->dd->sdma_pad_dma,
					 extra_bytes);

		seg_pio_copy_end(pbuf);
	}

	update_tx_opstats(qp, ps, plen);
	trace_pio_output_ibhdr(dd_from_ibdev(qp->ibqp.device),
			       &ps->s_txreq->phdr.hdr, ib_is_sc5(sc5));

pio_bail:
	spin_lock_irqsave(&qp->s_lock, flags);
	if (qp->s_wqe) {
		rvt_send_complete(qp, qp->s_wqe, wc_status);
	} else if (qp->ibqp.qp_type == IB_QPT_RC) {
		if (unlikely(wc_status == IB_WC_GENERAL_ERR))
			hfi1_rc_verbs_aborted(qp, &ps->s_txreq->phdr.hdr);
		hfi1_rc_send_complete(qp, &ps->s_txreq->phdr.hdr);
	}
	spin_unlock_irqrestore(&qp->s_lock, flags);

	ret = 0;

bail:
	hfi1_put_txreq(ps->s_txreq);
	return ret;
}

/*
 * egress_pkey_matches_entry - return 1 if the pkey matches ent (ent
 * being an entry from the partition key table), return 0
 * otherwise. Use the matching criteria for egress partition keys
 * specified in the OPAv1 spec., section 9.1l.7.
 */
static inline int egress_pkey_matches_entry(u16 pkey, u16 ent)
{
	u16 mkey = pkey & PKEY_LOW_15_MASK;
	u16 mentry = ent & PKEY_LOW_15_MASK;

	if (mkey == mentry) {
		/*
		 * If pkey[15] is set (full partition member),
		 * is bit 15 in the corresponding table element
		 * clear (limited member)?
		 */
		if (pkey & PKEY_MEMBER_MASK)
			return !!(ent & PKEY_MEMBER_MASK);
		return 1;
	}
	return 0;
}

/**
 * egress_pkey_check - check P_KEY of a packet
 * @ppd: Physical IB port data
 * @slid: SLID for packet
 * @pkey: PKEY for header
 * @sc5: SC for packet
 * @s_pkey_index: Used as a lookup optimization for kernel contexts only.
 * A negative value means a user context is calling this function.
 *
 * It checks if hdr's pkey is valid.
 *
 * Return: 0 on success, otherwise, 1
 */
int egress_pkey_check(struct hfi1_pportdata *ppd, u32 slid, u16 pkey,
		      u8 sc5, int8_t s_pkey_index)
{
	struct hfi1_devdata *dd;
	int i;
	int is_user_ctxt_mechanism = (s_pkey_index < 0);

	if (!(ppd->part_enforce & HFI1_PART_ENFORCE_OUT))
		return 0;

	/* If SC15, pkey[0:14] must be 0x7fff */
	if ((sc5 == 0xf) && ((pkey & PKEY_LOW_15_MASK) != PKEY_LOW_15_MASK))
		goto bad;

	/* Is the pkey = 0x0, or 0x8000? */
	if ((pkey & PKEY_LOW_15_MASK) == 0)
		goto bad;

	/*
	 * For the kernel contexts only, if a qp is passed into the function,
	 * the most likely matching pkey has index qp->s_pkey_index
	 */
	if (!is_user_ctxt_mechanism &&
	    egress_pkey_matches_entry(pkey, ppd->pkeys[s_pkey_index])) {
		return 0;
	}

	for (i = 0; i < MAX_PKEY_VALUES; i++) {
		if (egress_pkey_matches_entry(pkey, ppd->pkeys[i]))
			return 0;
	}
bad:
	/*
	 * For the user-context mechanism, the P_KEY check would only happen
	 * once per SDMA request, not once per packet. Therefore, there's no
	 * need to increment the counter for the user-context mechanism.
	 */
	if (!is_user_ctxt_mechanism) {
		incr_cntr64(&ppd->port_xmit_constraint_errors);
		dd = ppd->dd;
		if (!(dd->err_info_xmit_constraint.status &
		      OPA_EI_STATUS_SMASK)) {
			dd->err_info_xmit_constraint.status |=
				OPA_EI_STATUS_SMASK;
			dd->err_info_xmit_constraint.slid = slid;
			dd->err_info_xmit_constraint.pkey = pkey;
		}
	}
	return 1;
}

/**
 * get_send_routine - choose an egress routine
 *
 * Choose an egress routine based on QP type
 * and size
 */
static inline send_routine get_send_routine(struct rvt_qp *qp,
					    struct hfi1_pkt_state *ps)
{
	struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);
	struct hfi1_qp_priv *priv = qp->priv;
	struct verbs_txreq *tx = ps->s_txreq;

	if (unlikely(!(dd->flags & HFI1_HAS_SEND_DMA)))
		return dd->process_pio_send;
	switch (qp->ibqp.qp_type) {
	case IB_QPT_SMI:
		return dd->process_pio_send;
	case IB_QPT_GSI:
	case IB_QPT_UD:
		break;
	case IB_QPT_UC:
	case IB_QPT_RC:
		priv->s_running_pkt_size =
			(tx->s_cur_size + priv->s_running_pkt_size) / 2;
		if (piothreshold &&
		    priv->s_running_pkt_size <= min(piothreshold, qp->pmtu) &&
		    (BIT(ps->opcode & OPMASK) & pio_opmask[ps->opcode >> 5]) &&
		    iowait_sdma_pending(&priv->s_iowait) == 0 &&
		    !sdma_txreq_built(&tx->txreq))
			return dd->process_pio_send;
		break;
	default:
		break;
	}
	return dd->process_dma_send;
}

/**
 * hfi1_verbs_send - send a packet
 * @qp: the QP to send on
 * @ps: the state of the packet to send
 *
 * Return zero if packet is sent or queued OK.
 * Return non-zero and clear qp->s_flags RVT_S_BUSY otherwise.
 */
int hfi1_verbs_send(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
{
	struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);
	struct hfi1_qp_priv *priv = qp->priv;
	struct ib_other_headers *ohdr = NULL;
	send_routine sr;
	int ret;
	u16 pkey;
	u32 slid;
	u8 l4 = 0;

	/* locate the pkey within the headers */
	if (ps->s_txreq->phdr.hdr.hdr_type) {
		struct hfi1_16b_header *hdr = &ps->s_txreq->phdr.hdr.opah;

		l4 = hfi1_16B_get_l4(hdr);
		if (l4 == OPA_16B_L4_IB_LOCAL)
			ohdr = &hdr->u.oth;
		else if (l4 == OPA_16B_L4_IB_GLOBAL)
			ohdr = &hdr->u.l.oth;

		slid = hfi1_16B_get_slid(hdr);
		pkey = hfi1_16B_get_pkey(hdr);
	} else {
		struct ib_header *hdr = &ps->s_txreq->phdr.hdr.ibh;
		u8 lnh = ib_get_lnh(hdr);

		if (lnh == HFI1_LRH_GRH)
			ohdr = &hdr->u.l.oth;
		else
			ohdr = &hdr->u.oth;
		slid = ib_get_slid(hdr);
		pkey = ib_bth_get_pkey(ohdr);
	}

	if (likely(l4 != OPA_16B_L4_FM))
		ps->opcode = ib_bth_get_opcode(ohdr);
	else
		ps->opcode = IB_OPCODE_UD_SEND_ONLY;

	sr = get_send_routine(qp, ps);
	ret = egress_pkey_check(dd->pport, slid, pkey,
				priv->s_sc, qp->s_pkey_index);
	if (unlikely(ret)) {
		/*
		 * The value we are returning here does not get propagated to
		 * the verbs caller. Thus we need to complete the request with
		 * error otherwise the caller could be sitting waiting on the
		 * completion event. Only do this for PIO. SDMA has its own
		 * mechanism for handling the errors. So for SDMA we can just
		 * return.
		 */
		if (sr == dd->process_pio_send) {
			unsigned long flags;

			hfi1_cdbg(PIO, "%s() Failed. Completing with err",
				  __func__);
			spin_lock_irqsave(&qp->s_lock, flags);
			rvt_send_complete(qp, qp->s_wqe, IB_WC_GENERAL_ERR);
			spin_unlock_irqrestore(&qp->s_lock, flags);
		}
		return -EINVAL;
	}
	if (sr == dd->process_dma_send && iowait_pio_pending(&priv->s_iowait))
		return pio_wait(qp,
				ps->s_txreq->psc,
				ps,
				HFI1_S_WAIT_PIO_DRAIN);
	return sr(qp, ps, 0);
}

/**
 * hfi1_fill_device_attr - Fill in rvt dev info device attributes.
 * @dd: the device data structure
 */
static void hfi1_fill_device_attr(struct hfi1_devdata *dd)
{
	struct rvt_dev_info *rdi = &dd->verbs_dev.rdi;
	u32 ver = dd->dc8051_ver;

	memset(&rdi->dparms.props, 0, sizeof(rdi->dparms.props));

	rdi->dparms.props.fw_ver = ((u64)(dc8051_ver_maj(ver)) << 32) |
		((u64)(dc8051_ver_min(ver)) << 16) |
		(u64)dc8051_ver_patch(ver);

	rdi->dparms.props.device_cap_flags = IB_DEVICE_BAD_PKEY_CNTR |
			IB_DEVICE_BAD_QKEY_CNTR | IB_DEVICE_SHUTDOWN_PORT |
			IB_DEVICE_SYS_IMAGE_GUID | IB_DEVICE_RC_RNR_NAK_GEN |
			IB_DEVICE_PORT_ACTIVE_EVENT | IB_DEVICE_SRQ_RESIZE |
			IB_DEVICE_MEM_MGT_EXTENSIONS |
			IB_DEVICE_RDMA_NETDEV_OPA_VNIC;
	rdi->dparms.props.page_size_cap = PAGE_SIZE;
	rdi->dparms.props.vendor_id = dd->oui1 << 16 | dd->oui2 << 8 | dd->oui3;
	rdi->dparms.props.vendor_part_id = dd->pcidev->device;
	rdi->dparms.props.hw_ver = dd->minrev;
	rdi->dparms.props.sys_image_guid = ib_hfi1_sys_image_guid;
	rdi->dparms.props.max_mr_size = U64_MAX;
	rdi->dparms.props.max_fast_reg_page_list_len = UINT_MAX;
	rdi->dparms.props.max_qp = hfi1_max_qps;
	rdi->dparms.props.max_qp_wr =
		(hfi1_max_qp_wrs >= HFI1_QP_WQE_INVALID ?
		 HFI1_QP_WQE_INVALID - 1 : hfi1_max_qp_wrs);
	rdi->dparms.props.max_send_sge = hfi1_max_sges;
	rdi->dparms.props.max_recv_sge = hfi1_max_sges;
	rdi->dparms.props.max_sge_rd = hfi1_max_sges;
	rdi->dparms.props.max_cq = hfi1_max_cqs;
	rdi->dparms.props.max_ah = hfi1_max_ahs;
	rdi->dparms.props.max_cqe = hfi1_max_cqes;
	rdi->dparms.props.max_map_per_fmr = 32767;
	rdi->dparms.props.max_pd = hfi1_max_pds;
	rdi->dparms.props.max_qp_rd_atom = HFI1_MAX_RDMA_ATOMIC;
	rdi->dparms.props.max_qp_init_rd_atom = 255;
	rdi->dparms.props.max_srq = hfi1_max_srqs;
	rdi->dparms.props.max_srq_wr = hfi1_max_srq_wrs;
	rdi->dparms.props.max_srq_sge = hfi1_max_srq_sges;
	rdi->dparms.props.atomic_cap = IB_ATOMIC_GLOB;
	rdi->dparms.props.max_pkeys = hfi1_get_npkeys(dd);
	rdi->dparms.props.max_mcast_grp = hfi1_max_mcast_grps;
	rdi->dparms.props.max_mcast_qp_attach = hfi1_max_mcast_qp_attached;
	rdi->dparms.props.max_total_mcast_qp_attach =
		rdi->dparms.props.max_mcast_qp_attach *
		rdi->dparms.props.max_mcast_grp;
}

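/*
 * opa_speed_to_ib - convert an OPA link speed mask to the closest IB
 * speed enumeration: 25G maps to EDR, 12.5G maps to FDR.
 */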
static inline u16 opa_speed_to_ib(u16 in)
{
	u16 out = 0;

	if (in & OPA_LINK_SPEED_25G)
		out |= IB_SPEED_EDR;
	if (in & OPA_LINK_SPEED_12_5G)
		out |= IB_SPEED_FDR;

	return out;
}

/*
 * Convert a single OPA link width (no multiple flags) to an IB value.
 * A zero OPA link width means link down, which means the IB width value
 * is a don't care.
 */
static inline u16 opa_width_to_ib(u16 in)
{
	switch (in) {
	case OPA_LINK_WIDTH_1X:
	/* map 2x and 3x to 1x as they don't exist in IB */
	case OPA_LINK_WIDTH_2X:
	case OPA_LINK_WIDTH_3X:
		return IB_WIDTH_1X;
	default: /* link down or unknown, return our largest width */
	case OPA_LINK_WIDTH_4X:
		return IB_WIDTH_4X;
	}
}

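/*
 * query_port - rdmavt query_port_state callback; report the current LID,
 * LMC, logical and physical state, link width/speed, VL count and MTU of
 * the port.  The MTU enum reported to the core is capped at 4K.
 */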
static int query_port(struct rvt_dev_info *rdi, u8 port_num,
		      struct ib_port_attr *props)
{
	struct hfi1_ibdev *verbs_dev = dev_from_rdi(rdi);
	struct hfi1_devdata *dd = dd_from_dev(verbs_dev);
	struct hfi1_pportdata *ppd = &dd->pport[port_num - 1];
	u32 lid = ppd->lid;

	/* props being zeroed by the caller, avoid zeroing it here */
	props->lid = lid ? lid : 0;
	props->lmc = ppd->lmc;
	/* OPA logical states match IB logical states */
	props->state = driver_lstate(ppd);
	props->phys_state = driver_pstate(ppd);
	props->gid_tbl_len = HFI1_GUIDS_PER_PORT;
	props->active_width = (u8)opa_width_to_ib(ppd->link_width_active);
	/* see rate_show() in ib core/sysfs.c */
	props->active_speed = (u8)opa_speed_to_ib(ppd->link_speed_active);
	props->max_vl_num = ppd->vls_supported;

	/* Once we are a "first class" citizen and have added the OPA MTUs to
	 * the core we can advertise the larger MTU enum to the ULPs, for now
	 * advertise only 4K.
	 *
	 * Those applications which are either OPA aware or pass the MTU enum
	 * from the Path Records to us will get the new 8k MTU. Those that
	 * attempt to process the MTU enum may fail in various ways.
	 */
	props->max_mtu = mtu_to_enum((!valid_ib_mtu(hfi1_max_mtu) ?
				      4096 : hfi1_max_mtu), IB_MTU_4096);
	props->active_mtu = !valid_ib_mtu(ppd->ibmtu) ? props->max_mtu :
		mtu_to_enum(ppd->ibmtu, IB_MTU_4096);

	return 0;
}

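/*
 * modify_device - handle node description and system image GUID changes
 * from the core and propagate each change to every port; any other
 * modify mask bit is rejected with -EOPNOTSUPP.
 */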
static int modify_device(struct ib_device *device,
			 int device_modify_mask,
			 struct ib_device_modify *device_modify)
{
	struct hfi1_devdata *dd = dd_from_ibdev(device);
	unsigned i;
	int ret;

	if (device_modify_mask & ~(IB_DEVICE_MODIFY_SYS_IMAGE_GUID |
				   IB_DEVICE_MODIFY_NODE_DESC)) {
		ret = -EOPNOTSUPP;
		goto bail;
	}

	if (device_modify_mask & IB_DEVICE_MODIFY_NODE_DESC) {
		memcpy(device->node_desc, device_modify->node_desc,
		       IB_DEVICE_NODE_DESC_MAX);
		for (i = 0; i < dd->num_pports; i++) {
			struct hfi1_ibport *ibp = &dd->pport[i].ibport_data;

			hfi1_node_desc_chg(ibp);
		}
	}

	if (device_modify_mask & IB_DEVICE_MODIFY_SYS_IMAGE_GUID) {
		ib_hfi1_sys_image_guid =
			cpu_to_be64(device_modify->sys_image_guid);
		for (i = 0; i < dd->num_pports; i++) {
			struct hfi1_ibport *ibp = &dd->pport[i].ibport_data;

			hfi1_sys_guid_chg(ibp);
		}
	}

	ret = 0;

bail:
	return ret;
}

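/*
 * shut_down_port - rdmavt shut_down_port callback; record an unknown
 * linkdown reason and take the link to the default down state.
 */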
static int shut_down_port(struct rvt_dev_info *rdi, u8 port_num)
{
	struct hfi1_ibdev *verbs_dev = dev_from_rdi(rdi);
	struct hfi1_devdata *dd = dd_from_dev(verbs_dev);
	struct hfi1_pportdata *ppd = &dd->pport[port_num - 1];
	int ret;

	set_link_down_reason(ppd, OPA_LINKDOWN_REASON_UNKNOWN, 0,
			     OPA_LINKDOWN_REASON_UNKNOWN);
	ret = set_link_state(ppd, HLS_DN_DOWNDEF);
	return ret;
}

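/*
 * hfi1_get_guid_be - return the indexed port GUID in network byte order,
 * or -EINVAL if guid_index is beyond HFI1_GUIDS_PER_PORT.
 */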
static int hfi1_get_guid_be(struct rvt_dev_info *rdi, struct rvt_ibport *rvp,
			    int guid_index, __be64 *guid)
{
	struct hfi1_ibport *ibp = container_of(rvp, struct hfi1_ibport, rvp);

	if (guid_index >= HFI1_GUIDS_PER_PORT)
		return -EINVAL;

	*guid = get_sguid(ibp, guid_index);
	return 0;
}

/*
 * convert ah port,sl to sc
 */
u8 ah_to_sc(struct ib_device *ibdev, struct rdma_ah_attr *ah)
{
	struct hfi1_ibport *ibp = to_iport(ibdev, rdma_ah_get_port_num(ah));

	return ibp->sl_to_sc[rdma_ah_get_sl(ah)];
}

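/*
 * hfi1_check_ah - validate address handle attributes: a multicast DLID
 * must carry a GRH, the SL must index into the sl_to_sc table, and the
 * resulting SC must map to a supported VL (or VL15).
 */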
static int hfi1_check_ah(struct ib_device *ibdev, struct rdma_ah_attr *ah_attr)
{
	struct hfi1_ibport *ibp;
	struct hfi1_pportdata *ppd;
	struct hfi1_devdata *dd;
	u8 sc5;
	u8 sl;

	if (hfi1_check_mcast(rdma_ah_get_dlid(ah_attr)) &&
	    !(rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH))
		return -EINVAL;

	/* test the mapping for validity */
	ibp = to_iport(ibdev, rdma_ah_get_port_num(ah_attr));
	ppd = ppd_from_ibp(ibp);
	dd = dd_from_ppd(ppd);

	sl = rdma_ah_get_sl(ah_attr);
	if (sl >= ARRAY_SIZE(ibp->sl_to_sc))
		return -EINVAL;
	sl = array_index_nospec(sl, ARRAY_SIZE(ibp->sl_to_sc));

	sc5 = ibp->sl_to_sc[sl];
	if (sc_to_vlt(dd, sc5) > num_vls && sc_to_vlt(dd, sc5) != 0xf)
		return -EINVAL;
	return 0;
}

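/*
 * hfi1_notify_new_ah - finish driver-private AH setup once rdmavt has
 * accepted the attributes: normalize the address/LID format and cache
 * the VL and log_pmtu derived from the SL -> SC -> VL mapping.
 */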
static void hfi1_notify_new_ah(struct ib_device *ibdev,
			       struct rdma_ah_attr *ah_attr,
			       struct rvt_ah *ah)
{
	struct hfi1_ibport *ibp;
	struct hfi1_pportdata *ppd;
	struct hfi1_devdata *dd;
	u8 sc5;
	struct rdma_ah_attr *attr = &ah->attr;

	/*
	 * Do not trust reading anything from rvt_ah at this point as it is not
	 * done being setup. We can however modify things which we need to set.
	 */

	ibp = to_iport(ibdev, rdma_ah_get_port_num(ah_attr));
	ppd = ppd_from_ibp(ibp);
	sc5 = ibp->sl_to_sc[rdma_ah_get_sl(&ah->attr)];
	hfi1_update_ah_attr(ibdev, attr);
	hfi1_make_opa_lid(attr);
	dd = dd_from_ppd(ppd);
	ah->vl = sc_to_vlt(dd, sc5);
	if (ah->vl < num_vls || ah->vl == 15)
		ah->log_pmtu = ilog2(dd->vld[ah->vl].mtu);
}

/**
 * hfi1_get_npkeys - return the size of the PKEY table for context 0
 * @dd: the hfi1_ib device
 */
unsigned hfi1_get_npkeys(struct hfi1_devdata *dd)
{
	return ARRAY_SIZE(dd->pport[0].pkeys);
}

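/*
 * init_ibport - set up per-port IB state: identity SL/SC maps, trap
 * lists and trap timer, the default GID prefix, the OPA capability
 * masks, and the PMA counter selects.
 */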
static void init_ibport(struct hfi1_pportdata *ppd)
{
	struct hfi1_ibport *ibp = &ppd->ibport_data;
	size_t sz = ARRAY_SIZE(ibp->sl_to_sc);
	int i;

	for (i = 0; i < sz; i++) {
		ibp->sl_to_sc[i] = i;
		ibp->sc_to_sl[i] = i;
	}

	for (i = 0; i < RVT_MAX_TRAP_LISTS; i++)
		INIT_LIST_HEAD(&ibp->rvp.trap_lists[i].list);
	timer_setup(&ibp->rvp.trap_timer, hfi1_handle_trap_timer, 0);

	spin_lock_init(&ibp->rvp.lock);
	/* Set the prefix to the default value (see ch. 4.1.1) */
	ibp->rvp.gid_prefix = IB_DEFAULT_GID_PREFIX;
	ibp->rvp.sm_lid = 0;
	/*
	 * Below should only set bits defined in OPA PortInfo.CapabilityMask
	 * and PortInfo.CapabilityMask3
	 */
	ibp->rvp.port_cap_flags = IB_PORT_AUTO_MIGR_SUP |
		IB_PORT_CAP_MASK_NOTICE_SUP;
	ibp->rvp.port_cap3_flags = OPA_CAP_MASK3_IsSharedSpaceSupported;
	ibp->rvp.pma_counter_select[0] = IB_PMA_PORT_XMIT_DATA;
	ibp->rvp.pma_counter_select[1] = IB_PMA_PORT_RCV_DATA;
	ibp->rvp.pma_counter_select[2] = IB_PMA_PORT_XMIT_PKTS;
	ibp->rvp.pma_counter_select[3] = IB_PMA_PORT_RCV_PKTS;
	ibp->rvp.pma_counter_select[4] = IB_PMA_PORT_XMIT_WAIT;

	RCU_INIT_POINTER(ibp->rvp.qp[0], NULL);
	RCU_INIT_POINTER(ibp->rvp.qp[1], NULL);
}

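/*
 * hfi1_get_dev_fw_str - format the DC8051 firmware version as
 * "major.minor.patch" for the core's get_dev_fw_str callback.
 */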
static void hfi1_get_dev_fw_str(struct ib_device *ibdev, char *str)
{
	struct rvt_dev_info *rdi = ib_to_rvt(ibdev);
	struct hfi1_ibdev *dev = dev_from_rdi(rdi);
	u32 ver = dd_from_dev(dev)->dc8051_ver;

	snprintf(str, IB_FW_VERSION_NAME_MAX, "%u.%u.%u", dc8051_ver_maj(ver),
		 dc8051_ver_min(ver), dc8051_ver_patch(ver));
}

static const char * const driver_cntr_names[] = {
	/* must be element 0 */
	"DRIVER_KernIntr",
	"DRIVER_ErrorIntr",
	"DRIVER_Tx_Errs",
	"DRIVER_Rcv_Errs",
	"DRIVER_HW_Errs",
	"DRIVER_NoPIOBufs",
	"DRIVER_CtxtsOpen",
	"DRIVER_RcvLen_Errs",
	"DRIVER_EgrBufFull",
	"DRIVER_EgrHdrFull"
};

static DEFINE_MUTEX(cntr_names_lock); /* protects the *_cntr_names buffers */
static const char **dev_cntr_names;
static const char **port_cntr_names;
int num_driver_cntrs = ARRAY_SIZE(driver_cntr_names);
static int num_dev_cntrs;
static int num_port_cntrs;
static int cntr_names_initialized;

/*
 * Convert a list of names separated by '\n' into an array of NULL terminated
 * strings. Optionally some entries can be reserved in the array to hold extra
 * external strings.
 */
static int init_cntr_names(const char *names_in,
			   const size_t names_len,
			   int num_extra_names,
			   int *num_cntrs,
			   const char ***cntr_names)
{
	char *names_out, *p, **q;
	int i, n;

	n = 0;
	for (i = 0; i < names_len; i++)
		if (names_in[i] == '\n')
			n++;

	names_out = kmalloc((n + num_extra_names) * sizeof(char *) + names_len,
			    GFP_KERNEL);
	if (!names_out) {
		*num_cntrs = 0;
		*cntr_names = NULL;
		return -ENOMEM;
	}

	p = names_out + (n + num_extra_names) * sizeof(char *);
	memcpy(p, names_in, names_len);

	q = (char **)names_out;
	for (i = 0; i < n; i++) {
		q[i] = p;
		p = strchr(p, '\n');
		*p++ = '\0';
	}

	*num_cntrs = n;
	*cntr_names = (const char **)names_out;
	return 0;
}

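/*
 * alloc_hw_stats - lazily build the device and per-port counter name
 * tables (appending the driver counters to the device list) under
 * cntr_names_lock, then hand the appropriate table to
 * rdma_alloc_hw_stats_struct().
 */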
static struct rdma_hw_stats *alloc_hw_stats(struct ib_device *ibdev,
					    u8 port_num)
{
	int i, err;

	mutex_lock(&cntr_names_lock);
	if (!cntr_names_initialized) {
		struct hfi1_devdata *dd = dd_from_ibdev(ibdev);

		err = init_cntr_names(dd->cntrnames,
				      dd->cntrnameslen,
				      num_driver_cntrs,
				      &num_dev_cntrs,
				      &dev_cntr_names);
		if (err) {
			mutex_unlock(&cntr_names_lock);
			return NULL;
		}

		for (i = 0; i < num_driver_cntrs; i++)
			dev_cntr_names[num_dev_cntrs + i] =
				driver_cntr_names[i];

		err = init_cntr_names(dd->portcntrnames,
				      dd->portcntrnameslen,
				      0,
				      &num_port_cntrs,
				      &port_cntr_names);
		if (err) {
			kfree(dev_cntr_names);
			dev_cntr_names = NULL;
			mutex_unlock(&cntr_names_lock);
			return NULL;
		}
		cntr_names_initialized = 1;
	}
	mutex_unlock(&cntr_names_lock);

	if (!port_num)
		return rdma_alloc_hw_stats_struct(
				dev_cntr_names,
				num_dev_cntrs + num_driver_cntrs,
				RDMA_HW_STATS_DEFAULT_LIFESPAN);
	else
		return rdma_alloc_hw_stats_struct(
				port_cntr_names,
				num_port_cntrs,
				RDMA_HW_STATS_DEFAULT_LIFESPAN);
}

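/*
 * hfi1_sps_ints - sum the interrupt counter across all hfi1 devices;
 * used as element 0 ("DRIVER_KernIntr") of the driver counters.
 */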
static u64 hfi1_sps_ints(void)
{
	unsigned long index, flags;
	struct hfi1_devdata *dd;
	u64 sps_ints = 0;

	xa_lock_irqsave(&hfi1_dev_table, flags);
	xa_for_each(&hfi1_dev_table, index, dd) {
		sps_ints += get_all_cpu_total(dd->int_counter);
	}
	xa_unlock_irqrestore(&hfi1_dev_table, flags);
	return sps_ints;
}

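/*
 * get_hw_stats - fill a previously allocated rdma_hw_stats structure:
 * port 0 returns the device counters followed by the driver counters,
 * any other port returns that port's counters.
 */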
static int get_hw_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats,
			u8 port, int index)
{
	u64 *values;
	int count;

	if (!port) {
		u64 *stats = (u64 *)&hfi1_stats;
		int i;

		hfi1_read_cntrs(dd_from_ibdev(ibdev), NULL, &values);
		values[num_dev_cntrs] = hfi1_sps_ints();
		for (i = 1; i < num_driver_cntrs; i++)
			values[num_dev_cntrs + i] = stats[i];
		count = num_dev_cntrs + num_driver_cntrs;
	} else {
		struct hfi1_ibport *ibp = to_iport(ibdev, port);

		hfi1_read_portcntrs(ppd_from_ibp(ibp), NULL, &values);
		count = num_port_cntrs;
	}

	memcpy(stats->value, values, count * sizeof(u64));
	return count;
}

static const struct ib_device_ops hfi1_dev_ops = {
	.owner = THIS_MODULE,
	.driver_id = RDMA_DRIVER_HFI1,

	.alloc_hw_stats = alloc_hw_stats,
	.alloc_rdma_netdev = hfi1_vnic_alloc_rn,
	.get_dev_fw_str = hfi1_get_dev_fw_str,
	.get_hw_stats = get_hw_stats,
	.init_port = hfi1_create_port_files,
	.modify_device = modify_device,
	/* keep process mad in the driver */
	.process_mad = hfi1_process_mad,
};

/**
 * hfi1_register_ib_device - register our device with the infiniband core
 * @dd: the device data structure
 * Return 0 if successful, errno if unsuccessful.
 */
int hfi1_register_ib_device(struct hfi1_devdata *dd)
{
	struct hfi1_ibdev *dev = &dd->verbs_dev;
	struct ib_device *ibdev = &dev->rdi.ibdev;
	struct hfi1_pportdata *ppd = dd->pport;
	struct hfi1_ibport *ibp = &ppd->ibport_data;
	unsigned i;
	int ret;

	for (i = 0; i < dd->num_pports; i++)
		init_ibport(ppd + i);

	/* Only need to initialize non-zero fields. */

	timer_setup(&dev->mem_timer, mem_timer, 0);

	seqlock_init(&dev->iowait_lock);
	seqlock_init(&dev->txwait_lock);
	INIT_LIST_HEAD(&dev->txwait);
	INIT_LIST_HEAD(&dev->memwait);

	ret = verbs_txreq_init(dev);
	if (ret)
		goto err_verbs_txreq;

	/* Use first-port GUID as node guid */
	ibdev->node_guid = get_sguid(ibp, HFI1_PORT_GUID_INDEX);

	/*
	 * The system image GUID is supposed to be the same for all
	 * HFIs in a single system but since there can be other
	 * device types in the system, we can't be sure this is unique.
	 */
	if (!ib_hfi1_sys_image_guid)
		ib_hfi1_sys_image_guid = ibdev->node_guid;
	ibdev->phys_port_cnt = dd->num_pports;
	ibdev->dev.parent = &dd->pcidev->dev;

	ib_set_device_ops(ibdev, &hfi1_dev_ops);

	strlcpy(ibdev->node_desc, init_utsname()->nodename,
		sizeof(ibdev->node_desc));

	/*
	 * Fill in rvt info object.
	 */
	dd->verbs_dev.rdi.driver_f.get_pci_dev = get_pci_dev;
	dd->verbs_dev.rdi.driver_f.check_ah = hfi1_check_ah;
	dd->verbs_dev.rdi.driver_f.notify_new_ah = hfi1_notify_new_ah;
	dd->verbs_dev.rdi.driver_f.get_guid_be = hfi1_get_guid_be;
	dd->verbs_dev.rdi.driver_f.query_port_state = query_port;
	dd->verbs_dev.rdi.driver_f.shut_down_port = shut_down_port;
	dd->verbs_dev.rdi.driver_f.cap_mask_chg = hfi1_cap_mask_chg;
	/*
	 * Fill in rvt info device attributes.
	 */
	hfi1_fill_device_attr(dd);

	/* queue pair */
	dd->verbs_dev.rdi.dparms.qp_table_size = hfi1_qp_table_size;
	dd->verbs_dev.rdi.dparms.qpn_start = 0;
	dd->verbs_dev.rdi.dparms.qpn_inc = 1;
	dd->verbs_dev.rdi.dparms.qos_shift = dd->qos_shift;
	dd->verbs_dev.rdi.dparms.qpn_res_start = kdeth_qp << 16;
	dd->verbs_dev.rdi.dparms.qpn_res_end =
		dd->verbs_dev.rdi.dparms.qpn_res_start + 65535;
	dd->verbs_dev.rdi.dparms.max_rdma_atomic = HFI1_MAX_RDMA_ATOMIC;
	dd->verbs_dev.rdi.dparms.psn_mask = PSN_MASK;
	dd->verbs_dev.rdi.dparms.psn_shift = PSN_SHIFT;
	dd->verbs_dev.rdi.dparms.psn_modify_mask = PSN_MODIFY_MASK;
	dd->verbs_dev.rdi.dparms.core_cap_flags = RDMA_CORE_PORT_INTEL_OPA |
						RDMA_CORE_CAP_OPA_AH;
	dd->verbs_dev.rdi.dparms.max_mad_size = OPA_MGMT_MAD_SIZE;

	dd->verbs_dev.rdi.driver_f.qp_priv_alloc = qp_priv_alloc;
	dd->verbs_dev.rdi.driver_f.qp_priv_init = hfi1_qp_priv_init;
	dd->verbs_dev.rdi.driver_f.qp_priv_free = qp_priv_free;
	dd->verbs_dev.rdi.driver_f.free_all_qps = free_all_qps;
	dd->verbs_dev.rdi.driver_f.notify_qp_reset = notify_qp_reset;
	dd->verbs_dev.rdi.driver_f.do_send = hfi1_do_send_from_rvt;
	dd->verbs_dev.rdi.driver_f.schedule_send = hfi1_schedule_send;
	dd->verbs_dev.rdi.driver_f.schedule_send_no_lock = _hfi1_schedule_send;
	dd->verbs_dev.rdi.driver_f.get_pmtu_from_attr = get_pmtu_from_attr;
	dd->verbs_dev.rdi.driver_f.notify_error_qp = notify_error_qp;
	dd->verbs_dev.rdi.driver_f.flush_qp_waiters = flush_qp_waiters;
	dd->verbs_dev.rdi.driver_f.stop_send_queue = stop_send_queue;
	dd->verbs_dev.rdi.driver_f.quiesce_qp = quiesce_qp;
	dd->verbs_dev.rdi.driver_f.notify_error_qp = notify_error_qp;
	dd->verbs_dev.rdi.driver_f.mtu_from_qp = mtu_from_qp;
	dd->verbs_dev.rdi.driver_f.mtu_to_path_mtu = mtu_to_path_mtu;
	dd->verbs_dev.rdi.driver_f.check_modify_qp = hfi1_check_modify_qp;
	dd->verbs_dev.rdi.driver_f.modify_qp = hfi1_modify_qp;
	dd->verbs_dev.rdi.driver_f.notify_restart_rc = hfi1_restart_rc;
	dd->verbs_dev.rdi.driver_f.setup_wqe = hfi1_setup_wqe;
	dd->verbs_dev.rdi.driver_f.comp_vect_cpu_lookup =
		hfi1_comp_vect_mappings_lookup;

	/* completion queue */
	dd->verbs_dev.rdi.ibdev.num_comp_vectors = dd->comp_vect_possible_cpus;
	dd->verbs_dev.rdi.dparms.node = dd->node;

	/* misc settings */
	dd->verbs_dev.rdi.flags = 0; /* Let rdmavt handle it all */
	dd->verbs_dev.rdi.dparms.lkey_table_size = hfi1_lkey_table_size;
	dd->verbs_dev.rdi.dparms.nports = dd->num_pports;
	dd->verbs_dev.rdi.dparms.npkeys = hfi1_get_npkeys(dd);
	dd->verbs_dev.rdi.dparms.sge_copy_mode = sge_copy_mode;
	dd->verbs_dev.rdi.dparms.wss_threshold = wss_threshold;
	dd->verbs_dev.rdi.dparms.wss_clean_period = wss_clean_period;
	dd->verbs_dev.rdi.dparms.reserved_operations = 1;
	dd->verbs_dev.rdi.dparms.extra_rdma_atomic = HFI1_TID_RDMA_WRITE_CNT;

	/* post send table */
	dd->verbs_dev.rdi.post_parms = hfi1_post_parms;

	/* opcode translation table */
	dd->verbs_dev.rdi.wc_opcode = ib_hfi1_wc_opcode;

	ppd = dd->pport;
	for (i = 0; i < dd->num_pports; i++, ppd++)
		rvt_init_port(&dd->verbs_dev.rdi,
			      &ppd->ibport_data.rvp,
			      i,
			      ppd->pkeys);

	rdma_set_device_sysfs_group(&dd->verbs_dev.rdi.ibdev,
				    &ib_hfi1_attr_group);

	ret = rvt_register_device(&dd->verbs_dev.rdi);
	if (ret)
		goto err_verbs_txreq;

	ret = hfi1_verbs_register_sysfs(dd);
	if (ret)
		goto err_class;

	return ret;

err_class:
	rvt_unregister_device(&dd->verbs_dev.rdi);
err_verbs_txreq:
	verbs_txreq_exit(dev);
	dd_dev_err(dd, "cannot register verbs: %d!\n", -ret);
	return ret;
}

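/*
 * hfi1_unregister_ib_device - undo hfi1_register_ib_device: unregister
 * from sysfs and rdmavt, warn about non-empty wait lists, and free the
 * txreq cache and counter name tables.
 */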
void hfi1_unregister_ib_device(struct hfi1_devdata *dd)
{
	struct hfi1_ibdev *dev = &dd->verbs_dev;

	hfi1_verbs_unregister_sysfs(dd);

	rvt_unregister_device(&dd->verbs_dev.rdi);

	if (!list_empty(&dev->txwait))
		dd_dev_err(dd, "txwait list not empty!\n");
	if (!list_empty(&dev->memwait))
		dd_dev_err(dd, "memwait list not empty!\n");

	del_timer_sync(&dev->mem_timer);
	verbs_txreq_exit(dev);

	mutex_lock(&cntr_names_lock);
	kfree(dev_cntr_names);
	kfree(port_cntr_names);
	dev_cntr_names = NULL;
	port_cntr_names = NULL;
	cntr_names_initialized = 0;
	mutex_unlock(&cntr_names_lock);
}

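/*
 * hfi1_cnp_rcv - handle a received congestion notification packet:
 * derive the remote LID/QPN and service type from the QP, then feed the
 * notification into congestion processing via process_becn().
 */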
void hfi1_cnp_rcv(struct hfi1_packet *packet)
{
	struct hfi1_ibport *ibp = rcd_to_iport(packet->rcd);
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
	struct ib_header *hdr = packet->hdr;
	struct rvt_qp *qp = packet->qp;
	u32 lqpn, rqpn = 0;
	u16 rlid = 0;
	u8 sl, sc5, svc_type;

	switch (packet->qp->ibqp.qp_type) {
	case IB_QPT_UC:
		rlid = rdma_ah_get_dlid(&qp->remote_ah_attr);
		rqpn = qp->remote_qpn;
		svc_type = IB_CC_SVCTYPE_UC;
		break;
	case IB_QPT_RC:
		rlid = rdma_ah_get_dlid(&qp->remote_ah_attr);
		rqpn = qp->remote_qpn;
		svc_type = IB_CC_SVCTYPE_RC;
		break;
	case IB_QPT_SMI:
	case IB_QPT_GSI:
	case IB_QPT_UD:
		svc_type = IB_CC_SVCTYPE_UD;
		break;
	default:
		ibp->rvp.n_pkt_drops++;
		return;
	}

	sc5 = hfi1_9B_get_sc5(hdr, packet->rhf);
	sl = ibp->sc_to_sl[sc5];
	lqpn = qp->ibqp.qp_num;

	process_becn(ppd, sl, rlid, lqpn, rqpn, svc_type);
}