/*
 * Copyright(c) 2015 - 2018 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <rdma/ib_mad.h>
#include <rdma/ib_user_verbs.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/utsname.h>
#include <linux/rculist.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <rdma/opa_addr.h>

#include "hfi.h"
#include "common.h"
#include "device.h"
#include "trace.h"
#include "qp.h"
#include "verbs_txreq.h"
#include "debugfs.h"
#include "vnic.h"
#include "fault.h"
#include "affinity.h"

static unsigned int hfi1_lkey_table_size = 16;
module_param_named(lkey_table_size, hfi1_lkey_table_size, uint,
                   S_IRUGO);
MODULE_PARM_DESC(lkey_table_size,
                 "LKEY table size in bits (2^n, 1 <= n <= 23)");

static unsigned int hfi1_max_pds = 0xFFFF;
module_param_named(max_pds, hfi1_max_pds, uint, S_IRUGO);
MODULE_PARM_DESC(max_pds,
                 "Maximum number of protection domains to support");

static unsigned int hfi1_max_ahs = 0xFFFF;
module_param_named(max_ahs, hfi1_max_ahs, uint, S_IRUGO);
MODULE_PARM_DESC(max_ahs, "Maximum number of address handles to support");

unsigned int hfi1_max_cqes = 0x2FFFFF;
module_param_named(max_cqes, hfi1_max_cqes, uint, S_IRUGO);
MODULE_PARM_DESC(max_cqes,
                 "Maximum number of completion queue entries to support");

unsigned int hfi1_max_cqs = 0x1FFFF;
module_param_named(max_cqs, hfi1_max_cqs, uint, S_IRUGO);
MODULE_PARM_DESC(max_cqs, "Maximum number of completion queues to support");

unsigned int hfi1_max_qp_wrs = 0x3FFF;
module_param_named(max_qp_wrs, hfi1_max_qp_wrs, uint, S_IRUGO);
MODULE_PARM_DESC(max_qp_wrs, "Maximum number of QP WRs to support");

unsigned int hfi1_max_qps = 32768;
module_param_named(max_qps, hfi1_max_qps, uint, S_IRUGO);
MODULE_PARM_DESC(max_qps, "Maximum number of QPs to support");

unsigned int hfi1_max_sges = 0x60;
module_param_named(max_sges, hfi1_max_sges, uint, S_IRUGO);
MODULE_PARM_DESC(max_sges, "Maximum number of SGEs to support");

unsigned int hfi1_max_mcast_grps = 16384;
module_param_named(max_mcast_grps, hfi1_max_mcast_grps, uint, S_IRUGO);
MODULE_PARM_DESC(max_mcast_grps,
                 "Maximum number of multicast groups to support");

unsigned int hfi1_max_mcast_qp_attached = 16;
module_param_named(max_mcast_qp_attached, hfi1_max_mcast_qp_attached,
                   uint, S_IRUGO);
MODULE_PARM_DESC(max_mcast_qp_attached,
                 "Maximum number of attached QPs to support");

unsigned int hfi1_max_srqs = 1024;
module_param_named(max_srqs, hfi1_max_srqs, uint, S_IRUGO);
MODULE_PARM_DESC(max_srqs, "Maximum number of SRQs to support");

unsigned int hfi1_max_srq_sges = 128;
module_param_named(max_srq_sges, hfi1_max_srq_sges, uint, S_IRUGO);
MODULE_PARM_DESC(max_srq_sges, "Maximum number of SRQ SGEs to support");

unsigned int hfi1_max_srq_wrs = 0x1FFFF;
module_param_named(max_srq_wrs, hfi1_max_srq_wrs, uint, S_IRUGO);
MODULE_PARM_DESC(max_srq_wrs, "Maximum number of SRQ WRs to support");
127
Mike Marciniszynd0e859c2016-03-07 11:35:46 -0800128unsigned short piothreshold = 256;
Mike Marciniszyn14553ca2016-02-14 12:45:36 -0800129module_param(piothreshold, ushort, S_IRUGO);
MODULE_PARM_DESC(piothreshold, "Payload size (bytes) used to choose between PIO and SDMA");

#define COPY_CACHELESS 1
#define COPY_ADAPTIVE  2
static unsigned int sge_copy_mode;
module_param(sge_copy_mode, uint, S_IRUGO);
MODULE_PARM_DESC(sge_copy_mode,
                 "Verbs copy mode: 0 use memcpy, 1 use cacheless copy, 2 adapt based on WSS");

static void verbs_sdma_complete(
        struct sdma_txreq *cookie,
        int status);

static int pio_wait(struct rvt_qp *qp,
                    struct send_context *sc,
                    struct hfi1_pkt_state *ps,
                    u32 flag);

/* Length of buffer to create verbs txreq cache name */
#define TXREQ_NAME_LEN 24

/* 16B trailing buffer */
static const u8 trail_buf[MAX_16B_PADDING];

static uint wss_threshold;
module_param(wss_threshold, uint, S_IRUGO);
MODULE_PARM_DESC(wss_threshold, "Percentage (1-100) of LLC to use as a threshold for a cacheless copy");
static uint wss_clean_period = 256;
module_param(wss_clean_period, uint, S_IRUGO);
MODULE_PARM_DESC(wss_clean_period, "Count of verbs copies before an entry in the page copy table is cleaned");

/* memory working set size */
struct hfi1_wss {
        unsigned long *entries;         /* bitmap: one bit per page */
        atomic_t total_count;           /* number of bits currently set */
        atomic_t clean_counter;         /* countdown to the next entry clean */
        atomic_t clean_entry;           /* next bitmap word to clean */

        int threshold;                  /* set-bit count that triggers cacheless copy */
        int num_entries;                /* bitmap words; always a power of 2 */
        long pages_mask;                /* mask hashing an address to a page bit */
};

static struct hfi1_wss wss;

int hfi1_wss_init(void)
{
        long llc_size;
        long llc_bits;
        long table_size;
        long table_bits;

        /* check for a valid percent range - default to 80 if none or invalid */
        if (wss_threshold < 1 || wss_threshold > 100)
                wss_threshold = 80;
        /* reject a wildly large period */
        if (wss_clean_period > 1000000)
                wss_clean_period = 256;
        /* reject a zero period */
        if (wss_clean_period == 0)
                wss_clean_period = 1;

        /*
         * Calculate the table size - the next power of 2 larger than the
         * LLC size.  LLC size is in KiB.
         */
        llc_size = wss_llc_size() * 1024;
        table_size = roundup_pow_of_two(llc_size);

        /* one bit per page in rounded up table */
        llc_bits = llc_size / PAGE_SIZE;
        table_bits = table_size / PAGE_SIZE;
        wss.pages_mask = table_bits - 1;
        wss.num_entries = table_bits / BITS_PER_LONG;

        wss.threshold = (llc_bits * wss_threshold) / 100;
        if (wss.threshold == 0)
                wss.threshold = 1;
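        /*
         * Worked example (hypothetical sizes): a 32 MiB LLC with 4 KiB
         * pages gives llc_bits = table_bits = 8192, so pages_mask = 8191
         * and num_entries = 8192 / 64 = 128 longs; the default 80%
         * threshold then works out to 6553 pages.
         */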

        atomic_set(&wss.clean_counter, wss_clean_period);

        wss.entries = kcalloc(wss.num_entries, sizeof(*wss.entries),
                              GFP_KERNEL);
        if (!wss.entries) {
                hfi1_wss_exit();
                return -ENOMEM;
        }

        return 0;
}

void hfi1_wss_exit(void)
{
        /* coded to handle partially initialized and repeat callers */
        kfree(wss.entries);
        wss.entries = NULL;
}

/*
 * Advance the clean counter. When the clean period has expired,
 * clean an entry.
 *
 * This is implemented in atomics to avoid locking. Because multiple
 * variables are involved, it can be racy which can lead to slightly
 * inaccurate information. Since this is only a heuristic, this is
 * OK. Any inaccuracies will clean themselves out as the counter
 * advances. That said, it is unlikely the entry clean operation will
 * race - the next possible racer will not start until the next clean
 * period.
 *
 * The clean counter is implemented as a decrement to zero. When zero
 * is reached an entry is cleaned.
 */
static void wss_advance_clean_counter(void)
{
        int entry;
        int weight;
        unsigned long bits;

        /* become the cleaner if we decrement the counter to zero */
        if (atomic_dec_and_test(&wss.clean_counter)) {
                /*
                 * Set, not add, the clean period. This avoids an issue
                 * where the counter could decrement below the clean period.
                 * Doing a set can result in lost decrements, slowing the
                 * clean advance. Since this is a heuristic, this possible
                 * slowdown is OK.
                 *
                 * An alternative is to loop, advancing the counter by a
                 * clean period until the result is > 0. However, this could
                 * lead to several threads keeping another in the clean loop.
                 * This could be mitigated by limiting the number of times
                 * we stay in the loop.
                 */
                atomic_set(&wss.clean_counter, wss_clean_period);

                /*
                 * Uniquely grab the entry to clean and move to next.
                 * The current entry is always the lower bits of
                 * wss.clean_entry. The table size, wss.num_entries,
                 * is always a power-of-2.
                 */
                entry = (atomic_inc_return(&wss.clean_entry) - 1)
                        & (wss.num_entries - 1);

                /* clear the entry and count the bits */
                bits = xchg(&wss.entries[entry], 0);
                weight = hweight64((u64)bits);
                /* only adjust the contended total count if needed */
                if (weight)
                        atomic_sub(weight, &wss.total_count);
        }
}

/*
 * Insert the given address into the working set array.
 */
static void wss_insert(void *address)
{
        u32 page = ((unsigned long)address >> PAGE_SHIFT) & wss.pages_mask;
        u32 entry = page / BITS_PER_LONG; /* assumes this ends up a shift */
        u32 nr = page & (BITS_PER_LONG - 1);

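        /*
         * Hypothetical example: with 4 KiB pages, 64-bit longs, and a
         * pages_mask of 8191, address 0x7f0000042000 hashes to page 66,
         * i.e. bit 2 of bitmap word 1.
         */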
        if (!test_and_set_bit(nr, &wss.entries[entry]))
                atomic_inc(&wss.total_count);

        wss_advance_clean_counter();
}

/*
 * Is the working set larger than the threshold?
 */
static inline bool wss_exceeds_threshold(void)
{
        return atomic_read(&wss.total_count) >= wss.threshold;
}

/*
 * Translate ib_wr_opcode into ib_wc_opcode.
 */
const enum ib_wc_opcode ib_hfi1_wc_opcode[] = {
        [IB_WR_RDMA_WRITE] = IB_WC_RDMA_WRITE,
        [IB_WR_RDMA_WRITE_WITH_IMM] = IB_WC_RDMA_WRITE,
        [IB_WR_SEND] = IB_WC_SEND,
        [IB_WR_SEND_WITH_IMM] = IB_WC_SEND,
        [IB_WR_RDMA_READ] = IB_WC_RDMA_READ,
        [IB_WR_ATOMIC_CMP_AND_SWP] = IB_WC_COMP_SWAP,
        [IB_WR_ATOMIC_FETCH_AND_ADD] = IB_WC_FETCH_ADD,
        [IB_WR_SEND_WITH_INV] = IB_WC_SEND,
        [IB_WR_LOCAL_INV] = IB_WC_LOCAL_INV,
        [IB_WR_REG_MR] = IB_WC_REG_MR
};

/*
 * Length of header by opcode, 0 --> not supported
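 * Each entry is LRH (12) + BTH (8) plus any extended transport header
 * bytes: RETH 16, AETH 4, immediate data 4, IETH 4, DETH 8,
 * AtomicETH 28, AtomicAckETH 8.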
 */
const u8 hdr_len_by_opcode[256] = {
        /* RC */
        [IB_OPCODE_RC_SEND_FIRST] = 12 + 8,
        [IB_OPCODE_RC_SEND_MIDDLE] = 12 + 8,
        [IB_OPCODE_RC_SEND_LAST] = 12 + 8,
        [IB_OPCODE_RC_SEND_LAST_WITH_IMMEDIATE] = 12 + 8 + 4,
        [IB_OPCODE_RC_SEND_ONLY] = 12 + 8,
        [IB_OPCODE_RC_SEND_ONLY_WITH_IMMEDIATE] = 12 + 8 + 4,
        [IB_OPCODE_RC_RDMA_WRITE_FIRST] = 12 + 8 + 16,
        [IB_OPCODE_RC_RDMA_WRITE_MIDDLE] = 12 + 8,
        [IB_OPCODE_RC_RDMA_WRITE_LAST] = 12 + 8,
        [IB_OPCODE_RC_RDMA_WRITE_LAST_WITH_IMMEDIATE] = 12 + 8 + 4,
        [IB_OPCODE_RC_RDMA_WRITE_ONLY] = 12 + 8 + 16,
        [IB_OPCODE_RC_RDMA_WRITE_ONLY_WITH_IMMEDIATE] = 12 + 8 + 20,
        [IB_OPCODE_RC_RDMA_READ_REQUEST] = 12 + 8 + 16,
        [IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST] = 12 + 8 + 4,
        [IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE] = 12 + 8,
        [IB_OPCODE_RC_RDMA_READ_RESPONSE_LAST] = 12 + 8 + 4,
        [IB_OPCODE_RC_RDMA_READ_RESPONSE_ONLY] = 12 + 8 + 4,
        [IB_OPCODE_RC_ACKNOWLEDGE] = 12 + 8 + 4,
        [IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE] = 12 + 8 + 4 + 8,
        [IB_OPCODE_RC_COMPARE_SWAP] = 12 + 8 + 28,
        [IB_OPCODE_RC_FETCH_ADD] = 12 + 8 + 28,
        [IB_OPCODE_RC_SEND_LAST_WITH_INVALIDATE] = 12 + 8 + 4,
        [IB_OPCODE_RC_SEND_ONLY_WITH_INVALIDATE] = 12 + 8 + 4,
        /* UC */
        [IB_OPCODE_UC_SEND_FIRST] = 12 + 8,
        [IB_OPCODE_UC_SEND_MIDDLE] = 12 + 8,
        [IB_OPCODE_UC_SEND_LAST] = 12 + 8,
        [IB_OPCODE_UC_SEND_LAST_WITH_IMMEDIATE] = 12 + 8 + 4,
        [IB_OPCODE_UC_SEND_ONLY] = 12 + 8,
        [IB_OPCODE_UC_SEND_ONLY_WITH_IMMEDIATE] = 12 + 8 + 4,
        [IB_OPCODE_UC_RDMA_WRITE_FIRST] = 12 + 8 + 16,
        [IB_OPCODE_UC_RDMA_WRITE_MIDDLE] = 12 + 8,
        [IB_OPCODE_UC_RDMA_WRITE_LAST] = 12 + 8,
        [IB_OPCODE_UC_RDMA_WRITE_LAST_WITH_IMMEDIATE] = 12 + 8 + 4,
        [IB_OPCODE_UC_RDMA_WRITE_ONLY] = 12 + 8 + 16,
        [IB_OPCODE_UC_RDMA_WRITE_ONLY_WITH_IMMEDIATE] = 12 + 8 + 20,
        /* UD */
        [IB_OPCODE_UD_SEND_ONLY] = 12 + 8 + 8,
        [IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE] = 12 + 8 + 12
};

static const opcode_handler opcode_handler_tbl[256] = {
        /* RC */
        [IB_OPCODE_RC_SEND_FIRST] = &hfi1_rc_rcv,
        [IB_OPCODE_RC_SEND_MIDDLE] = &hfi1_rc_rcv,
        [IB_OPCODE_RC_SEND_LAST] = &hfi1_rc_rcv,
        [IB_OPCODE_RC_SEND_LAST_WITH_IMMEDIATE] = &hfi1_rc_rcv,
        [IB_OPCODE_RC_SEND_ONLY] = &hfi1_rc_rcv,
        [IB_OPCODE_RC_SEND_ONLY_WITH_IMMEDIATE] = &hfi1_rc_rcv,
        [IB_OPCODE_RC_RDMA_WRITE_FIRST] = &hfi1_rc_rcv,
        [IB_OPCODE_RC_RDMA_WRITE_MIDDLE] = &hfi1_rc_rcv,
        [IB_OPCODE_RC_RDMA_WRITE_LAST] = &hfi1_rc_rcv,
        [IB_OPCODE_RC_RDMA_WRITE_LAST_WITH_IMMEDIATE] = &hfi1_rc_rcv,
        [IB_OPCODE_RC_RDMA_WRITE_ONLY] = &hfi1_rc_rcv,
        [IB_OPCODE_RC_RDMA_WRITE_ONLY_WITH_IMMEDIATE] = &hfi1_rc_rcv,
        [IB_OPCODE_RC_RDMA_READ_REQUEST] = &hfi1_rc_rcv,
        [IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST] = &hfi1_rc_rcv,
        [IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE] = &hfi1_rc_rcv,
        [IB_OPCODE_RC_RDMA_READ_RESPONSE_LAST] = &hfi1_rc_rcv,
        [IB_OPCODE_RC_RDMA_READ_RESPONSE_ONLY] = &hfi1_rc_rcv,
        [IB_OPCODE_RC_ACKNOWLEDGE] = &hfi1_rc_rcv,
        [IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE] = &hfi1_rc_rcv,
        [IB_OPCODE_RC_COMPARE_SWAP] = &hfi1_rc_rcv,
        [IB_OPCODE_RC_FETCH_ADD] = &hfi1_rc_rcv,
        [IB_OPCODE_RC_SEND_LAST_WITH_INVALIDATE] = &hfi1_rc_rcv,
        [IB_OPCODE_RC_SEND_ONLY_WITH_INVALIDATE] = &hfi1_rc_rcv,
        /* UC */
        [IB_OPCODE_UC_SEND_FIRST] = &hfi1_uc_rcv,
        [IB_OPCODE_UC_SEND_MIDDLE] = &hfi1_uc_rcv,
        [IB_OPCODE_UC_SEND_LAST] = &hfi1_uc_rcv,
        [IB_OPCODE_UC_SEND_LAST_WITH_IMMEDIATE] = &hfi1_uc_rcv,
        [IB_OPCODE_UC_SEND_ONLY] = &hfi1_uc_rcv,
        [IB_OPCODE_UC_SEND_ONLY_WITH_IMMEDIATE] = &hfi1_uc_rcv,
        [IB_OPCODE_UC_RDMA_WRITE_FIRST] = &hfi1_uc_rcv,
        [IB_OPCODE_UC_RDMA_WRITE_MIDDLE] = &hfi1_uc_rcv,
        [IB_OPCODE_UC_RDMA_WRITE_LAST] = &hfi1_uc_rcv,
        [IB_OPCODE_UC_RDMA_WRITE_LAST_WITH_IMMEDIATE] = &hfi1_uc_rcv,
        [IB_OPCODE_UC_RDMA_WRITE_ONLY] = &hfi1_uc_rcv,
        [IB_OPCODE_UC_RDMA_WRITE_ONLY_WITH_IMMEDIATE] = &hfi1_uc_rcv,
        /* UD */
        [IB_OPCODE_UD_SEND_ONLY] = &hfi1_ud_rcv,
        [IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE] = &hfi1_ud_rcv,
        /* CNP */
        [IB_OPCODE_CNP] = &hfi1_cnp_rcv
};

#define OPMASK 0x1f

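/*
 * Per opcode-group bitmasks of the single-packet opcodes that are
 * eligible for PIO; consulted by get_send_routine() below.
 */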
static const u32 pio_opmask[BIT(3)] = {
        /* RC */
        [IB_OPCODE_RC >> 5] =
                BIT(RC_OP(SEND_ONLY) & OPMASK) |
                BIT(RC_OP(SEND_ONLY_WITH_IMMEDIATE) & OPMASK) |
                BIT(RC_OP(RDMA_WRITE_ONLY) & OPMASK) |
                BIT(RC_OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE) & OPMASK) |
                BIT(RC_OP(RDMA_READ_REQUEST) & OPMASK) |
                BIT(RC_OP(ACKNOWLEDGE) & OPMASK) |
                BIT(RC_OP(ATOMIC_ACKNOWLEDGE) & OPMASK) |
                BIT(RC_OP(COMPARE_SWAP) & OPMASK) |
                BIT(RC_OP(FETCH_ADD) & OPMASK),
        /* UC */
        [IB_OPCODE_UC >> 5] =
                BIT(UC_OP(SEND_ONLY) & OPMASK) |
                BIT(UC_OP(SEND_ONLY_WITH_IMMEDIATE) & OPMASK) |
                BIT(UC_OP(RDMA_WRITE_ONLY) & OPMASK) |
                BIT(UC_OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE) & OPMASK),
};

/*
 * System image GUID.
 */
__be64 ib_hfi1_sys_image_guid;

/**
 * hfi1_copy_sge - copy data to SGE memory
 * @ss: the SGE state
 * @data: the data to copy
 * @length: the length of the data
 * @release: boolean to release MR
 * @copy_last: do a separate copy of the last 8 bytes
 */
void hfi1_copy_sge(
        struct rvt_sge_state *ss,
        void *data, u32 length,
        bool release,
        bool copy_last)
{
        struct rvt_sge *sge = &ss->sge;
        int i;
        bool in_last = false;
        bool cacheless_copy = false;

        if (sge_copy_mode == COPY_CACHELESS) {
                cacheless_copy = length >= PAGE_SIZE;
        } else if (sge_copy_mode == COPY_ADAPTIVE) {
                if (length >= PAGE_SIZE) {
                        /*
                         * NOTE: this *assumes*:
                         * o The first vaddr is the dest.
                         * o If multiple pages, then vaddr is sequential.
                         */
                        wss_insert(sge->vaddr);
                        if (length >= (2 * PAGE_SIZE))
                                wss_insert(sge->vaddr + PAGE_SIZE);

                        cacheless_copy = wss_exceeds_threshold();
                } else {
                        wss_advance_clean_counter();
                }
        }
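        /*
         * When copy_last is set, hold back the final 8 bytes so they are
         * copied one byte at a time after the bulk copy; if the whole
         * transfer is 8 bytes or less, skip straight to the byte-by-byte
         * path.
         */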
        if (copy_last) {
                if (length > 8) {
                        length -= 8;
                } else {
                        copy_last = false;
                        in_last = true;
                }
        }

again:
        while (length) {
                u32 len = rvt_get_sge_length(sge, length);

                WARN_ON_ONCE(len == 0);
                if (unlikely(in_last)) {
                        /* enforce byte transfer ordering */
                        for (i = 0; i < len; i++)
                                ((u8 *)sge->vaddr)[i] = ((u8 *)data)[i];
                } else if (cacheless_copy) {
                        cacheless_memcpy(sge->vaddr, data, len);
                } else {
                        memcpy(sge->vaddr, data, len);
                }
                rvt_update_sge(ss, len, release);
                data += len;
                length -= len;
        }

        if (copy_last) {
                copy_last = false;
                in_last = true;
                length = 8;
                goto again;
        }
}

/*
 * Make sure the QP is ready and able to accept the given opcode.
 */
static inline opcode_handler qp_ok(struct hfi1_packet *packet)
{
        if (!(ib_rvt_state_ops[packet->qp->state] & RVT_PROCESS_RECV_OK))
                return NULL;
        if (((packet->opcode & RVT_OPCODE_QP_MASK) ==
             packet->qp->allowed_ops) ||
            (packet->opcode == IB_OPCODE_CNP))
                return opcode_handler_tbl[packet->opcode];

        return NULL;
}

static u64 hfi1_fault_tx(struct rvt_qp *qp, u8 opcode, u64 pbc)
{
#ifdef CONFIG_FAULT_INJECTION
        if ((opcode & IB_OPCODE_MSP) == IB_OPCODE_MSP)
                /*
                 * In order to drop non-IB traffic we
                 * set PbcInsertHrc to NONE (0x2).
                 * The packet will still be delivered
                 * to the receiving node but a
                 * KHdrHCRCErr (KDETH packet with a bad
                 * HCRC) will be triggered and the
                 * packet will not be delivered to the
                 * correct context.
                 */
                pbc |= (u64)PBC_IHCRC_NONE << PBC_INSERT_HCRC_SHIFT;
        else
                /*
                 * In order to drop regular verbs
                 * traffic we set the PbcTestEbp
                 * flag. The packet will still be
                 * delivered to the receiving node but
                 * a 'late ebp error' will be
                 * triggered and will be dropped.
                 */
                pbc |= PBC_TEST_EBP;
#endif
        return pbc;
}

static int hfi1_do_pkey_check(struct hfi1_packet *packet)
{
        struct hfi1_ctxtdata *rcd = packet->rcd;
        struct hfi1_pportdata *ppd = rcd->ppd;
        struct hfi1_16b_header *hdr = packet->hdr;
        u16 pkey;

        /* Pkey check needed only for bypass packets */
        if (packet->etype != RHF_RCV_TYPE_BYPASS)
                return 0;

        /* Perform pkey check */
        pkey = hfi1_16B_get_pkey(hdr);
        return ingress_pkey_check(ppd, pkey, packet->sc,
                                  packet->qp->s_pkey_index,
                                  packet->slid, true);
}

static inline void hfi1_handle_packet(struct hfi1_packet *packet,
                                      bool is_mcast)
{
        u32 qp_num;
        struct hfi1_ctxtdata *rcd = packet->rcd;
        struct hfi1_pportdata *ppd = rcd->ppd;
        struct hfi1_ibport *ibp = rcd_to_iport(rcd);
        struct rvt_dev_info *rdi = &ppd->dd->verbs_dev.rdi;
        opcode_handler packet_handler;
        unsigned long flags;

        inc_opstats(packet->tlen, &rcd->opstats->stats[packet->opcode]);

        if (unlikely(is_mcast)) {
                struct rvt_mcast *mcast;
                struct rvt_mcast_qp *p;

                if (!packet->grh)
                        goto drop;
                mcast = rvt_mcast_find(&ibp->rvp,
                                       &packet->grh->dgid,
                                       opa_get_lid(packet->dlid, 9B));
                if (!mcast)
                        goto drop;
                list_for_each_entry_rcu(p, &mcast->qp_list, list) {
                        packet->qp = p->qp;
                        if (hfi1_do_pkey_check(packet))
                                goto drop;
                        spin_lock_irqsave(&packet->qp->r_lock, flags);
                        packet_handler = qp_ok(packet);
                        if (likely(packet_handler))
                                packet_handler(packet);
                        else
                                ibp->rvp.n_pkt_drops++;
                        spin_unlock_irqrestore(&packet->qp->r_lock, flags);
                }
                /*
                 * Notify rvt_multicast_detach() if it is waiting for us
                 * to finish.
                 */
                if (atomic_dec_return(&mcast->refcount) <= 1)
                        wake_up(&mcast->wait);
        } else {
                /* Get the destination QP number. */
                if (packet->etype == RHF_RCV_TYPE_BYPASS &&
                    hfi1_16B_get_l4(packet->hdr) == OPA_16B_L4_FM)
                        qp_num = hfi1_16B_get_dest_qpn(packet->mgmt);
                else
                        qp_num = ib_bth_get_qpn(packet->ohdr);

                rcu_read_lock();
                packet->qp = rvt_lookup_qpn(rdi, &ibp->rvp, qp_num);
                if (!packet->qp)
                        goto unlock_drop;

                if (hfi1_do_pkey_check(packet))
                        goto unlock_drop;

                spin_lock_irqsave(&packet->qp->r_lock, flags);
                packet_handler = qp_ok(packet);
                if (likely(packet_handler))
                        packet_handler(packet);
                else
                        ibp->rvp.n_pkt_drops++;
                spin_unlock_irqrestore(&packet->qp->r_lock, flags);
                rcu_read_unlock();
        }
        return;
unlock_drop:
        rcu_read_unlock();
drop:
        ibp->rvp.n_pkt_drops++;
}

/**
 * hfi1_ib_rcv - process an incoming packet
 * @packet: data packet information
 *
 * This is called to process an incoming packet at interrupt level.
 */
void hfi1_ib_rcv(struct hfi1_packet *packet)
{
        struct hfi1_ctxtdata *rcd = packet->rcd;

        trace_input_ibhdr(rcd->dd, packet, !!(rhf_dc_info(packet->rhf)));
        hfi1_handle_packet(packet, hfi1_check_mcast(packet->dlid));
}

void hfi1_16B_rcv(struct hfi1_packet *packet)
{
        struct hfi1_ctxtdata *rcd = packet->rcd;

        trace_input_ibhdr(rcd->dd, packet, false);
        hfi1_handle_packet(packet, hfi1_check_mcast(packet->dlid));
}

/*
 * This is called from a timer to check for QPs
 * which need kernel memory in order to send a packet.
 */
static void mem_timer(struct timer_list *t)
{
        struct hfi1_ibdev *dev = from_timer(dev, t, mem_timer);
        struct list_head *list = &dev->memwait;
        struct rvt_qp *qp = NULL;
        struct iowait *wait;
        unsigned long flags;
        struct hfi1_qp_priv *priv;

        write_seqlock_irqsave(&dev->iowait_lock, flags);
        if (!list_empty(list)) {
                wait = list_first_entry(list, struct iowait, list);
                qp = iowait_to_qp(wait);
                priv = qp->priv;
                list_del_init(&priv->s_iowait.list);
                priv->s_iowait.lock = NULL;
                /* refcount held until actual wake up */
                if (!list_empty(list))
                        mod_timer(&dev->mem_timer, jiffies + 1);
        }
        write_sequnlock_irqrestore(&dev->iowait_lock, flags);

        if (qp)
                hfi1_qp_wakeup(qp, RVT_S_WAIT_KMEM);
}

/*
 * This is called with progress side lock held.
 */
/* New API */
static void verbs_sdma_complete(
        struct sdma_txreq *cookie,
        int status)
{
        struct verbs_txreq *tx =
                container_of(cookie, struct verbs_txreq, txreq);
        struct rvt_qp *qp = tx->qp;

        spin_lock(&qp->s_lock);
        if (tx->wqe) {
                hfi1_send_complete(qp, tx->wqe, IB_WC_SUCCESS);
        } else if (qp->ibqp.qp_type == IB_QPT_RC) {
                struct hfi1_opa_header *hdr;

                hdr = &tx->phdr.hdr;
                hfi1_rc_send_complete(qp, hdr);
        }
        spin_unlock(&qp->s_lock);

        hfi1_put_txreq(tx);
}

static int wait_kmem(struct hfi1_ibdev *dev,
                     struct rvt_qp *qp,
                     struct hfi1_pkt_state *ps)
{
        struct hfi1_qp_priv *priv = qp->priv;
        unsigned long flags;
        int ret = 0;

        spin_lock_irqsave(&qp->s_lock, flags);
        if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) {
                write_seqlock(&dev->iowait_lock);
                list_add_tail(&ps->s_txreq->txreq.list,
                              &priv->s_iowait.tx_head);
                if (list_empty(&priv->s_iowait.list)) {
                        if (list_empty(&dev->memwait))
                                mod_timer(&dev->mem_timer, jiffies + 1);
                        qp->s_flags |= RVT_S_WAIT_KMEM;
                        list_add_tail(&priv->s_iowait.list, &dev->memwait);
                        priv->s_iowait.lock = &dev->iowait_lock;
                        trace_hfi1_qpsleep(qp, RVT_S_WAIT_KMEM);
                        rvt_get_qp(qp);
                }
                write_sequnlock(&dev->iowait_lock);
                qp->s_flags &= ~RVT_S_BUSY;
                ret = -EBUSY;
        }
        spin_unlock_irqrestore(&qp->s_lock, flags);

        return ret;
}

/*
 * This routine calls txadds for each sg entry.
 *
 * Add failures will revert the sge cursor
 */
static noinline int build_verbs_ulp_payload(
        struct sdma_engine *sde,
        u32 length,
        struct verbs_txreq *tx)
{
        struct rvt_sge_state *ss = tx->ss;
        struct rvt_sge *sg_list = ss->sg_list;
        struct rvt_sge sge = ss->sge;
        u8 num_sge = ss->num_sge;
        u32 len;
        int ret = 0;

        while (length) {
                len = ss->sge.length;
                if (len > length)
                        len = length;
                if (len > ss->sge.sge_length)
                        len = ss->sge.sge_length;
                WARN_ON_ONCE(len == 0);
                ret = sdma_txadd_kvaddr(
                        sde->dd,
                        &tx->txreq,
                        ss->sge.vaddr,
                        len);
                if (ret)
                        goto bail_txadd;
                rvt_update_sge(ss, len, false);
                length -= len;
        }
        return ret;
bail_txadd:
        /* unwind cursor */
        ss->sge = sge;
        ss->num_sge = num_sge;
        ss->sg_list = sg_list;
        return ret;
}

/**
 * update_tx_opstats - record stats by opcode
 * @qp: the QP
 * @ps: transmit packet state
 * @plen: the plen in dwords
 *
 * This is a routine to record the tx opstats after a
 * packet has been presented to the egress mechanism.
 */
static void update_tx_opstats(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
                              u32 plen)
{
#ifdef CONFIG_DEBUG_FS
        struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);
        struct hfi1_opcode_stats_perctx *s = get_cpu_ptr(dd->tx_opstats);

        inc_opstats(plen * 4, &s->stats[ps->opcode]);
        put_cpu_ptr(s);
#endif
}

/*
 * Build the number of DMA descriptors needed to send length bytes of data.
 *
 * NOTE: DMA mapping is held in the tx until completed in the ring or
 *       the tx desc is freed without having been submitted to the ring
 *
 * This routine ensures all the helper routine calls succeed.
 */
/* New API */
static int build_verbs_tx_desc(
        struct sdma_engine *sde,
        u32 length,
        struct verbs_txreq *tx,
        struct hfi1_ahg_info *ahg_info,
        u64 pbc)
{
        int ret = 0;
        struct hfi1_sdma_header *phdr = &tx->phdr;
        u16 hdrbytes = (tx->hdr_dwords + sizeof(pbc) / 4) << 2;
        u8 extra_bytes = 0;

        if (tx->phdr.hdr.hdr_type) {
                /*
                 * hdrbytes accounts for PBC. Need to subtract 8 bytes
                 * before calculating padding.
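                 * extra_bytes also covers the ICRC ((SIZE_OF_CRC << 2)
                 * bytes) and the LT byte(s) (SIZE_OF_LT).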
                 */
                extra_bytes = hfi1_get_16b_padding(hdrbytes - 8, length) +
                              (SIZE_OF_CRC << 2) + SIZE_OF_LT;
        }
        if (!ahg_info->ahgcount) {
                ret = sdma_txinit_ahg(
                        &tx->txreq,
                        ahg_info->tx_flags,
                        hdrbytes + length +
                        extra_bytes,
                        ahg_info->ahgidx,
                        0,
                        NULL,
                        0,
                        verbs_sdma_complete);
                if (ret)
                        goto bail_txadd;
                phdr->pbc = cpu_to_le64(pbc);
                ret = sdma_txadd_kvaddr(
                        sde->dd,
                        &tx->txreq,
                        phdr,
                        hdrbytes);
                if (ret)
                        goto bail_txadd;
        } else {
                ret = sdma_txinit_ahg(
                        &tx->txreq,
                        ahg_info->tx_flags,
                        length,
                        ahg_info->ahgidx,
                        ahg_info->ahgcount,
                        ahg_info->ahgdesc,
                        hdrbytes,
                        verbs_sdma_complete);
                if (ret)
                        goto bail_txadd;
        }
        /* add the ulp payload - if any. tx->ss can be NULL for acks */
        if (tx->ss) {
                ret = build_verbs_ulp_payload(sde, length, tx);
                if (ret)
                        goto bail_txadd;
        }

        /* add icrc, lt byte, and padding to flit */
        if (extra_bytes)
                ret = sdma_txadd_kvaddr(sde->dd, &tx->txreq,
                                        (void *)trail_buf, extra_bytes);

bail_txadd:
        return ret;
}

int hfi1_verbs_send_dma(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
                        u64 pbc)
{
        struct hfi1_qp_priv *priv = qp->priv;
        struct hfi1_ahg_info *ahg_info = priv->s_ahg;
        u32 hdrwords = ps->s_txreq->hdr_dwords;
        u32 len = ps->s_txreq->s_cur_size;
        u32 plen;
        struct hfi1_ibdev *dev = ps->dev;
        struct hfi1_pportdata *ppd = ps->ppd;
        struct verbs_txreq *tx;
        u8 sc5 = priv->s_sc;
        int ret;
        u32 dwords;

        if (ps->s_txreq->phdr.hdr.hdr_type) {
                u8 extra_bytes = hfi1_get_16b_padding((hdrwords << 2), len);

                dwords = (len + extra_bytes + (SIZE_OF_CRC << 2) +
                          SIZE_OF_LT) >> 2;
        } else {
                dwords = (len + 3) >> 2;
        }
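        /* plen (dwords) = PBC (2) + header dwords + padded payload dwords */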
        plen = hdrwords + dwords + sizeof(pbc) / 4;

        tx = ps->s_txreq;
        if (!sdma_txreq_built(&tx->txreq)) {
                if (likely(pbc == 0)) {
                        u32 vl = sc_to_vlt(dd_from_ibdev(qp->ibqp.device), sc5);

                        /* No vl15 here */
                        /* set PBC_DC_INFO bit (aka SC[4]) in pbc */
                        if (ps->s_txreq->phdr.hdr.hdr_type)
                                pbc |= PBC_PACKET_BYPASS |
                                       PBC_INSERT_BYPASS_ICRC;
                        else
                                pbc |= (ib_is_sc5(sc5) << PBC_DC_INFO_SHIFT);

                        if (unlikely(hfi1_dbg_should_fault_tx(qp, ps->opcode)))
                                pbc = hfi1_fault_tx(qp, ps->opcode, pbc);
                        pbc = create_pbc(ppd,
                                         pbc,
                                         qp->srate_mbps,
                                         vl,
                                         plen);
                }
                tx->wqe = qp->s_wqe;
                ret = build_verbs_tx_desc(tx->sde, len, tx, ahg_info, pbc);
                if (unlikely(ret))
                        goto bail_build;
        }
        ret = sdma_send_txreq(tx->sde, &priv->s_iowait, &tx->txreq,
                              ps->pkts_sent);
        if (unlikely(ret < 0)) {
                if (ret == -ECOMM)
                        goto bail_ecomm;
                return ret;
        }

        update_tx_opstats(qp, ps, plen);
        trace_sdma_output_ibhdr(dd_from_ibdev(qp->ibqp.device),
                                &ps->s_txreq->phdr.hdr, ib_is_sc5(sc5));
        return ret;

bail_ecomm:
        /* The current one got "sent" */
        return 0;
bail_build:
        ret = wait_kmem(dev, qp, ps);
        if (!ret) {
                /* free txreq - bad state */
                hfi1_put_txreq(ps->s_txreq);
                ps->s_txreq = NULL;
        }
        return ret;
}

/*
 * If we are now in the error state, return zero to flush the
 * send work request.
 */
static int pio_wait(struct rvt_qp *qp,
                    struct send_context *sc,
                    struct hfi1_pkt_state *ps,
                    u32 flag)
{
        struct hfi1_qp_priv *priv = qp->priv;
        struct hfi1_devdata *dd = sc->dd;
        struct hfi1_ibdev *dev = &dd->verbs_dev;
        unsigned long flags;
        int ret = 0;

        /*
         * Note that as soon as want_buffer() is called and
         * possibly before it returns, sc_piobufavail()
         * could be called. Therefore, put QP on the I/O wait list before
         * enabling the PIO avail interrupt.
         */
        spin_lock_irqsave(&qp->s_lock, flags);
        if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) {
                write_seqlock(&dev->iowait_lock);
                list_add_tail(&ps->s_txreq->txreq.list,
                              &priv->s_iowait.tx_head);
                if (list_empty(&priv->s_iowait.list)) {
                        struct hfi1_ibdev *dev = &dd->verbs_dev;
                        int was_empty;

                        dev->n_piowait += !!(flag & RVT_S_WAIT_PIO);
                        dev->n_piodrain += !!(flag & RVT_S_WAIT_PIO_DRAIN);
                        qp->s_flags |= flag;
                        was_empty = list_empty(&sc->piowait);
                        iowait_queue(ps->pkts_sent, &priv->s_iowait,
                                     &sc->piowait);
                        priv->s_iowait.lock = &dev->iowait_lock;
                        trace_hfi1_qpsleep(qp, RVT_S_WAIT_PIO);
                        rvt_get_qp(qp);
                        /* counting: only call wantpiobuf_intr if first user */
                        if (was_empty)
                                hfi1_sc_wantpiobuf_intr(sc, 1);
                }
                write_sequnlock(&dev->iowait_lock);
                qp->s_flags &= ~RVT_S_BUSY;
                ret = -EBUSY;
        }
        spin_unlock_irqrestore(&qp->s_lock, flags);
        return ret;
}

static void verbs_pio_complete(void *arg, int code)
{
        struct rvt_qp *qp = (struct rvt_qp *)arg;
        struct hfi1_qp_priv *priv = qp->priv;

        if (iowait_pio_dec(&priv->s_iowait))
                iowait_drain_wakeup(&priv->s_iowait);
}

int hfi1_verbs_send_pio(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
                        u64 pbc)
{
        struct hfi1_qp_priv *priv = qp->priv;
        u32 hdrwords = ps->s_txreq->hdr_dwords;
        struct rvt_sge_state *ss = ps->s_txreq->ss;
        u32 len = ps->s_txreq->s_cur_size;
        u32 dwords;
        u32 plen;
        struct hfi1_pportdata *ppd = ps->ppd;
        u32 *hdr;
        u8 sc5;
        unsigned long flags = 0;
        struct send_context *sc;
        struct pio_buf *pbuf;
        int wc_status = IB_WC_SUCCESS;
        int ret = 0;
        pio_release_cb cb = NULL;
        u8 extra_bytes = 0;

        if (ps->s_txreq->phdr.hdr.hdr_type) {
                u8 pad_size = hfi1_get_16b_padding((hdrwords << 2), len);

                extra_bytes = pad_size + (SIZE_OF_CRC << 2) + SIZE_OF_LT;
                dwords = (len + extra_bytes) >> 2;
                hdr = (u32 *)&ps->s_txreq->phdr.hdr.opah;
        } else {
                dwords = (len + 3) >> 2;
                hdr = (u32 *)&ps->s_txreq->phdr.hdr.ibh;
        }
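        /* plen (dwords) = PBC (2) + header dwords + padded payload dwords */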
        plen = hdrwords + dwords + sizeof(pbc) / 4;

        /* only RC/UC use complete */
        switch (qp->ibqp.qp_type) {
        case IB_QPT_RC:
        case IB_QPT_UC:
                cb = verbs_pio_complete;
                break;
        default:
                break;
        }

        /* vl15 special case taken care of in ud.c */
        sc5 = priv->s_sc;
        sc = ps->s_txreq->psc;

        if (likely(pbc == 0)) {
                u8 vl = sc_to_vlt(dd_from_ibdev(qp->ibqp.device), sc5);

                /* set PBC_DC_INFO bit (aka SC[4]) in pbc */
                if (ps->s_txreq->phdr.hdr.hdr_type)
                        pbc |= PBC_PACKET_BYPASS | PBC_INSERT_BYPASS_ICRC;
                else
                        pbc |= (ib_is_sc5(sc5) << PBC_DC_INFO_SHIFT);

                if (unlikely(hfi1_dbg_should_fault_tx(qp, ps->opcode)))
                        pbc = hfi1_fault_tx(qp, ps->opcode, pbc);
                pbc = create_pbc(ppd, pbc, qp->srate_mbps, vl, plen);
        }
        if (cb)
                iowait_pio_inc(&priv->s_iowait);
        pbuf = sc_buffer_alloc(sc, plen, cb, qp);
        if (unlikely(!pbuf)) {
                if (cb)
                        verbs_pio_complete(qp, 0);
                if (ppd->host_link_state != HLS_UP_ACTIVE) {
                        /*
                         * If we have filled the PIO buffers to capacity and
                         * are not in an active state, this request is not
                         * going to go out, so just complete it with an error,
                         * or else a ULP or the core may be stuck waiting.
                         */
                        hfi1_cdbg(
                                PIO,
                                "alloc failed. state not active, completing");
                        wc_status = IB_WC_GENERAL_ERR;
                        goto pio_bail;
                } else {
                        /*
                         * This is a normal occurrence. The PIO buffers are
                         * full, but we are still happily sending, so let's
                         * continue to queue the request.
                         */
                        hfi1_cdbg(PIO, "alloc failed. state active, queuing");
                        ret = pio_wait(qp, sc, ps, RVT_S_WAIT_PIO);
                        if (!ret)
                                /* txreq not queued - free */
                                goto bail;
                        /* tx consumed in wait */
                        return ret;
                }
        }

        if (dwords == 0) {
                pio_copy(ppd->dd, pbuf, pbc, hdr, hdrwords);
        } else {
                seg_pio_copy_start(pbuf, pbc,
                                   hdr, hdrwords * 4);
                if (ss) {
                        while (len) {
                                void *addr = ss->sge.vaddr;
                                u32 slen = ss->sge.length;

                                if (slen > len)
                                        slen = len;
                                rvt_update_sge(ss, slen, false);
                                seg_pio_copy_mid(pbuf, addr, slen);
                                len -= slen;
                        }
                }
                /* add icrc, lt byte, and padding to flit */
                if (extra_bytes)
                        seg_pio_copy_mid(pbuf, trail_buf, extra_bytes);

                seg_pio_copy_end(pbuf);
        }

        update_tx_opstats(qp, ps, plen);
        trace_pio_output_ibhdr(dd_from_ibdev(qp->ibqp.device),
                               &ps->s_txreq->phdr.hdr, ib_is_sc5(sc5));

pio_bail:
        if (qp->s_wqe) {
                spin_lock_irqsave(&qp->s_lock, flags);
                hfi1_send_complete(qp, qp->s_wqe, wc_status);
                spin_unlock_irqrestore(&qp->s_lock, flags);
        } else if (qp->ibqp.qp_type == IB_QPT_RC) {
                spin_lock_irqsave(&qp->s_lock, flags);
                hfi1_rc_send_complete(qp, &ps->s_txreq->phdr.hdr);
                spin_unlock_irqrestore(&qp->s_lock, flags);
        }

        ret = 0;

bail:
        hfi1_put_txreq(ps->s_txreq);
        return ret;
}

/*
 * egress_pkey_matches_entry - return 1 if the pkey matches ent (ent
 * being an entry from the partition key table), return 0
 * otherwise. Use the matching criteria for egress partition keys
 * specified in the OPAv1 spec., section 9.11.7.
 */
static inline int egress_pkey_matches_entry(u16 pkey, u16 ent)
{
        u16 mkey = pkey & PKEY_LOW_15_MASK;
        u16 mentry = ent & PKEY_LOW_15_MASK;

        if (mkey == mentry) {
                /*
                 * If pkey[15] is set (full partition member),
                 * is bit 15 in the corresponding table element
                 * clear (limited member)?
                 */
                if (pkey & PKEY_MEMBER_MASK)
                        return !!(ent & PKEY_MEMBER_MASK);
                return 1;
        }
        return 0;
}

/**
 * egress_pkey_check - check P_KEY of a packet
 * @ppd: Physical IB port data
 * @slid: SLID for packet
 * @pkey: PKEY for header
 * @sc5: SC for packet
 * @s_pkey_index: used as a lookup optimization for kernel contexts only;
 * a negative value means a user context is calling this function.
 *
 * It checks whether the header's pkey is valid.
 *
 * Return: 0 on success, otherwise 1
 */
int egress_pkey_check(struct hfi1_pportdata *ppd, u32 slid, u16 pkey,
                      u8 sc5, int8_t s_pkey_index)
{
        struct hfi1_devdata *dd;
        int i;
        int is_user_ctxt_mechanism = (s_pkey_index < 0);

        if (!(ppd->part_enforce & HFI1_PART_ENFORCE_OUT))
                return 0;

        /* If SC15, pkey[0:14] must be 0x7fff */
        if ((sc5 == 0xf) && ((pkey & PKEY_LOW_15_MASK) != PKEY_LOW_15_MASK))
                goto bad;

        /* Is the pkey = 0x0, or 0x8000? */
        if ((pkey & PKEY_LOW_15_MASK) == 0)
                goto bad;

        /*
         * For the kernel contexts only, if a qp is passed into the function,
         * the most likely matching pkey has index qp->s_pkey_index
         */
        if (!is_user_ctxt_mechanism &&
            egress_pkey_matches_entry(pkey, ppd->pkeys[s_pkey_index])) {
                return 0;
        }

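        /* Otherwise, fall back to a linear scan of the full pkey table. */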
        for (i = 0; i < MAX_PKEY_VALUES; i++) {
                if (egress_pkey_matches_entry(pkey, ppd->pkeys[i]))
                        return 0;
        }
bad:
        /*
         * For the user-context mechanism, the P_KEY check would only happen
         * once per SDMA request, not once per packet. Therefore, there's no
         * need to increment the counter for the user-context mechanism.
         */
        if (!is_user_ctxt_mechanism) {
                incr_cntr64(&ppd->port_xmit_constraint_errors);
                dd = ppd->dd;
                if (!(dd->err_info_xmit_constraint.status &
                      OPA_EI_STATUS_SMASK)) {
                        dd->err_info_xmit_constraint.status |=
                                OPA_EI_STATUS_SMASK;
                        dd->err_info_xmit_constraint.slid = slid;
                        dd->err_info_xmit_constraint.pkey = pkey;
                }
        }
        return 1;
}

/**
 * get_send_routine - choose an egress routine
 *
 * Choose an egress routine based on QP type
 * and size
 */
static inline send_routine get_send_routine(struct rvt_qp *qp,
                                            struct hfi1_pkt_state *ps)
{
        struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);
        struct hfi1_qp_priv *priv = qp->priv;
        struct verbs_txreq *tx = ps->s_txreq;

        if (unlikely(!(dd->flags & HFI1_HAS_SEND_DMA)))
                return dd->process_pio_send;
        switch (qp->ibqp.qp_type) {
        case IB_QPT_SMI:
                return dd->process_pio_send;
        case IB_QPT_GSI:
        case IB_QPT_UD:
                break;
        case IB_QPT_UC:
        case IB_QPT_RC: {
Mike Marciniszyn14553ca2016-02-14 12:45:36 -08001290 if (piothreshold &&
Don Hiatte922ae02016-12-07 19:33:00 -08001291 tx->s_cur_size <= min(piothreshold, qp->pmtu) &&
Don Hiatt566d53a2017-08-04 13:54:47 -07001292 (BIT(ps->opcode & OPMASK) & pio_opmask[ps->opcode >> 5]) &&
Mike Marciniszyn47177f12016-03-07 11:35:41 -08001293 iowait_sdma_pending(&priv->s_iowait) == 0 &&
1294 !sdma_txreq_built(&tx->txreq))
Mike Marciniszyn14553ca2016-02-14 12:45:36 -08001295 return dd->process_pio_send;
1296 break;
Mike Marciniszynb374e062016-09-25 07:40:58 -07001297 }
Mike Marciniszyn14553ca2016-02-14 12:45:36 -08001298 default:
1299 break;
1300 }
1301 return dd->process_dma_send;
1302}
1303
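/*
 * Sketch of the opcode gate in the UC/RC arm above, assuming OPMASK is
 * 0x1f so that pio_opmask[] acts as a bitmap with one 32-opcode group
 * per word: opcode >> 5 selects the group word and BIT(opcode & 0x1f)
 * selects the opcode's bit within it. Hypothetical helper, shown only
 * to make the two-level lookup explicit.
 */
static inline bool example_opcode_allows_pio(const u32 *opmask, u8 opcode)
{
	return !!(opmask[opcode >> 5] & BIT(opcode & 0x1f));
}
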
1304/**
Mike Marciniszyn77241052015-07-30 15:17:43 -04001305 * hfi1_verbs_send - send a packet
1306 * @qp: the QP to send on
Dennis Dalessandrod46e5142015-11-11 00:34:37 -05001307 * @ps: the state of the packet to send
Mike Marciniszyn77241052015-07-30 15:17:43 -04001308 *
1309 * Return zero if packet is sent or queued OK.
Dennis Dalessandro54d10c12016-01-19 14:43:01 -08001310 * Return non-zero and clear qp->s_flags RVT_S_BUSY otherwise.
Mike Marciniszyn77241052015-07-30 15:17:43 -04001311 */
Dennis Dalessandro895420d2016-01-19 14:42:28 -08001312int hfi1_verbs_send(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
Mike Marciniszyn77241052015-07-30 15:17:43 -04001313{
1314 struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);
Mike Marciniszyn47177f12016-03-07 11:35:41 -08001315 struct hfi1_qp_priv *priv = qp->priv;
Don Hiatt81cd3892018-05-15 18:28:15 -07001316 struct ib_other_headers *ohdr = NULL;
Mike Marciniszyn14553ca2016-02-14 12:45:36 -08001317 send_routine sr;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001318 int ret;
Don Hiatt566d53a2017-08-04 13:54:47 -07001319 u16 pkey;
1320 u32 slid;
Don Hiatt81cd3892018-05-15 18:28:15 -07001321 u8 l4 = 0;
Sebastian Sancheze38d1e42016-04-12 11:22:21 -07001322
Sebastian Sancheze38d1e42016-04-12 11:22:21 -07001323 /* locate the pkey within the headers */
Don Hiatt566d53a2017-08-04 13:54:47 -07001324 if (ps->s_txreq->phdr.hdr.hdr_type) {
1325 struct hfi1_16b_header *hdr = &ps->s_txreq->phdr.hdr.opah;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001326
Don Hiatt81cd3892018-05-15 18:28:15 -07001327 l4 = hfi1_16B_get_l4(hdr);
1328 if (l4 == OPA_16B_L4_IB_LOCAL)
Don Hiatt566d53a2017-08-04 13:54:47 -07001329 ohdr = &hdr->u.oth;
Don Hiatt81cd3892018-05-15 18:28:15 -07001330 else if (l4 == OPA_16B_L4_IB_GLOBAL)
1331 ohdr = &hdr->u.l.oth;
1332
Don Hiatt566d53a2017-08-04 13:54:47 -07001333 slid = hfi1_16B_get_slid(hdr);
1334 pkey = hfi1_16B_get_pkey(hdr);
1335 } else {
1336 struct ib_header *hdr = &ps->s_txreq->phdr.hdr.ibh;
1337 u8 lnh = ib_get_lnh(hdr);
1338
1339 if (lnh == HFI1_LRH_GRH)
1340 ohdr = &hdr->u.l.oth;
1341 else
1342 ohdr = &hdr->u.oth;
1343 slid = ib_get_slid(hdr);
1344 pkey = ib_bth_get_pkey(ohdr);
1345 }
1346
Don Hiatt81cd3892018-05-15 18:28:15 -07001347 if (likely(l4 != OPA_16B_L4_FM))
1348 ps->opcode = ib_bth_get_opcode(ohdr);
1349 else
1350 ps->opcode = IB_OPCODE_UD_SEND_ONLY;
1351
Don Hiatt566d53a2017-08-04 13:54:47 -07001352 sr = get_send_routine(qp, ps);
1353 ret = egress_pkey_check(dd->pport, slid, pkey,
1354 priv->s_sc, qp->s_pkey_index);
Mike Marciniszyn77241052015-07-30 15:17:43 -04001355 if (unlikely(ret)) {
1356 /*
1357 * The value we are returning here does not get propagated to
1358 * the verbs caller. Thus we need to complete the request with
1359 * error otherwise the caller could be sitting waiting on the
1360 * completion event. Only do this for PIO. SDMA has its own
1361 * mechanism for handling the errors. So for SDMA we can just
1362 * return.
1363 */
Mike Marciniszyn14553ca2016-02-14 12:45:36 -08001364 if (sr == dd->process_pio_send) {
1365 unsigned long flags;
1366
Mike Marciniszyn77241052015-07-30 15:17:43 -04001367 hfi1_cdbg(PIO, "%s() Failed. Completing with err",
1368 __func__);
1369 spin_lock_irqsave(&qp->s_lock, flags);
1370 hfi1_send_complete(qp, qp->s_wqe, IB_WC_GENERAL_ERR);
1371 spin_unlock_irqrestore(&qp->s_lock, flags);
1372 }
1373 return -EINVAL;
1374 }
Mike Marciniszyn47177f12016-03-07 11:35:41 -08001375 if (sr == dd->process_dma_send && iowait_pio_pending(&priv->s_iowait))
1376 return pio_wait(qp,
1377 ps->s_txreq->psc,
1378 ps,
1379 RVT_S_WAIT_PIO_DRAIN);
Mike Marciniszyn14553ca2016-02-14 12:45:36 -08001380 return sr(qp, ps, 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -04001381}
1382
Harish Chegondi94d51712016-01-19 14:43:17 -08001383/**
1384 * hfi1_fill_device_attr - Fill in rvt dev info device attributes.
1385 * @dd: the device data structure
1386 */
1387static void hfi1_fill_device_attr(struct hfi1_devdata *dd)
Mike Marciniszyn77241052015-07-30 15:17:43 -04001388{
Harish Chegondi94d51712016-01-19 14:43:17 -08001389 struct rvt_dev_info *rdi = &dd->verbs_dev.rdi;
Michael J. Ruhl5e6e94242017-03-20 17:25:48 -07001390 u32 ver = dd->dc8051_ver;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001391
Harish Chegondi94d51712016-01-19 14:43:17 -08001392 memset(&rdi->dparms.props, 0, sizeof(rdi->dparms.props));
Mike Marciniszyn77241052015-07-30 15:17:43 -04001393
Michael J. Ruhl5e6e94242017-03-20 17:25:48 -07001394 rdi->dparms.props.fw_ver = ((u64)(dc8051_ver_maj(ver)) << 32) |
1395 ((u64)(dc8051_ver_min(ver)) << 16) |
1396 (u64)dc8051_ver_patch(ver);
1397
Harish Chegondi94d51712016-01-19 14:43:17 -08001398 rdi->dparms.props.device_cap_flags = IB_DEVICE_BAD_PKEY_CNTR |
1399 IB_DEVICE_BAD_QKEY_CNTR | IB_DEVICE_SHUTDOWN_PORT |
1400 IB_DEVICE_SYS_IMAGE_GUID | IB_DEVICE_RC_RNR_NAK_GEN |
Jianxin Xiongc72cfe32016-07-25 13:38:43 -07001401 IB_DEVICE_PORT_ACTIVE_EVENT | IB_DEVICE_SRQ_RESIZE |
Vishwanathapura, Niranjana22807402017-04-12 20:29:29 -07001402 IB_DEVICE_MEM_MGT_EXTENSIONS |
1403 IB_DEVICE_RDMA_NETDEV_OPA_VNIC;
Harish Chegondi94d51712016-01-19 14:43:17 -08001404 rdi->dparms.props.page_size_cap = PAGE_SIZE;
1405 rdi->dparms.props.vendor_id = dd->oui1 << 16 | dd->oui2 << 8 | dd->oui3;
1406 rdi->dparms.props.vendor_part_id = dd->pcidev->device;
1407 rdi->dparms.props.hw_ver = dd->minrev;
1408 rdi->dparms.props.sys_image_guid = ib_hfi1_sys_image_guid;
Jianxin Xiongc72cfe32016-07-25 13:38:43 -07001409 rdi->dparms.props.max_mr_size = U64_MAX;
1410 rdi->dparms.props.max_fast_reg_page_list_len = UINT_MAX;
Harish Chegondi94d51712016-01-19 14:43:17 -08001411 rdi->dparms.props.max_qp = hfi1_max_qps;
1412 rdi->dparms.props.max_qp_wr = hfi1_max_qp_wrs;
1413 rdi->dparms.props.max_sge = hfi1_max_sges;
1414 rdi->dparms.props.max_sge_rd = hfi1_max_sges;
1415 rdi->dparms.props.max_cq = hfi1_max_cqs;
1416 rdi->dparms.props.max_ah = hfi1_max_ahs;
1417 rdi->dparms.props.max_cqe = hfi1_max_cqes;
1418 rdi->dparms.props.max_mr = rdi->lkey_table.max;
1419 rdi->dparms.props.max_fmr = rdi->lkey_table.max;
1420 rdi->dparms.props.max_map_per_fmr = 32767;
1421 rdi->dparms.props.max_pd = hfi1_max_pds;
1422 rdi->dparms.props.max_qp_rd_atom = HFI1_MAX_RDMA_ATOMIC;
1423 rdi->dparms.props.max_qp_init_rd_atom = 255;
1424 rdi->dparms.props.max_srq = hfi1_max_srqs;
1425 rdi->dparms.props.max_srq_wr = hfi1_max_srq_wrs;
1426 rdi->dparms.props.max_srq_sge = hfi1_max_srq_sges;
1427 rdi->dparms.props.atomic_cap = IB_ATOMIC_GLOB;
1428 rdi->dparms.props.max_pkeys = hfi1_get_npkeys(dd);
1429 rdi->dparms.props.max_mcast_grp = hfi1_max_mcast_grps;
1430 rdi->dparms.props.max_mcast_qp_attach = hfi1_max_mcast_qp_attached;
1431 rdi->dparms.props.max_total_mcast_qp_attach =
1432 rdi->dparms.props.max_mcast_qp_attach *
1433 rdi->dparms.props.max_mcast_grp;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001434}
1435
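/*
 * Sketch of the fw_ver packing used in hfi1_fill_device_attr() above:
 * the DC8051 major version is assumed to land in bits 47:32, the minor
 * in bits 31:16 and the patch level in bits 15:0. Hypothetical decoder,
 * for illustration only.
 */
static inline void example_unpack_fw_ver(u64 fw_ver, u16 *maj, u16 *min,
					 u16 *patch)
{
	*maj = (fw_ver >> 32) & 0xffff;
	*min = (fw_ver >> 16) & 0xffff;
	*patch = fw_ver & 0xffff;
}
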
1436static inline u16 opa_speed_to_ib(u16 in)
1437{
1438 u16 out = 0;
1439
1440 if (in & OPA_LINK_SPEED_25G)
1441 out |= IB_SPEED_EDR;
1442 if (in & OPA_LINK_SPEED_12_5G)
1443 out |= IB_SPEED_FDR;
1444
1445 return out;
1446}
1447
1448/*
1449 * Convert a single OPA link width (no multiple flags) to an IB value.
1450 * A zero OPA link width means link down, which means the IB width value
1451 * is a don't care.
1452 */
1453static inline u16 opa_width_to_ib(u16 in)
1454{
1455 switch (in) {
1456 case OPA_LINK_WIDTH_1X:
1457 /* map 2x and 3x to 1x as they don't exist in IB */
1458 case OPA_LINK_WIDTH_2X:
1459 case OPA_LINK_WIDTH_3X:
1460 return IB_WIDTH_1X;
1461 default: /* link down or unknown, return our largest width */
1462 case OPA_LINK_WIDTH_4X:
1463 return IB_WIDTH_4X;
1464 }
1465}
1466
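/*
 * Worked example of the two mappings above: an OPA port running 4X at
 * 25 Gb/s per lane reports IB_WIDTH_4X and IB_SPEED_EDR, which the IB
 * core's rate_show() presents as a 100 Gb/s link. Hypothetical helper,
 * for illustration only.
 */
static inline void example_report_link(u16 opa_width, u16 opa_speed,
				       u8 *ib_width, u8 *ib_speed)
{
	*ib_width = (u8)opa_width_to_ib(opa_width);
	*ib_speed = (u8)opa_speed_to_ib(opa_speed);
}
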
Harish Chegondi45b59ee2016-02-03 14:36:49 -08001467static int query_port(struct rvt_dev_info *rdi, u8 port_num,
Mike Marciniszyn77241052015-07-30 15:17:43 -04001468 struct ib_port_attr *props)
1469{
Harish Chegondi45b59ee2016-02-03 14:36:49 -08001470 struct hfi1_ibdev *verbs_dev = dev_from_rdi(rdi);
1471 struct hfi1_devdata *dd = dd_from_dev(verbs_dev);
1472 struct hfi1_pportdata *ppd = &dd->pport[port_num - 1];
Dasaratharaman Chandramouli51e658f52017-08-04 13:54:35 -07001473 u32 lid = ppd->lid;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001474
Or Gerlitzc4550c62017-01-24 13:02:39 +02001475 /* props is zeroed by the caller; avoid zeroing it here */
Mike Marciniszyn77241052015-07-30 15:17:43 -04001476 props->lid = lid;
1477 props->lmc = ppd->lmc;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001478 /* OPA logical states match IB logical states */
1479 props->state = driver_lstate(ppd);
Byczkowski, Jakubbec7c792017-05-29 17:21:32 -07001480 props->phys_state = driver_pstate(ppd);
Mike Marciniszyn77241052015-07-30 15:17:43 -04001481 props->gid_tbl_len = HFI1_GUIDS_PER_PORT;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001482 props->active_width = (u8)opa_width_to_ib(ppd->link_width_active);
1483 /* see rate_show() in ib core/sysfs.c */
1484 props->active_speed = (u8)opa_speed_to_ib(ppd->link_speed_active);
1485 props->max_vl_num = ppd->vls_supported;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001486
1487 /* Once we are a "first class" citizen and have added the OPA MTUs to
 1488 * the core, we can advertise the larger MTU enum to the ULPs; for now,
 1489 * advertise only 4K.
1490 *
1491 * Those applications which are either OPA aware or pass the MTU enum
 1492 * from the Path Records to us will get the new 8K MTU. Those that
1493 * attempt to process the MTU enum may fail in various ways.
1494 */
1495 props->max_mtu = mtu_to_enum((!valid_ib_mtu(hfi1_max_mtu) ?
1496 4096 : hfi1_max_mtu), IB_MTU_4096);
1497 props->active_mtu = !valid_ib_mtu(ppd->ibmtu) ? props->max_mtu :
Jan Sokolowski69a3ffa2017-11-14 04:34:45 -08001498 mtu_to_enum(ppd->ibmtu, IB_MTU_4096);
Mike Marciniszyn77241052015-07-30 15:17:43 -04001499
Don Hiattd98bb7f2017-08-04 13:54:16 -07001500 /*
1501 * sm_lid of 0xFFFF needs special handling so that it can
 1502 * be differentiated from a permissive LID of 0xFFFF.
1503 * We set the grh_required flag here so the SA can program
1504 * the DGID in the address handle appropriately
1505 */
1506 if (props->sm_lid == be16_to_cpu(IB_LID_PERMISSIVE))
1507 props->grh_required = true;
1508
Mike Marciniszyn77241052015-07-30 15:17:43 -04001509 return 0;
1510}
1511
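/*
 * Sketch of the MTU clamping in query_port() above: mtu_to_enum() is
 * assumed to map a byte count to the matching IB_MTU_* value, falling
 * back to the supplied default when nothing matches. A hypothetical
 * equivalent:
 */
static inline enum ib_mtu example_mtu_to_enum(u32 mtu, enum ib_mtu def)
{
	switch (mtu) {
	case 256:  return IB_MTU_256;
	case 512:  return IB_MTU_512;
	case 1024: return IB_MTU_1024;
	case 2048: return IB_MTU_2048;
	case 4096: return IB_MTU_4096;
	default:   return def;
	}
}
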
1512static int modify_device(struct ib_device *device,
1513 int device_modify_mask,
1514 struct ib_device_modify *device_modify)
1515{
1516 struct hfi1_devdata *dd = dd_from_ibdev(device);
1517 unsigned i;
1518 int ret;
1519
1520 if (device_modify_mask & ~(IB_DEVICE_MODIFY_SYS_IMAGE_GUID |
1521 IB_DEVICE_MODIFY_NODE_DESC)) {
1522 ret = -EOPNOTSUPP;
1523 goto bail;
1524 }
1525
1526 if (device_modify_mask & IB_DEVICE_MODIFY_NODE_DESC) {
Yuval Shaiabd99fde2016-08-25 10:57:07 -07001527 memcpy(device->node_desc, device_modify->node_desc,
1528 IB_DEVICE_NODE_DESC_MAX);
Mike Marciniszyn77241052015-07-30 15:17:43 -04001529 for (i = 0; i < dd->num_pports; i++) {
1530 struct hfi1_ibport *ibp = &dd->pport[i].ibport_data;
1531
1532 hfi1_node_desc_chg(ibp);
1533 }
1534 }
1535
1536 if (device_modify_mask & IB_DEVICE_MODIFY_SYS_IMAGE_GUID) {
1537 ib_hfi1_sys_image_guid =
1538 cpu_to_be64(device_modify->sys_image_guid);
1539 for (i = 0; i < dd->num_pports; i++) {
1540 struct hfi1_ibport *ibp = &dd->pport[i].ibport_data;
1541
1542 hfi1_sys_guid_chg(ibp);
1543 }
1544 }
1545
1546 ret = 0;
1547
1548bail:
1549 return ret;
1550}
1551
Harish Chegondi45b59ee2016-02-03 14:36:49 -08001552static int shut_down_port(struct rvt_dev_info *rdi, u8 port_num)
Mike Marciniszyn77241052015-07-30 15:17:43 -04001553{
Harish Chegondi45b59ee2016-02-03 14:36:49 -08001554 struct hfi1_ibdev *verbs_dev = dev_from_rdi(rdi);
1555 struct hfi1_devdata *dd = dd_from_dev(verbs_dev);
1556 struct hfi1_pportdata *ppd = &dd->pport[port_num - 1];
1557 int ret;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001558
Harish Chegondi45b59ee2016-02-03 14:36:49 -08001559 set_link_down_reason(ppd, OPA_LINKDOWN_REASON_UNKNOWN, 0,
1560 OPA_LINKDOWN_REASON_UNKNOWN);
1561 ret = set_link_state(ppd, HLS_DN_DOWNDEF);
Mike Marciniszyn77241052015-07-30 15:17:43 -04001562 return ret;
1563}
1564
Dennis Dalessandro25131462016-02-03 14:36:40 -08001565static int hfi1_get_guid_be(struct rvt_dev_info *rdi, struct rvt_ibport *rvp,
1566 int guid_index, __be64 *guid)
Mike Marciniszyn77241052015-07-30 15:17:43 -04001567{
Dennis Dalessandro25131462016-02-03 14:36:40 -08001568 struct hfi1_ibport *ibp = container_of(rvp, struct hfi1_ibport, rvp);
Mike Marciniszyn77241052015-07-30 15:17:43 -04001569
Jakub Pawlaka6cd5f02016-10-17 04:19:30 -07001570 if (guid_index >= HFI1_GUIDS_PER_PORT)
Dennis Dalessandro25131462016-02-03 14:36:40 -08001571 return -EINVAL;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001572
Jakub Pawlaka6cd5f02016-10-17 04:19:30 -07001573 *guid = get_sguid(ibp, guid_index);
Dennis Dalessandro25131462016-02-03 14:36:40 -08001574 return 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001575}
1576
Mike Marciniszyn77241052015-07-30 15:17:43 -04001577/*
1578 * convert ah port,sl to sc
1579 */
Dasaratharaman Chandramouli90898852017-04-29 14:41:18 -04001580u8 ah_to_sc(struct ib_device *ibdev, struct rdma_ah_attr *ah)
Mike Marciniszyn77241052015-07-30 15:17:43 -04001581{
Dasaratharaman Chandramoulid8966fc2017-04-29 14:41:28 -04001582 struct hfi1_ibport *ibp = to_iport(ibdev, rdma_ah_get_port_num(ah));
Mike Marciniszyn77241052015-07-30 15:17:43 -04001583
Dasaratharaman Chandramoulid8966fc2017-04-29 14:41:28 -04001584 return ibp->sl_to_sc[rdma_ah_get_sl(ah)];
Mike Marciniszyn77241052015-07-30 15:17:43 -04001585}
1586
Dasaratharaman Chandramouli90898852017-04-29 14:41:18 -04001587static int hfi1_check_ah(struct ib_device *ibdev, struct rdma_ah_attr *ah_attr)
Mike Marciniszyn77241052015-07-30 15:17:43 -04001588{
1589 struct hfi1_ibport *ibp;
1590 struct hfi1_pportdata *ppd;
1591 struct hfi1_devdata *dd;
1592 u8 sc5;
1593
Don Hiatt13c19222017-08-04 13:53:51 -07001594 if (hfi1_check_mcast(rdma_ah_get_dlid(ah_attr)) &&
1595 !(rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH))
1596 return -EINVAL;
1597
Mike Marciniszyn77241052015-07-30 15:17:43 -04001598 /* test the mapping for validity */
Dasaratharaman Chandramoulid8966fc2017-04-29 14:41:28 -04001599 ibp = to_iport(ibdev, rdma_ah_get_port_num(ah_attr));
Mike Marciniszyn77241052015-07-30 15:17:43 -04001600 ppd = ppd_from_ibp(ibp);
Dasaratharaman Chandramoulid8966fc2017-04-29 14:41:28 -04001601 sc5 = ibp->sl_to_sc[rdma_ah_get_sl(ah_attr)];
Mike Marciniszyn77241052015-07-30 15:17:43 -04001602 dd = dd_from_ppd(ppd);
1603 if (sc_to_vlt(dd, sc5) > num_vls && sc_to_vlt(dd, sc5) != 0xf)
Dennis Dalessandro15723f02016-01-19 14:42:17 -08001604 return -EINVAL;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001605 return 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001606}
1607
Dennis Dalessandro8f1764fa2016-01-19 14:42:22 -08001608static void hfi1_notify_new_ah(struct ib_device *ibdev,
Dasaratharaman Chandramouli90898852017-04-29 14:41:18 -04001609 struct rdma_ah_attr *ah_attr,
Dennis Dalessandro8f1764fa2016-01-19 14:42:22 -08001610 struct rvt_ah *ah)
1611{
1612 struct hfi1_ibport *ibp;
1613 struct hfi1_pportdata *ppd;
1614 struct hfi1_devdata *dd;
1615 u8 sc5;
Don Hiattd98bb7f2017-08-04 13:54:16 -07001616 struct rdma_ah_attr *attr = &ah->attr;
Dennis Dalessandro8f1764fa2016-01-19 14:42:22 -08001617
1618 /*
 1619 * Do not trust reading anything from rvt_ah at this point as it is not
 1620 * done being set up. We can, however, modify the fields we need to set.
1621 */
1622
Dasaratharaman Chandramoulid8966fc2017-04-29 14:41:28 -04001623 ibp = to_iport(ibdev, rdma_ah_get_port_num(ah_attr));
Dennis Dalessandro8f1764fa2016-01-19 14:42:22 -08001624 ppd = ppd_from_ibp(ibp);
Dasaratharaman Chandramoulid8966fc2017-04-29 14:41:28 -04001625 sc5 = ibp->sl_to_sc[rdma_ah_get_sl(&ah->attr)];
Don Hiattd98bb7f2017-08-04 13:54:16 -07001626 hfi1_update_ah_attr(ibdev, attr);
1627 hfi1_make_opa_lid(attr);
Dennis Dalessandro8f1764fa2016-01-19 14:42:22 -08001628 dd = dd_from_ppd(ppd);
1629 ah->vl = sc_to_vlt(dd, sc5);
1630 if (ah->vl < num_vls || ah->vl == 15)
1631 ah->log_pmtu = ilog2(dd->vld[ah->vl].mtu);
1632}
1633
Mike Marciniszyn77241052015-07-30 15:17:43 -04001634/**
Mike Marciniszyn77241052015-07-30 15:17:43 -04001635 * hfi1_get_npkeys - return the size of the PKEY table for context 0
1636 * @dd: the hfi1_ib device
1637 */
1638unsigned hfi1_get_npkeys(struct hfi1_devdata *dd)
1639{
1640 return ARRAY_SIZE(dd->pport[0].pkeys);
1641}
1642
Mike Marciniszyn77241052015-07-30 15:17:43 -04001643static void init_ibport(struct hfi1_pportdata *ppd)
1644{
1645 struct hfi1_ibport *ibp = &ppd->ibport_data;
1646 size_t sz = ARRAY_SIZE(ibp->sl_to_sc);
1647 int i;
1648
1649 for (i = 0; i < sz; i++) {
1650 ibp->sl_to_sc[i] = i;
1651 ibp->sc_to_sl[i] = i;
1652 }
1653
Michael J. Ruhlbf90aad2017-07-24 07:46:12 -07001654 for (i = 0; i < RVT_MAX_TRAP_LISTS; i++)
1655 INIT_LIST_HEAD(&ibp->rvp.trap_lists[i].list);
Kees Cook80641352017-10-16 15:51:54 -07001656 timer_setup(&ibp->rvp.trap_timer, hfi1_handle_trap_timer, 0);
Michael J. Ruhlbf90aad2017-07-24 07:46:12 -07001657
Dennis Dalessandro4eb06882016-01-19 14:42:39 -08001658 spin_lock_init(&ibp->rvp.lock);
Mike Marciniszyn77241052015-07-30 15:17:43 -04001659 /* Set the prefix to the default value (see ch. 4.1.1) */
Dennis Dalessandro4eb06882016-01-19 14:42:39 -08001660 ibp->rvp.gid_prefix = IB_DEFAULT_GID_PREFIX;
1661 ibp->rvp.sm_lid = 0;
Vishwanathapura, Niranjanacb493662017-06-01 17:04:02 -07001662 /*
1663 * Below should only set bits defined in OPA PortInfo.CapabilityMask
1664 * and PortInfo.CapabilityMask3
1665 */
Dennis Dalessandro4eb06882016-01-19 14:42:39 -08001666 ibp->rvp.port_cap_flags = IB_PORT_AUTO_MIGR_SUP |
Mike Marciniszyn77241052015-07-30 15:17:43 -04001667 IB_PORT_CAP_MASK_NOTICE_SUP;
Vishwanathapura, Niranjanacb493662017-06-01 17:04:02 -07001668 ibp->rvp.port_cap3_flags = OPA_CAP_MASK3_IsSharedSpaceSupported;
Dennis Dalessandro4eb06882016-01-19 14:42:39 -08001669 ibp->rvp.pma_counter_select[0] = IB_PMA_PORT_XMIT_DATA;
1670 ibp->rvp.pma_counter_select[1] = IB_PMA_PORT_RCV_DATA;
1671 ibp->rvp.pma_counter_select[2] = IB_PMA_PORT_XMIT_PKTS;
1672 ibp->rvp.pma_counter_select[3] = IB_PMA_PORT_RCV_PKTS;
1673 ibp->rvp.pma_counter_select[4] = IB_PMA_PORT_XMIT_WAIT;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001674
Dennis Dalessandro4eb06882016-01-19 14:42:39 -08001675 RCU_INIT_POINTER(ibp->rvp.qp[0], NULL);
1676 RCU_INIT_POINTER(ibp->rvp.qp[1], NULL);
Mike Marciniszyn77241052015-07-30 15:17:43 -04001677}
1678
Leon Romanovsky9abb0d12017-06-27 16:49:53 +03001679static void hfi1_get_dev_fw_str(struct ib_device *ibdev, char *str)
Ira Weiny939b6ca2016-06-15 02:22:08 -04001680{
1681 struct rvt_dev_info *rdi = ib_to_rvt(ibdev);
1682 struct hfi1_ibdev *dev = dev_from_rdi(rdi);
Michael J. Ruhl5e6e94242017-03-20 17:25:48 -07001683 u32 ver = dd_from_dev(dev)->dc8051_ver;
Ira Weiny939b6ca2016-06-15 02:22:08 -04001684
Leon Romanovsky9abb0d12017-06-27 16:49:53 +03001685 snprintf(str, IB_FW_VERSION_NAME_MAX, "%u.%u.%u", dc8051_ver_maj(ver),
Michael J. Ruhl5e6e94242017-03-20 17:25:48 -07001686 dc8051_ver_min(ver), dc8051_ver_patch(ver));
Ira Weiny939b6ca2016-06-15 02:22:08 -04001687}
1688
Jianxin Xiongb7481942016-12-07 19:32:53 -08001689static const char * const driver_cntr_names[] = {
 1690 /* must be element 0 */
1691 "DRIVER_KernIntr",
1692 "DRIVER_ErrorIntr",
1693 "DRIVER_Tx_Errs",
1694 "DRIVER_Rcv_Errs",
1695 "DRIVER_HW_Errs",
1696 "DRIVER_NoPIOBufs",
1697 "DRIVER_CtxtsOpen",
1698 "DRIVER_RcvLen_Errs",
1699 "DRIVER_EgrBufFull",
1700 "DRIVER_EgrHdrFull"
1701};
1702
Tadeusz Struk62eed662017-03-20 17:25:35 -07001703static DEFINE_MUTEX(cntr_names_lock); /* protects the *_cntr_names buffers */
Jianxin Xiongb7481942016-12-07 19:32:53 -08001704static const char **dev_cntr_names;
1705static const char **port_cntr_names;
1706static int num_driver_cntrs = ARRAY_SIZE(driver_cntr_names);
1707static int num_dev_cntrs;
1708static int num_port_cntrs;
1709static int cntr_names_initialized;
1710
1711/*
 1712 * Convert a list of names separated by '\n' into an array of NUL-terminated
1713 * strings. Optionally some entries can be reserved in the array to hold extra
1714 * external strings.
1715 */
1716static int init_cntr_names(const char *names_in,
Arnd Bergmann64b2ae72017-02-14 22:23:07 +01001717 const size_t names_len,
Jianxin Xiongb7481942016-12-07 19:32:53 -08001718 int num_extra_names,
1719 int *num_cntrs,
1720 const char ***cntr_names)
1721{
1722 char *names_out, *p, **q;
1723 int i, n;
1724
1725 n = 0;
1726 for (i = 0; i < names_len; i++)
1727 if (names_in[i] == '\n')
1728 n++;
1729
1730 names_out = kmalloc((n + num_extra_names) * sizeof(char *) + names_len,
1731 GFP_KERNEL);
1732 if (!names_out) {
1733 *num_cntrs = 0;
1734 *cntr_names = NULL;
1735 return -ENOMEM;
1736 }
1737
1738 p = names_out + (n + num_extra_names) * sizeof(char *);
1739 memcpy(p, names_in, names_len);
1740
1741 q = (char **)names_out;
1742 for (i = 0; i < n; i++) {
1743 q[i] = p;
1744 p = strchr(p, '\n');
1745 *p++ = '\0';
1746 }
1747
1748 *num_cntrs = n;
1749 *cntr_names = (const char **)names_out;
1750 return 0;
1751}
1752
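/*
 * Usage sketch for init_cntr_names(): a blob "alpha\nbeta\n" with one
 * extra slot reserved yields num == 2, names[0] == "alpha" and
 * names[1] == "beta", all carved out of a single allocation whose head
 * is the pointer array; names[2] is left for the caller to fill. The
 * whole table is released with one kfree(). Hypothetical caller:
 */
static void __maybe_unused example_cntr_names_usage(void)
{
	static const char blob[] = "alpha\nbeta\n";
	const char **names;
	int num;

	if (!init_cntr_names(blob, sizeof(blob) - 1, 1, &num, &names))
		kfree(names);	/* frees the pointers and the strings */
}
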
1753static struct rdma_hw_stats *alloc_hw_stats(struct ib_device *ibdev,
1754 u8 port_num)
1755{
1756 int i, err;
1757
Tadeusz Struk62eed662017-03-20 17:25:35 -07001758 mutex_lock(&cntr_names_lock);
Jianxin Xiongb7481942016-12-07 19:32:53 -08001759 if (!cntr_names_initialized) {
1760 struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
1761
1762 err = init_cntr_names(dd->cntrnames,
1763 dd->cntrnameslen,
1764 num_driver_cntrs,
1765 &num_dev_cntrs,
1766 &dev_cntr_names);
Tadeusz Struk62eed662017-03-20 17:25:35 -07001767 if (err) {
1768 mutex_unlock(&cntr_names_lock);
Jianxin Xiongb7481942016-12-07 19:32:53 -08001769 return NULL;
Tadeusz Struk62eed662017-03-20 17:25:35 -07001770 }
Jianxin Xiongb7481942016-12-07 19:32:53 -08001771
1772 for (i = 0; i < num_driver_cntrs; i++)
1773 dev_cntr_names[num_dev_cntrs + i] =
1774 driver_cntr_names[i];
1775
1776 err = init_cntr_names(dd->portcntrnames,
1777 dd->portcntrnameslen,
1778 0,
1779 &num_port_cntrs,
1780 &port_cntr_names);
1781 if (err) {
1782 kfree(dev_cntr_names);
1783 dev_cntr_names = NULL;
Tadeusz Struk62eed662017-03-20 17:25:35 -07001784 mutex_unlock(&cntr_names_lock);
Jianxin Xiongb7481942016-12-07 19:32:53 -08001785 return NULL;
1786 }
1787 cntr_names_initialized = 1;
1788 }
Tadeusz Struk62eed662017-03-20 17:25:35 -07001789 mutex_unlock(&cntr_names_lock);
Jianxin Xiongb7481942016-12-07 19:32:53 -08001790
1791 if (!port_num)
1792 return rdma_alloc_hw_stats_struct(
1793 dev_cntr_names,
1794 num_dev_cntrs + num_driver_cntrs,
1795 RDMA_HW_STATS_DEFAULT_LIFESPAN);
1796 else
1797 return rdma_alloc_hw_stats_struct(
1798 port_cntr_names,
1799 num_port_cntrs,
1800 RDMA_HW_STATS_DEFAULT_LIFESPAN);
1801}
1802
1803static u64 hfi1_sps_ints(void)
1804{
1805 unsigned long flags;
1806 struct hfi1_devdata *dd;
1807 u64 sps_ints = 0;
1808
1809 spin_lock_irqsave(&hfi1_devs_lock, flags);
1810 list_for_each_entry(dd, &hfi1_dev_list, list) {
1811 sps_ints += get_all_cpu_total(dd->int_counter);
1812 }
1813 spin_unlock_irqrestore(&hfi1_devs_lock, flags);
1814 return sps_ints;
1815}
1816
1817static int get_hw_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats,
1818 u8 port, int index)
1819{
1820 u64 *values;
1821 int count;
1822
1823 if (!port) {
1824 u64 *stats = (u64 *)&hfi1_stats;
1825 int i;
1826
1827 hfi1_read_cntrs(dd_from_ibdev(ibdev), NULL, &values);
1828 values[num_dev_cntrs] = hfi1_sps_ints();
1829 for (i = 1; i < num_driver_cntrs; i++)
1830 values[num_dev_cntrs + i] = stats[i];
1831 count = num_dev_cntrs + num_driver_cntrs;
1832 } else {
1833 struct hfi1_ibport *ibp = to_iport(ibdev, port);
1834
1835 hfi1_read_portcntrs(ppd_from_ibp(ibp), NULL, &values);
1836 count = num_port_cntrs;
1837 }
1838
1839 memcpy(stats->value, values, count * sizeof(u64));
1840 return count;
1841}
1842
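/*
 * Sketch of the counter contract implemented above, assuming the IB
 * core allocates one stats structure per device (port_num == 0) and one
 * per port via alloc_hw_stats(), then calls get_hw_stats() to refresh
 * stats->value[]. Hypothetical reader, for illustration only.
 */
static void __maybe_unused example_dump_hw_stats(struct ib_device *ibdev,
						 struct rdma_hw_stats *stats,
						 u8 port)
{
	int i, n = get_hw_stats(ibdev, stats, port, 0);

	for (i = 0; i < n; i++)
		pr_info("%s: %llu\n", stats->names[i],
			(unsigned long long)stats->value[i]);
}
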
Mike Marciniszyn77241052015-07-30 15:17:43 -04001843/**
1844 * hfi1_register_ib_device - register our device with the infiniband core
1845 * @dd: the device data structure
1846 * Return 0 if successful, errno if unsuccessful.
1847 */
1848int hfi1_register_ib_device(struct hfi1_devdata *dd)
1849{
1850 struct hfi1_ibdev *dev = &dd->verbs_dev;
Dennis Dalessandroec3f2c12016-01-19 14:41:33 -08001851 struct ib_device *ibdev = &dev->rdi.ibdev;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001852 struct hfi1_pportdata *ppd = dd->pport;
Jakub Pawlaka6cd5f02016-10-17 04:19:30 -07001853 struct hfi1_ibport *ibp = &ppd->ibport_data;
Dennis Dalessandro895420d2016-01-19 14:42:28 -08001854 unsigned i;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001855 int ret;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001856
Mike Marciniszyn77241052015-07-30 15:17:43 -04001857 for (i = 0; i < dd->num_pports; i++)
1858 init_ibport(ppd + i);
1859
1860 /* Only need to initialize non-zero fields. */
Dennis Dalessandro4f87ccf2016-01-19 14:41:50 -08001861
Kees Cook80641352017-10-16 15:51:54 -07001862 timer_setup(&dev->mem_timer, mem_timer, 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -04001863
Mike Marciniszyn77241052015-07-30 15:17:43 -04001864 seqlock_init(&dev->iowait_lock);
Mike Marciniszyn4e045572016-10-10 06:14:28 -07001865 seqlock_init(&dev->txwait_lock);
Mike Marciniszyn77241052015-07-30 15:17:43 -04001866 INIT_LIST_HEAD(&dev->txwait);
1867 INIT_LIST_HEAD(&dev->memwait);
1868
Mike Marciniszyn45842ab2016-02-14 12:44:34 -08001869 ret = verbs_txreq_init(dev);
1870 if (ret)
Mike Marciniszyn77241052015-07-30 15:17:43 -04001871 goto err_verbs_txreq;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001872
Jakub Pawlaka6cd5f02016-10-17 04:19:30 -07001873 /* Use first-port GUID as node guid */
1874 ibdev->node_guid = get_sguid(ibp, HFI1_PORT_GUID_INDEX);
1875
Mike Marciniszyn77241052015-07-30 15:17:43 -04001876 /*
1877 * The system image GUID is supposed to be the same for all
1878 * HFIs in a single system but since there can be other
1879 * device types in the system, we can't be sure this is unique.
1880 */
1881 if (!ib_hfi1_sys_image_guid)
Jakub Pawlaka6cd5f02016-10-17 04:19:30 -07001882 ib_hfi1_sys_image_guid = ibdev->node_guid;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001883 ibdev->owner = THIS_MODULE;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001884 ibdev->phys_port_cnt = dd->num_pports;
Bart Van Assche30677712017-01-20 13:04:17 -08001885 ibdev->dev.parent = &dd->pcidev->dev;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001886 ibdev->modify_device = modify_device;
Jianxin Xiongb7481942016-12-07 19:32:53 -08001887 ibdev->alloc_hw_stats = alloc_hw_stats;
1888 ibdev->get_hw_stats = get_hw_stats;
Vishwanathapura, Niranjana22807402017-04-12 20:29:29 -07001889 ibdev->alloc_rdma_netdev = hfi1_vnic_alloc_rn;
Dennis Dalessandro43316292016-01-19 14:44:01 -08001890
 1891 /* keep process_mad in the driver */
Mike Marciniszyn77241052015-07-30 15:17:43 -04001892 ibdev->process_mad = hfi1_process_mad;
Ira Weiny939b6ca2016-06-15 02:22:08 -04001893 ibdev->get_dev_fw_str = hfi1_get_dev_fw_str;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001894
1895 strncpy(ibdev->node_desc, init_utsname()->nodename,
1896 sizeof(ibdev->node_desc));
1897
Dennis Dalessandroec3f2c12016-01-19 14:41:33 -08001898 /*
1899 * Fill in rvt info object.
1900 */
1901 dd->verbs_dev.rdi.driver_f.port_callback = hfi1_create_port_files;
Dennis Dalessandro49dbb6c2016-01-19 14:42:06 -08001902 dd->verbs_dev.rdi.driver_f.get_pci_dev = get_pci_dev;
Dennis Dalessandro15723f02016-01-19 14:42:17 -08001903 dd->verbs_dev.rdi.driver_f.check_ah = hfi1_check_ah;
Dennis Dalessandro8f1764fa2016-01-19 14:42:22 -08001904 dd->verbs_dev.rdi.driver_f.notify_new_ah = hfi1_notify_new_ah;
Dennis Dalessandro25131462016-02-03 14:36:40 -08001905 dd->verbs_dev.rdi.driver_f.get_guid_be = hfi1_get_guid_be;
Harish Chegondi45b59ee2016-02-03 14:36:49 -08001906 dd->verbs_dev.rdi.driver_f.query_port_state = query_port;
1907 dd->verbs_dev.rdi.driver_f.shut_down_port = shut_down_port;
1908 dd->verbs_dev.rdi.driver_f.cap_mask_chg = hfi1_cap_mask_chg;
Harish Chegondi94d51712016-01-19 14:43:17 -08001909 /*
1910 * Fill in rvt info device attributes.
1911 */
1912 hfi1_fill_device_attr(dd);
Dennis Dalessandroa2c2d602016-01-19 14:43:12 -08001913
1914 /* queue pair */
Dennis Dalessandroa2c2d602016-01-19 14:43:12 -08001915 dd->verbs_dev.rdi.dparms.qp_table_size = hfi1_qp_table_size;
1916 dd->verbs_dev.rdi.dparms.qpn_start = 0;
1917 dd->verbs_dev.rdi.dparms.qpn_inc = 1;
1918 dd->verbs_dev.rdi.dparms.qos_shift = dd->qos_shift;
1919 dd->verbs_dev.rdi.dparms.qpn_res_start = kdeth_qp << 16;
1920 dd->verbs_dev.rdi.dparms.qpn_res_end =
Dennis Dalessandroabd712d2016-01-19 14:43:22 -08001921 dd->verbs_dev.rdi.dparms.qpn_res_start + 65535;
Dennis Dalessandroec4274f2016-01-19 14:43:44 -08001922 dd->verbs_dev.rdi.dparms.max_rdma_atomic = HFI1_MAX_RDMA_ATOMIC;
1923 dd->verbs_dev.rdi.dparms.psn_mask = PSN_MASK;
1924 dd->verbs_dev.rdi.dparms.psn_shift = PSN_SHIFT;
1925 dd->verbs_dev.rdi.dparms.psn_modify_mask = PSN_MODIFY_MASK;
Dasaratharaman Chandramouli72214032017-08-04 13:54:53 -07001926 dd->verbs_dev.rdi.dparms.core_cap_flags = RDMA_CORE_PORT_INTEL_OPA |
1927 RDMA_CORE_CAP_OPA_AH;
Harish Chegondi45b59ee2016-02-03 14:36:49 -08001928 dd->verbs_dev.rdi.dparms.max_mad_size = OPA_MGMT_MAD_SIZE;
1929
Dennis Dalessandroa2c2d602016-01-19 14:43:12 -08001930 dd->verbs_dev.rdi.driver_f.qp_priv_alloc = qp_priv_alloc;
1931 dd->verbs_dev.rdi.driver_f.qp_priv_free = qp_priv_free;
1932 dd->verbs_dev.rdi.driver_f.free_all_qps = free_all_qps;
1933 dd->verbs_dev.rdi.driver_f.notify_qp_reset = notify_qp_reset;
Mike Marciniszynb6eac932017-04-09 10:16:35 -07001934 dd->verbs_dev.rdi.driver_f.do_send = hfi1_do_send_from_rvt;
Dennis Dalessandro83693bd2016-01-19 14:43:33 -08001935 dd->verbs_dev.rdi.driver_f.schedule_send = hfi1_schedule_send;
Mike Marciniszyn46a80d62016-02-14 12:10:04 -08001936 dd->verbs_dev.rdi.driver_f.schedule_send_no_lock = _hfi1_schedule_send;
Dennis Dalessandroec4274f2016-01-19 14:43:44 -08001937 dd->verbs_dev.rdi.driver_f.get_pmtu_from_attr = get_pmtu_from_attr;
1938 dd->verbs_dev.rdi.driver_f.notify_error_qp = notify_error_qp;
1939 dd->verbs_dev.rdi.driver_f.flush_qp_waiters = flush_qp_waiters;
1940 dd->verbs_dev.rdi.driver_f.stop_send_queue = stop_send_queue;
1941 dd->verbs_dev.rdi.driver_f.quiesce_qp = quiesce_qp;
1943 dd->verbs_dev.rdi.driver_f.mtu_from_qp = mtu_from_qp;
1944 dd->verbs_dev.rdi.driver_f.mtu_to_path_mtu = mtu_to_path_mtu;
1945 dd->verbs_dev.rdi.driver_f.check_modify_qp = hfi1_check_modify_qp;
1946 dd->verbs_dev.rdi.driver_f.modify_qp = hfi1_modify_qp;
Venkata Sandeep Dhanalakota56acbbf2017-02-08 05:27:19 -08001947 dd->verbs_dev.rdi.driver_f.notify_restart_rc = hfi1_restart_rc;
Mike Marciniszyn46a80d62016-02-14 12:10:04 -08001948 dd->verbs_dev.rdi.driver_f.check_send_wqe = hfi1_check_send_wqe;
Sebastian Sanchez5d18ee62018-05-02 06:43:55 -07001949 dd->verbs_dev.rdi.driver_f.comp_vect_cpu_lookup =
1950 hfi1_comp_vect_mappings_lookup;
Dennis Dalessandroa2c2d602016-01-19 14:43:12 -08001951
Dennis Dalessandroabd712d2016-01-19 14:43:22 -08001952 /* completion queue */
Sebastian Sanchez5d18ee62018-05-02 06:43:55 -07001953 dd->verbs_dev.rdi.ibdev.num_comp_vectors = dd->comp_vect_possible_cpus;
Mitko Haralanov27807392016-02-03 14:33:31 -08001954 dd->verbs_dev.rdi.dparms.node = dd->node;
Dennis Dalessandroabd712d2016-01-19 14:43:22 -08001955
Dennis Dalessandroa2c2d602016-01-19 14:43:12 -08001956 /* misc settings */
Dennis Dalessandroabd712d2016-01-19 14:43:22 -08001957 dd->verbs_dev.rdi.flags = 0; /* Let rdmavt handle it all */
Dennis Dalessandro895420d2016-01-19 14:42:28 -08001958 dd->verbs_dev.rdi.dparms.lkey_table_size = hfi1_lkey_table_size;
Dennis Dalessandro4eb06882016-01-19 14:42:39 -08001959 dd->verbs_dev.rdi.dparms.nports = dd->num_pports;
1960 dd->verbs_dev.rdi.dparms.npkeys = hfi1_get_npkeys(dd);
1961
Mike Marciniszyn1ac57c52016-07-01 16:02:13 -07001962 /* post send table */
1963 dd->verbs_dev.rdi.post_parms = hfi1_post_parms;
1964
Dennis Dalessandro4eb06882016-01-19 14:42:39 -08001965 ppd = dd->pport;
1966 for (i = 0; i < dd->num_pports; i++, ppd++)
1967 rvt_init_port(&dd->verbs_dev.rdi,
1968 &ppd->ibport_data.rvp,
1969 i,
1970 ppd->pkeys);
Dennis Dalessandroec3f2c12016-01-19 14:41:33 -08001971
Matan Barak0ede73b2018-03-19 15:02:34 +02001972 ret = rvt_register_device(&dd->verbs_dev.rdi, RDMA_DRIVER_HFI1);
Mike Marciniszyn77241052015-07-30 15:17:43 -04001973 if (ret)
Dennis Dalessandro9c4a3112016-01-19 14:44:11 -08001974 goto err_verbs_txreq;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001975
1976 ret = hfi1_verbs_register_sysfs(dd);
1977 if (ret)
1978 goto err_class;
1979
Dennis Dalessandro9c4a3112016-01-19 14:44:11 -08001980 return ret;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001981
1982err_class:
Dennis Dalessandroec3f2c12016-01-19 14:41:33 -08001983 rvt_unregister_device(&dd->verbs_dev.rdi);
Mike Marciniszyn77241052015-07-30 15:17:43 -04001984err_verbs_txreq:
Mike Marciniszyn45842ab2016-02-14 12:44:34 -08001985 verbs_txreq_exit(dev);
Mike Marciniszyn77241052015-07-30 15:17:43 -04001986 dd_dev_err(dd, "cannot register verbs: %d!\n", -ret);
Mike Marciniszyn77241052015-07-30 15:17:43 -04001987 return ret;
1988}
1989
1990void hfi1_unregister_ib_device(struct hfi1_devdata *dd)
1991{
1992 struct hfi1_ibdev *dev = &dd->verbs_dev;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001993
1994 hfi1_verbs_unregister_sysfs(dd);
1995
Dennis Dalessandroec3f2c12016-01-19 14:41:33 -08001996 rvt_unregister_device(&dd->verbs_dev.rdi);
Mike Marciniszyn77241052015-07-30 15:17:43 -04001997
1998 if (!list_empty(&dev->txwait))
1999 dd_dev_err(dd, "txwait list not empty!\n");
2000 if (!list_empty(&dev->memwait))
2001 dd_dev_err(dd, "memwait list not empty!\n");
Mike Marciniszyn77241052015-07-30 15:17:43 -04002002
Mike Marciniszyn77241052015-07-30 15:17:43 -04002003 del_timer_sync(&dev->mem_timer);
Mike Marciniszyn45842ab2016-02-14 12:44:34 -08002004 verbs_txreq_exit(dev);
Jianxin Xiongb7481942016-12-07 19:32:53 -08002005
Tadeusz Struk62eed662017-03-20 17:25:35 -07002006 mutex_lock(&cntr_names_lock);
Jianxin Xiongb7481942016-12-07 19:32:53 -08002007 kfree(dev_cntr_names);
2008 kfree(port_cntr_names);
Tadeusz Struk62eed662017-03-20 17:25:35 -07002009 dev_cntr_names = NULL;
2010 port_cntr_names = NULL;
Jianxin Xiongb7481942016-12-07 19:32:53 -08002011 cntr_names_initialized = 0;
Tadeusz Struk62eed662017-03-20 17:25:35 -07002012 mutex_unlock(&cntr_names_lock);
Mike Marciniszyn77241052015-07-30 15:17:43 -04002013}
2014
Mike Marciniszyn77241052015-07-30 15:17:43 -04002015void hfi1_cnp_rcv(struct hfi1_packet *packet)
2016{
Sebastian Sanchezf3e862c2017-02-08 05:26:25 -08002017 struct hfi1_ibport *ibp = rcd_to_iport(packet->rcd);
Arthur Kepner977940b2015-11-04 21:10:10 -05002018 struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
Mike Marciniszyn261a4352016-09-06 04:35:05 -07002019 struct ib_header *hdr = packet->hdr;
Dennis Dalessandro895420d2016-01-19 14:42:28 -08002020 struct rvt_qp *qp = packet->qp;
Arthur Kepner977940b2015-11-04 21:10:10 -05002021 u32 lqpn, rqpn = 0;
2022 u16 rlid = 0;
Dasaratharaman Chandramoulib736a462016-07-25 13:40:34 -07002023 u8 sl, sc5, svc_type;
Mike Marciniszyn77241052015-07-30 15:17:43 -04002024
Arthur Kepner977940b2015-11-04 21:10:10 -05002025 switch (packet->qp->ibqp.qp_type) {
2026 case IB_QPT_UC:
Dasaratharaman Chandramoulid8966fc2017-04-29 14:41:28 -04002027 rlid = rdma_ah_get_dlid(&qp->remote_ah_attr);
Arthur Kepner977940b2015-11-04 21:10:10 -05002028 rqpn = qp->remote_qpn;
2029 svc_type = IB_CC_SVCTYPE_UC;
2030 break;
2031 case IB_QPT_RC:
Dasaratharaman Chandramoulid8966fc2017-04-29 14:41:28 -04002032 rlid = rdma_ah_get_dlid(&qp->remote_ah_attr);
Arthur Kepner977940b2015-11-04 21:10:10 -05002033 rqpn = qp->remote_qpn;
2034 svc_type = IB_CC_SVCTYPE_RC;
2035 break;
2036 case IB_QPT_SMI:
2037 case IB_QPT_GSI:
2038 case IB_QPT_UD:
2039 svc_type = IB_CC_SVCTYPE_UD;
2040 break;
2041 default:
Dennis Dalessandro4eb06882016-01-19 14:42:39 -08002042 ibp->rvp.n_pkt_drops++;
Arthur Kepner977940b2015-11-04 21:10:10 -05002043 return;
2044 }
2045
Dasaratharaman Chandramouliaad559c2017-04-09 10:16:15 -07002046 sc5 = hfi1_9B_get_sc5(hdr, packet->rhf);
Arthur Kepner977940b2015-11-04 21:10:10 -05002047 sl = ibp->sc_to_sl[sc5];
2048 lqpn = qp->ibqp.qp_num;
2049
2050 process_becn(ppd, sl, rlid, lqpn, rqpn, svc_type);
Mike Marciniszyn77241052015-07-30 15:17:43 -04002051}