/*
 * Copyright(c) 2015 - 2018 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#ifndef HFI1_VERBS_H
#define HFI1_VERBS_H

#include <linux/types.h>
#include <linux/seqlock.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/kref.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/completion.h>
#include <linux/slab.h>
#include <rdma/ib_pack.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_mad.h>
#include <rdma/ib_hdrs.h>
#include <rdma/rdma_vt.h>
#include <rdma/rdmavt_qp.h>
#include <rdma/rdmavt_cq.h>

struct hfi1_ctxtdata;
struct hfi1_pportdata;
struct hfi1_devdata;
struct hfi1_packet;

#include "iowait.h"
#include "tid_rdma.h"
#include "opfn.h"

#define HFI1_MAX_RDMA_ATOMIC 16

/*
 * Increment this value if any changes that break userspace ABI
 * compatibility are made.
 */
#define HFI1_UVERBS_ABI_VERSION 2

/* IB Performance Manager status values */
#define IB_PMA_SAMPLE_STATUS_DONE 0x00
#define IB_PMA_SAMPLE_STATUS_STARTED 0x01
#define IB_PMA_SAMPLE_STATUS_RUNNING 0x02

/* Mandatory IB performance counter select values. */
#define IB_PMA_PORT_XMIT_DATA cpu_to_be16(0x0001)
#define IB_PMA_PORT_RCV_DATA cpu_to_be16(0x0002)
#define IB_PMA_PORT_XMIT_PKTS cpu_to_be16(0x0003)
#define IB_PMA_PORT_RCV_PKTS cpu_to_be16(0x0004)
#define IB_PMA_PORT_XMIT_WAIT cpu_to_be16(0x0005)

#define HFI1_VENDOR_IPG cpu_to_be16(0xFFA0)

#define IB_DEFAULT_GID_PREFIX cpu_to_be64(0xfe80000000000000ULL)
#define OPA_BTH_MIG_REQ BIT(31)

#define RC_OP(x) IB_OPCODE_RC_##x
#define UC_OP(x) IB_OPCODE_UC_##x

/* flags passed by hfi1_ib_rcv() */
enum {
        HFI1_HAS_GRH = (1 << 0),
};

#define LRH_16B_BYTES (FIELD_SIZEOF(struct hfi1_16b_header, lrh))
#define LRH_16B_DWORDS (LRH_16B_BYTES / sizeof(u32))
#define LRH_9B_BYTES (FIELD_SIZEOF(struct ib_header, lrh))
#define LRH_9B_DWORDS (LRH_9B_BYTES / sizeof(u32))

/* 24 bits for qpn, upper 8 bits reserved */
struct opa_16b_mgmt {
        __be32 dest_qpn;
        __be32 src_qpn;
};

struct hfi1_16b_header {
        u32 lrh[4];
        union {
                struct {
                        struct ib_grh grh;
                        struct ib_other_headers oth;
                } l;
                struct ib_other_headers oth;
                struct opa_16b_mgmt mgmt;
        } u;
} __packed;

struct hfi1_opa_header {
        union {
                struct ib_header ibh;        /* 9B header */
                struct hfi1_16b_header opah; /* 16B header */
        };
        u8 hdr_type;                         /* 9B or 16B */
} __packed;

struct hfi1_ahg_info {
        u32 ahgdesc[2];
        u16 tx_flags;
        u8 ahgcount;
        u8 ahgidx;
};

struct hfi1_sdma_header {
        __le64 pbc;
        struct hfi1_opa_header hdr;
} __packed;

/*
 * hfi1 specific data structures that will be hidden from rvt after the queue
 * pair is made common
 */
struct hfi1_qp_priv {
        struct hfi1_ahg_info *s_ahg;        /* ahg info for next header */
        struct sdma_engine *s_sde;          /* current sde */
        struct send_context *s_sendcontext; /* current sendcontext */
        struct hfi1_ctxtdata *rcd;          /* QP's receive context */
        struct page **pages;                /* for TID page scan */
        u32 tid_enqueue;                    /* saved when tid waited */
        u8 s_sc;                            /* SC[0..4] for next packet */
        struct iowait s_iowait;
        struct list_head tid_wait;          /* for queueing tid space */
        struct hfi1_opfn_data opfn;
        struct tid_flow_state flow_state;
        struct tid_rdma_qp_params tid_rdma;
        struct rvt_qp *owner;
        u8 hdr_type;                        /* 9B or 16B */
        unsigned long tid_timer_timeout_jiffies;

        /* variables for the TID RDMA SE state machine */
        u8 rnr_nak_state;       /* RNR NAK state */
        u32 s_flags;
        u32 r_tid_head;         /* Most recently added TID RDMA request */
        u32 r_tid_tail;         /* the last completed TID RDMA request */
        u32 r_tid_ack;          /* the TID RDMA request to be ACK'ed */
        u32 r_tid_alloc;        /* Request for which we are allocating resources */
        u32 pending_tid_w_segs; /* Num of pending tid write segments */
        u32 alloc_w_segs;       /* Number of segments for which write */
                                /* resources have been allocated for this QP */

        /* For TID RDMA READ */
        u32 tid_r_reqs;         /* Num of tid reads requested */
        u32 tid_r_comp;         /* Num of tid reads completed */
        u32 pending_tid_r_segs; /* Num of pending tid read segments */
        u16 pkts_ps;            /* packets per segment */
        u8 timeout_shift;       /* account for number of packets per segment */

        u8 sync_pt;             /* Set when QP reaches sync point */
};

#define HFI1_QP_WQE_INVALID ((u32)-1)

struct hfi1_swqe_priv {
        struct tid_rdma_request tid_req;
        struct rvt_sge_state ss;  /* Used for TID RDMA READ Request */
};

struct hfi1_ack_priv {
        struct tid_rdma_request tid_req;
};

/*
 * This structure is used to hold commonly looked-up and computed values
 * during the progress of the send engine.
 */
struct iowait_work;
struct hfi1_pkt_state {
        struct hfi1_ibdev *dev;
        struct hfi1_ibport *ibp;
        struct hfi1_pportdata *ppd;
        struct verbs_txreq *s_txreq;
        struct iowait_work *wait;
        unsigned long flags;
        unsigned long timeout;
        unsigned long timeout_int;
        int cpu;
        u8 opcode;
        bool in_thread;
        bool pkts_sent;
};

#define HFI1_PSN_CREDIT 16

struct hfi1_opcode_stats {
        u64 n_packets; /* number of packets */
        u64 n_bytes;   /* total number of bytes */
};

struct hfi1_opcode_stats_perctx {
        struct hfi1_opcode_stats stats[256];
};

static inline void inc_opstats(
        u32 tlen,
        struct hfi1_opcode_stats *stats)
{
#ifdef CONFIG_DEBUG_FS
        stats->n_bytes += tlen;
        stats->n_packets++;
#endif
}

struct hfi1_ibport {
        struct rvt_qp __rcu *qp[2];
        struct rvt_ibport rvp;

        /* the first 16 entries are sl_to_vl for !OPA */
        u8 sl_to_sc[32];
        u8 sc_to_sl[32];
};

struct hfi1_ibdev {
        struct rvt_dev_info rdi; /* Must be first */

        /* QP numbers are shared by all IB ports */
        /* protect txwait list */
        seqlock_t txwait_lock ____cacheline_aligned_in_smp;
        struct list_head txwait;  /* list for wait verbs_txreq */
        struct list_head memwait; /* list for wait kernel memory */
        struct kmem_cache *verbs_txreq_cache;
        u64 n_txwait;
        u64 n_kmem_wait;
        u64 n_tidwait;

        /* protect iowait lists */
        seqlock_t iowait_lock ____cacheline_aligned_in_smp;
        u64 n_piowait;
        u64 n_piodrain;
        struct timer_list mem_timer;

#ifdef CONFIG_DEBUG_FS
        /* per HFI debugfs */
        struct dentry *hfi1_ibdev_dbg;
        /* per HFI symlinks to above */
        struct dentry *hfi1_ibdev_link;
#ifdef CONFIG_FAULT_INJECTION
        struct fault *fault;
#endif
#endif
};

static inline struct hfi1_ibdev *to_idev(struct ib_device *ibdev)
{
        struct rvt_dev_info *rdi;

        rdi = container_of(ibdev, struct rvt_dev_info, ibdev);
        return container_of(rdi, struct hfi1_ibdev, rdi);
}

static inline struct rvt_qp *iowait_to_qp(struct iowait *s_iowait)
{
        struct hfi1_qp_priv *priv;

        priv = container_of(s_iowait, struct hfi1_qp_priv, s_iowait);
        return priv->owner;
}

/*
 * This must be called with s_lock held.
 */
void hfi1_bad_pkey(struct hfi1_ibport *ibp, u32 key, u32 sl,
                   u32 qp1, u32 qp2, u32 lid1, u32 lid2);
void hfi1_cap_mask_chg(struct rvt_dev_info *rdi, u8 port_num);
void hfi1_sys_guid_chg(struct hfi1_ibport *ibp);
void hfi1_node_desc_chg(struct hfi1_ibport *ibp);
int hfi1_process_mad(struct ib_device *ibdev, int mad_flags, u8 port,
                     const struct ib_wc *in_wc, const struct ib_grh *in_grh,
                     const struct ib_mad_hdr *in_mad, size_t in_mad_size,
                     struct ib_mad_hdr *out_mad, size_t *out_mad_size,
                     u16 *out_mad_pkey_index);

/*
 * The PSN_MASK and PSN_SHIFT allow for
 * 1) comparing two PSNs
 * 2) returning the PSN with any upper bits masked
 * 3) returning the difference between two PSNs
 *
 * The number of significant bits in the PSN must
 * necessarily be at least one bit less than
 * the container holding the PSN.
 */
#define PSN_MASK 0x7FFFFFFF
#define PSN_SHIFT 1
#define PSN_MODIFY_MASK 0xFFFFFF

/*
 * Compare two PSNs
 * Returns an integer < 0, == 0, or > 0.
 */
static inline int cmp_psn(u32 a, u32 b)
{
        return (((int)a) - ((int)b)) << PSN_SHIFT;
}

/*
 * Return masked PSN
 */
static inline u32 mask_psn(u32 a)
{
        return a & PSN_MASK;
}

/*
 * Return delta between two PSNs
 */
static inline u32 delta_psn(u32 a, u32 b)
{
        return (((int)a - (int)b) << PSN_SHIFT) >> PSN_SHIFT;
}

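/*
 * Worked example (illustrative, with values chosen only for this note):
 * with PSN_MASK = 0x7FFFFFFF and PSN_SHIFT = 1, the left shift discards
 * bit 31 so 31-bit PSN comparisons remain correct across the wrap,
 * assuming the usual two's-complement 32-bit int arithmetic:
 *
 *   cmp_psn(0x0, 0x7FFFFFFE)   -> ((int)0x0 - (int)0x7FFFFFFE) << 1 = 4 > 0,
 *                                 so PSN 0x0 orders after PSN 0x7FFFFFFE
 *   delta_psn(0x0, 0x7FFFFFFE) -> 2 (two forward steps across the wrap)
 *   mask_psn(0x80000005)       -> 0x5
 */
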
static inline struct tid_rdma_request *wqe_to_tid_req(struct rvt_swqe *wqe)
{
        return &((struct hfi1_swqe_priv *)wqe->priv)->tid_req;
}

static inline struct tid_rdma_request *ack_to_tid_req(struct rvt_ack_entry *e)
{
        return &((struct hfi1_ack_priv *)e->priv)->tid_req;
}

/*
 * Look through all the active flows for a TID RDMA request and find
 * the one (if it exists) that contains the specified PSN.
 */
static inline u32 __full_flow_psn(struct flow_state *state, u32 psn)
{
        return mask_psn((state->generation << HFI1_KDETH_BTH_SEQ_SHIFT) |
                        (psn & HFI1_KDETH_BTH_SEQ_MASK));
}

static inline u32 full_flow_psn(struct tid_rdma_flow *flow, u32 psn)
{
        return __full_flow_psn(&flow->flow_state, psn);
}

struct verbs_txreq;
void hfi1_put_txreq(struct verbs_txreq *tx);

int hfi1_verbs_send(struct rvt_qp *qp, struct hfi1_pkt_state *ps);

void hfi1_cnp_rcv(struct hfi1_packet *packet);

void hfi1_uc_rcv(struct hfi1_packet *packet);

void hfi1_rc_rcv(struct hfi1_packet *packet);

void hfi1_rc_hdrerr(
        struct hfi1_ctxtdata *rcd,
        struct hfi1_packet *packet,
        struct rvt_qp *qp);

u8 ah_to_sc(struct ib_device *ibdev, struct rdma_ah_attr *ah_attr);

void hfi1_rc_send_complete(struct rvt_qp *qp, struct hfi1_opa_header *opah);

void hfi1_ud_rcv(struct hfi1_packet *packet);

int hfi1_lookup_pkey_idx(struct hfi1_ibport *ibp, u16 pkey);

void hfi1_migrate_qp(struct rvt_qp *qp);

int hfi1_check_modify_qp(struct rvt_qp *qp, struct ib_qp_attr *attr,
                         int attr_mask, struct ib_udata *udata);

void hfi1_modify_qp(struct rvt_qp *qp, struct ib_qp_attr *attr,
                    int attr_mask, struct ib_udata *udata);
void hfi1_restart_rc(struct rvt_qp *qp, u32 psn, int wait);
int hfi1_setup_wqe(struct rvt_qp *qp, struct rvt_swqe *wqe,
                   bool *call_send);

extern const u32 rc_only_opcode;
extern const u32 uc_only_opcode;

int hfi1_ruc_check_hdr(struct hfi1_ibport *ibp, struct hfi1_packet *packet);

u32 hfi1_make_grh(struct hfi1_ibport *ibp, struct ib_grh *hdr,
                  const struct ib_global_route *grh, u32 hwords, u32 nwords);

void hfi1_make_ruc_header(struct rvt_qp *qp, struct ib_other_headers *ohdr,
                          u32 bth0, u32 bth1, u32 bth2, int middle,
                          struct hfi1_pkt_state *ps);

void _hfi1_do_send(struct work_struct *work);

void hfi1_do_send_from_rvt(struct rvt_qp *qp);

void hfi1_do_send(struct rvt_qp *qp, bool in_thread);

void hfi1_send_rc_ack(struct hfi1_packet *packet, bool is_fecn);

int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps);

int hfi1_make_uc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps);

int hfi1_make_ud_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps);

int hfi1_register_ib_device(struct hfi1_devdata *);

void hfi1_unregister_ib_device(struct hfi1_devdata *);

void hfi1_kdeth_eager_rcv(struct hfi1_packet *packet);

void hfi1_kdeth_expected_rcv(struct hfi1_packet *packet);

void hfi1_ib_rcv(struct hfi1_packet *packet);

void hfi1_16B_rcv(struct hfi1_packet *packet);

unsigned hfi1_get_npkeys(struct hfi1_devdata *);

int hfi1_verbs_send_dma(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
                        u64 pbc);

int hfi1_verbs_send_pio(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
                        u64 pbc);

static inline bool opa_bth_is_migration(struct ib_other_headers *ohdr)
{
        return ohdr->bth[1] & cpu_to_be32(OPA_BTH_MIG_REQ);
}

void hfi1_wait_kmem(struct rvt_qp *qp);

static inline void hfi1_trdma_send_complete(struct rvt_qp *qp,
                                            struct rvt_swqe *wqe,
                                            enum ib_wc_status status)
{
        trdma_clean_swqe(qp, wqe);
        rvt_send_complete(qp, wqe, status);
}

extern const enum ib_wc_opcode ib_hfi1_wc_opcode[];

extern const u8 hdr_len_by_opcode[];

extern const int ib_rvt_state_ops[];

extern __be64 ib_hfi1_sys_image_guid; /* in network order */

extern unsigned int hfi1_max_cqes;

extern unsigned int hfi1_max_cqs;

extern unsigned int hfi1_max_qp_wrs;

extern unsigned int hfi1_max_qps;

extern unsigned int hfi1_max_sges;

extern unsigned int hfi1_max_mcast_grps;

extern unsigned int hfi1_max_mcast_qp_attached;

extern unsigned int hfi1_max_srqs;

extern unsigned int hfi1_max_srq_sges;

extern unsigned int hfi1_max_srq_wrs;

extern unsigned short piothreshold;

extern const u32 ib_hfi1_rnr_table[];

#endif /* HFI1_VERBS_H */