/*
 * Copyright(c) 2015 - 2018 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#ifndef HFI1_VERBS_H
#define HFI1_VERBS_H

#include <linux/types.h>
#include <linux/seqlock.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/kref.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/completion.h>
#include <linux/slab.h>
#include <rdma/ib_pack.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_mad.h>
#include <rdma/ib_hdrs.h>
#include <rdma/rdma_vt.h>
#include <rdma/rdmavt_qp.h>
#include <rdma/rdmavt_cq.h>

struct hfi1_ctxtdata;
struct hfi1_pportdata;
struct hfi1_devdata;
struct hfi1_packet;

#include "iowait.h"
#include "tid_rdma.h"
#include "opfn.h"

#define HFI1_MAX_RDMA_ATOMIC	16

/*
 * Increment this value if any changes that break userspace ABI
 * compatibility are made.
 */
#define HFI1_UVERBS_ABI_VERSION	2

/* IB Performance Manager status values */
#define IB_PMA_SAMPLE_STATUS_DONE	0x00
#define IB_PMA_SAMPLE_STATUS_STARTED	0x01
#define IB_PMA_SAMPLE_STATUS_RUNNING	0x02

/* Mandatory IB performance counter select values. */
#define IB_PMA_PORT_XMIT_DATA	cpu_to_be16(0x0001)
#define IB_PMA_PORT_RCV_DATA	cpu_to_be16(0x0002)
#define IB_PMA_PORT_XMIT_PKTS	cpu_to_be16(0x0003)
#define IB_PMA_PORT_RCV_PKTS	cpu_to_be16(0x0004)
#define IB_PMA_PORT_XMIT_WAIT	cpu_to_be16(0x0005)

#define HFI1_VENDOR_IPG		cpu_to_be16(0xFFA0)

#define IB_DEFAULT_GID_PREFIX	cpu_to_be64(0xfe80000000000000ULL)
#define OPA_BTH_MIG_REQ		BIT(31)

#define RC_OP(x) IB_OPCODE_RC_##x
#define UC_OP(x) IB_OPCODE_UC_##x

/* flags passed by hfi1_ib_rcv() */
enum {
	HFI1_HAS_GRH = (1 << 0),
};

#define LRH_16B_BYTES	(FIELD_SIZEOF(struct hfi1_16b_header, lrh))
#define LRH_16B_DWORDS	(LRH_16B_BYTES / sizeof(u32))
#define LRH_9B_BYTES	(FIELD_SIZEOF(struct ib_header, lrh))
#define LRH_9B_DWORDS	(LRH_9B_BYTES / sizeof(u32))

/* 24 bits for qpn, upper 8 bits reserved */
struct opa_16b_mgmt {
	__be32 dest_qpn;
	__be32 src_qpn;
};

struct hfi1_16b_header {
	u32 lrh[4];
	union {
		struct {
			struct ib_grh grh;
			struct ib_other_headers oth;
		} l;
		struct ib_other_headers oth;
		struct opa_16b_mgmt mgmt;
	} u;
} __packed;

struct hfi1_opa_header {
	union {
		struct ib_header ibh; /* 9B header */
		struct hfi1_16b_header opah; /* 16B header */
	};
	u8 hdr_type; /* 9B or 16B */
} __packed;

struct hfi1_ahg_info {
	u32 ahgdesc[2];
	u16 tx_flags;
	u8 ahgcount;
	u8 ahgidx;
};

struct hfi1_sdma_header {
	__le64 pbc;
	struct hfi1_opa_header hdr;
} __packed;

/*
 * hfi1-specific data structures that will be hidden from rvt after the
 * queue pair is made common.
 */
struct hfi1_qp_priv {
	struct hfi1_ahg_info *s_ahg;		/* ahg info for next header */
	struct sdma_engine *s_sde;		/* current sde */
	struct send_context *s_sendcontext;	/* current sendcontext */
	struct hfi1_ctxtdata *rcd;		/* QP's receive context */
	struct page **pages;			/* for TID page scan */
	u32 tid_enqueue;			/* saved when tid waited */
	u8 s_sc;				/* SC[0..4] for next packet */
	struct iowait s_iowait;
	struct timer_list s_tid_timer;		/* for timing tid wait */
	struct timer_list s_tid_retry_timer;	/* for timing tid ack */
	struct list_head tid_wait;		/* for queueing tid space */
	struct hfi1_opfn_data opfn;
	struct tid_flow_state flow_state;
	struct tid_rdma_qp_params tid_rdma;
	struct rvt_qp *owner;
	u8 hdr_type;				/* 9B or 16B */
	atomic_t n_tid_requests;		/* # of sent TID RDMA requests */
	unsigned long tid_timer_timeout_jiffies;
	unsigned long tid_retry_timeout_jiffies;

	/* variables for the TID RDMA SE state machine */
	u8 s_state;
	u8 s_retry;
	u8 rnr_nak_state;	/* RNR NAK state */
	u8 s_nak_state;
	u32 s_nak_psn;
	u32 s_flags;
	u32 s_tid_cur;
	u32 s_tid_head;
	u32 s_tid_tail;
	u32 r_tid_head;		/* Most recently added TID RDMA request */
	u32 r_tid_tail;		/* the last completed TID RDMA request */
	u32 r_tid_ack;		/* the TID RDMA request to be ACK'ed */
	u32 r_tid_alloc;	/* Request for which we are allocating resources */
	u32 pending_tid_w_segs; /* Num of pending tid write segments */
	u32 alloc_w_segs;	/* Number of segments for which write */
				/* resources have been allocated for this QP */

	/* For TID RDMA READ */
	u32 tid_r_reqs;		/* Num of tid reads requested */
	u32 tid_r_comp;		/* Num of tid reads completed */
	u32 pending_tid_r_segs; /* Num of pending tid read segments */
	u16 pkts_ps;		/* packets per segment */
	u8 timeout_shift;	/* account for number of packets per segment */

	u32 r_next_psn_kdeth;
	u32 r_next_psn_kdeth_save;
	u32 s_resync_psn;
	u8 sync_pt;		/* Set when QP reaches sync point */
	u8 resync;
};

#define HFI1_QP_WQE_INVALID	((u32)-1)

struct hfi1_swqe_priv {
	struct tid_rdma_request tid_req;
	struct rvt_sge_state ss;	/* Used for TID RDMA READ Request */
};

struct hfi1_ack_priv {
	struct rvt_sge_state ss;	/* used for TID WRITE RESP */
	struct tid_rdma_request tid_req;
};

/*
 * This structure is used to hold commonly looked-up and computed values
 * during the send engine's progress.
 */
struct iowait_work;
struct hfi1_pkt_state {
	struct hfi1_ibdev *dev;
	struct hfi1_ibport *ibp;
	struct hfi1_pportdata *ppd;
	struct verbs_txreq *s_txreq;
	struct iowait_work *wait;
	unsigned long flags;
	unsigned long timeout;
	unsigned long timeout_int;
	int cpu;
	u8 opcode;
	bool in_thread;
	bool pkts_sent;
};

#define HFI1_PSN_CREDIT	16

struct hfi1_opcode_stats {
	u64 n_packets;		/* number of packets */
	u64 n_bytes;		/* total number of bytes */
};

struct hfi1_opcode_stats_perctx {
	struct hfi1_opcode_stats stats[256];
};

static inline void inc_opstats(
	u32 tlen,
	struct hfi1_opcode_stats *stats)
{
#ifdef CONFIG_DEBUG_FS
	stats->n_bytes += tlen;
	stats->n_packets++;
#endif
}

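/*
 * Usage sketch (illustrative, not taken from this file): a receive-path
 * caller would typically index the per-context stats[256] table by BTH
 * opcode and bump that entry, e.g.
 *
 *	inc_opstats(tlen, &rcd->opstats->stats[opcode]);
 *
 * The rcd->opstats field name above is an assumption for illustration;
 * the counters are compiled out unless CONFIG_DEBUG_FS is enabled.
 */
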
struct hfi1_ibport {
	struct rvt_qp __rcu *qp[2];
	struct rvt_ibport rvp;

	/* the first 16 entries are sl_to_vl for !OPA */
	u8 sl_to_sc[32];
	u8 sc_to_sl[32];
};

struct hfi1_ibdev {
	struct rvt_dev_info rdi; /* Must be first */

	/* QP numbers are shared by all IB ports */
	/* protect txwait list */
	seqlock_t txwait_lock ____cacheline_aligned_in_smp;
	struct list_head txwait;	/* list for wait verbs_txreq */
	struct list_head memwait;	/* list for wait kernel memory */
	struct kmem_cache *verbs_txreq_cache;
	u64 n_txwait;
	u64 n_kmem_wait;
	u64 n_tidwait;

	/* protect iowait lists */
	seqlock_t iowait_lock ____cacheline_aligned_in_smp;
	u64 n_piowait;
	u64 n_piodrain;
	struct timer_list mem_timer;

#ifdef CONFIG_DEBUG_FS
	/* per HFI debugfs */
	struct dentry *hfi1_ibdev_dbg;
	/* per HFI symlinks to above */
	struct dentry *hfi1_ibdev_link;
#ifdef CONFIG_FAULT_INJECTION
	struct fault *fault;
#endif
#endif
};

static inline struct hfi1_ibdev *to_idev(struct ib_device *ibdev)
{
	struct rvt_dev_info *rdi;

	rdi = container_of(ibdev, struct rvt_dev_info, ibdev);
	return container_of(rdi, struct hfi1_ibdev, rdi);
}

static inline struct rvt_qp *iowait_to_qp(struct iowait *s_iowait)
{
	struct hfi1_qp_priv *priv;

	priv = container_of(s_iowait, struct hfi1_qp_priv, s_iowait);
	return priv->owner;
}

/*
 * This must be called with s_lock held.
 */
void hfi1_bad_pkey(struct hfi1_ibport *ibp, u32 key, u32 sl,
		   u32 qp1, u32 qp2, u32 lid1, u32 lid2);
void hfi1_cap_mask_chg(struct rvt_dev_info *rdi, u8 port_num);
void hfi1_sys_guid_chg(struct hfi1_ibport *ibp);
void hfi1_node_desc_chg(struct hfi1_ibport *ibp);
int hfi1_process_mad(struct ib_device *ibdev, int mad_flags, u8 port,
		     const struct ib_wc *in_wc, const struct ib_grh *in_grh,
		     const struct ib_mad_hdr *in_mad, size_t in_mad_size,
		     struct ib_mad_hdr *out_mad, size_t *out_mad_size,
		     u16 *out_mad_pkey_index);

/*
 * The PSN_MASK and PSN_SHIFT allow for
 * 1) comparing two PSNs
 * 2) returning the PSN with any upper bits masked
 * 3) returning the difference between two PSNs
 *
 * The number of significant bits in the PSN must
 * necessarily be at least one bit less than
 * the width of the container holding the PSN.
 */
#define PSN_MASK	0x7FFFFFFF
#define PSN_SHIFT	1
#define PSN_MODIFY_MASK	0xFFFFFF

/*
 * Compare two PSNs
 * Returns an integer <, ==, or > than zero.
 */
static inline int cmp_psn(u32 a, u32 b)
{
	return (((int)a) - ((int)b)) << PSN_SHIFT;
}

/*
 * Return masked PSN
 */
static inline u32 mask_psn(u32 a)
{
	return a & PSN_MASK;
}

/*
 * Return delta between two PSNs
 */
static inline u32 delta_psn(u32 a, u32 b)
{
	return (((int)a - (int)b) << PSN_SHIFT) >> PSN_SHIFT;
}

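/*
 * Worked example (illustrative only): because PSNs are 31-bit values,
 * shifting the signed difference by PSN_SHIFT makes comparisons and
 * deltas behave correctly across wrap-around.  For a == 0 and
 * b == 0x7FFFFFFF the helpers above evaluate to:
 *
 *	cmp_psn(0, 0x7FFFFFFF)   == 2   (> 0, so PSN 0 is "after" the
 *					 largest PSN once the count wraps)
 *	delta_psn(0, 0x7FFFFFFF) == 1   (the two PSNs are one apart)
 *	mask_psn(0x80000001)     == 0x00000001
 */
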
static inline struct tid_rdma_request *wqe_to_tid_req(struct rvt_swqe *wqe)
{
	return &((struct hfi1_swqe_priv *)wqe->priv)->tid_req;
}

static inline struct tid_rdma_request *ack_to_tid_req(struct rvt_ack_entry *e)
{
	return &((struct hfi1_ack_priv *)e->priv)->tid_req;
}

/*
 * Build the full (masked) flow PSN for a TID RDMA flow by combining the
 * flow's generation with the KDETH sequence bits of the given PSN.
 */
static inline u32 __full_flow_psn(struct flow_state *state, u32 psn)
{
	return mask_psn((state->generation << HFI1_KDETH_BTH_SEQ_SHIFT) |
			(psn & HFI1_KDETH_BTH_SEQ_MASK));
}

static inline u32 full_flow_psn(struct tid_rdma_flow *flow, u32 psn)
{
	return __full_flow_psn(&flow->flow_state, psn);
}

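/*
 * Illustrative sketch (assumptions noted): if HFI1_KDETH_BTH_SEQ_SHIFT
 * were 11, so the low 11 bits of the PSN carry the KDETH sequence number
 * and the flow generation sits above them, then for a flow in
 * generation 3 and a PSN whose sequence bits are 0x005:
 *
 *	full_flow_psn(flow, psn) == mask_psn((3 << 11) | 0x005) == 0x1805
 *
 * The actual shift and mask values are defined elsewhere in the driver;
 * the numbers here are only an example.
 */
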
struct verbs_txreq;
void hfi1_put_txreq(struct verbs_txreq *tx);

int hfi1_verbs_send(struct rvt_qp *qp, struct hfi1_pkt_state *ps);

void hfi1_cnp_rcv(struct hfi1_packet *packet);

void hfi1_uc_rcv(struct hfi1_packet *packet);

void hfi1_rc_rcv(struct hfi1_packet *packet);

void hfi1_rc_hdrerr(
	struct hfi1_ctxtdata *rcd,
	struct hfi1_packet *packet,
	struct rvt_qp *qp);

u8 ah_to_sc(struct ib_device *ibdev, struct rdma_ah_attr *ah_attr);

void hfi1_rc_send_complete(struct rvt_qp *qp, struct hfi1_opa_header *opah);

void hfi1_ud_rcv(struct hfi1_packet *packet);

int hfi1_lookup_pkey_idx(struct hfi1_ibport *ibp, u16 pkey);

void hfi1_migrate_qp(struct rvt_qp *qp);

int hfi1_check_modify_qp(struct rvt_qp *qp, struct ib_qp_attr *attr,
			 int attr_mask, struct ib_udata *udata);

void hfi1_modify_qp(struct rvt_qp *qp, struct ib_qp_attr *attr,
		    int attr_mask, struct ib_udata *udata);
void hfi1_restart_rc(struct rvt_qp *qp, u32 psn, int wait);
int hfi1_setup_wqe(struct rvt_qp *qp, struct rvt_swqe *wqe,
		   bool *call_send);

extern const u32 rc_only_opcode;
extern const u32 uc_only_opcode;

int hfi1_ruc_check_hdr(struct hfi1_ibport *ibp, struct hfi1_packet *packet);

u32 hfi1_make_grh(struct hfi1_ibport *ibp, struct ib_grh *hdr,
		  const struct ib_global_route *grh, u32 hwords, u32 nwords);

void hfi1_make_ruc_header(struct rvt_qp *qp, struct ib_other_headers *ohdr,
			  u32 bth0, u32 bth1, u32 bth2, int middle,
			  struct hfi1_pkt_state *ps);

void _hfi1_do_send(struct work_struct *work);

void hfi1_do_send_from_rvt(struct rvt_qp *qp);

void hfi1_do_send(struct rvt_qp *qp, bool in_thread);

void hfi1_send_rc_ack(struct hfi1_packet *packet, bool is_fecn);

int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps);

int hfi1_make_uc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps);

int hfi1_make_ud_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps);

int hfi1_register_ib_device(struct hfi1_devdata *);

void hfi1_unregister_ib_device(struct hfi1_devdata *);

void hfi1_kdeth_eager_rcv(struct hfi1_packet *packet);

void hfi1_kdeth_expected_rcv(struct hfi1_packet *packet);

void hfi1_ib_rcv(struct hfi1_packet *packet);

void hfi1_16B_rcv(struct hfi1_packet *packet);

unsigned hfi1_get_npkeys(struct hfi1_devdata *);

int hfi1_verbs_send_dma(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
			u64 pbc);

int hfi1_verbs_send_pio(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
			u64 pbc);

static inline bool opa_bth_is_migration(struct ib_other_headers *ohdr)
{
	return ohdr->bth[1] & cpu_to_be32(OPA_BTH_MIG_REQ);
}

void hfi1_wait_kmem(struct rvt_qp *qp);

static inline void hfi1_trdma_send_complete(struct rvt_qp *qp,
					    struct rvt_swqe *wqe,
					    enum ib_wc_status status)
{
	trdma_clean_swqe(qp, wqe);
	rvt_send_complete(qp, wqe, status);
}

extern const enum ib_wc_opcode ib_hfi1_wc_opcode[];

extern const u8 hdr_len_by_opcode[];

extern const int ib_rvt_state_ops[];

extern __be64 ib_hfi1_sys_image_guid;    /* in network order */

extern unsigned int hfi1_max_cqes;

extern unsigned int hfi1_max_cqs;

extern unsigned int hfi1_max_qp_wrs;

extern unsigned int hfi1_max_qps;

extern unsigned int hfi1_max_sges;

extern unsigned int hfi1_max_mcast_grps;

extern unsigned int hfi1_max_mcast_qp_attached;

extern unsigned int hfi1_max_srqs;

extern unsigned int hfi1_max_srq_sges;

extern unsigned int hfi1_max_srq_wrs;

extern unsigned short piothreshold;

extern const u32 ib_hfi1_rnr_table[];

#endif                          /* HFI1_VERBS_H */