/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
/*
 * Copyright(c) 2018 Intel Corporation.
 *
 */
#ifndef HFI1_TID_RDMA_H
#define HFI1_TID_RDMA_H

#include <linux/circ_buf.h>
#include "common.h"

/* Convenience helpers for power-of-two circular buffer indexing */
#define CIRC_ADD(val, add, size) (((val) + (add)) & ((size) - 1))
#define CIRC_NEXT(val, size) CIRC_ADD(val, 1, size)
#define CIRC_PREV(val, size) CIRC_ADD(val, -1, size)
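
/*
 * Illustrative only: these helpers assume @size is a power of two, so
 * the (size - 1) mask performs the wraparound, e.g. with a ring of 8
 * entries:
 *
 *	CIRC_NEXT(7, 8) == 0	(8 & 7 wraps the increment)
 *	CIRC_PREV(0, 8) == 7	(-1 & 7 wraps the decrement)
 */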

#define TID_RDMA_MIN_SEGMENT_SIZE BIT(18) /* 256 KiB (for now) */
#define TID_RDMA_MAX_SEGMENT_SIZE BIT(18) /* 256 KiB (for now) */
#define TID_RDMA_MAX_PAGES (BIT(18) >> PAGE_SHIFT)

/*
 * Bit definitions for priv->s_flags.
 * These bit flags overload the bit flags defined for the QP's s_flags.
 * Because these bits are used only for the QP priv s_flags, there are
 * no collisions.
 *
 * HFI1_S_TID_WAIT_INTERLCK - QP is waiting for requester interlock
 * HFI1_R_TID_WAIT_INTERLCK - QP is waiting for responder interlock
 */
#define HFI1_S_TID_BUSY_SET BIT(0)
/* BIT(1) reserved for RVT_S_BUSY. */
#define HFI1_R_TID_RSC_TIMER BIT(2)
/* BIT(3) reserved for RVT_S_RESP_PENDING. */
/* BIT(4) reserved for RVT_S_ACK_PENDING. */
#define HFI1_S_TID_WAIT_INTERLCK BIT(5)
#define HFI1_R_TID_WAIT_INTERLCK BIT(6)
/* BIT(7) - BIT(15) reserved for RVT_S_WAIT_*. */
/* BIT(16) reserved for RVT_S_SEND_ONE. */
#define HFI1_S_TID_RETRY_TIMER BIT(17)
/* BIT(18) reserved for RVT_S_ECN. */
#define HFI1_R_TID_SW_PSN BIT(19)
/* BIT(26) reserved for HFI1_S_WAIT_HALT. */
/* BIT(27) reserved for HFI1_S_WAIT_TID_RESP. */
/* BIT(28) reserved for HFI1_S_WAIT_TID_SPACE. */
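
/*
 * A minimal usage sketch (not part of this header): these bits are set
 * and tested on the QP priv, not on qp->s_flags itself, e.g.
 *
 *	struct hfi1_qp_priv *priv = qp->priv;
 *
 *	priv->s_flags |= HFI1_S_TID_WAIT_INTERLCK;
 *	...
 *	if (priv->s_flags & HFI1_S_TID_WAIT_INTERLCK)
 *		priv->s_flags &= ~HFI1_S_TID_WAIT_INTERLCK;
 */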

/*
 * Unlike a regular IB RDMA WRITE, which does not require an entry in
 * the s_ack_queue, a TID RDMA WRITE request does because it generates
 * a response.
 * Therefore, the s_ack_queue needs to be extended by a certain
 * amount. The key point is that the queue needs to be extended
 * without letting the "user" know, so the user doesn't end up
 * using these extra entries.
 */
#define HFI1_TID_RDMA_WRITE_CNT 8
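
/*
 * A sketch of the sizing idea (illustrative, not the literal
 * allocation code): the responder's ack queue is allocated with the
 * TID RDMA WRITE slots folded in,
 *
 *	size = rvt_max_atomic(rdi) + HFI1_TID_RDMA_WRITE_CNT;
 *
 * while the limits advertised to the ULP (e.g. max_rd_atomic) stay
 * unchanged, so the extra entries are never visible to the user.
 */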

struct tid_rdma_params {
	struct rcu_head rcu_head;
	u32 qp;
	u32 max_len;
	u16 jkey;
	u8 max_read;
	u8 max_write;
	u8 timeout;
	u8 urg;
	u8 version;
};
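
/*
 * These parameters are negotiated per-QP via OPFN: each side packs its
 * local values into a u64 payload exchanged through
 * tid_rdma_conn_req()/tid_rdma_conn_reply() (the authoritative
 * encoding lives in tid_rdma.c), and the peer's values are published
 * via RCU. A read-side sketch, assuming @qpriv is the hfi1_qp_priv:
 *
 *	struct tid_rdma_params *remote;
 *
 *	rcu_read_lock();
 *	remote = rcu_dereference(qpriv->tid_rdma.remote);
 *	if (remote)
 *		max_len = remote->max_len;
 *	rcu_read_unlock();
 */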

struct tid_rdma_qp_params {
	struct work_struct trigger_work;
	struct tid_rdma_params local;
	struct tid_rdma_params __rcu *remote;
};

/* Track state for each hardware flow */
struct tid_flow_state {
	u32 generation;
	u32 psn;
	u8 index;
	u8 last_index;
};

enum tid_rdma_req_state {
	TID_REQUEST_INACTIVE = 0,
	TID_REQUEST_INIT,
	TID_REQUEST_INIT_RESEND,
	TID_REQUEST_ACTIVE,
	TID_REQUEST_RESEND,
	TID_REQUEST_RESEND_ACTIVE,
	TID_REQUEST_QUEUED,
	TID_REQUEST_SYNC,
	TID_REQUEST_RNR_NAK,
	TID_REQUEST_COMPLETE,
};
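
/*
 * Rough lifecycle sketch (illustrative; the authoritative transitions
 * live in tid_rdma.c): INACTIVE -> INIT -> ACTIVE -> COMPLETE on the
 * happy path; the *_RESEND states cover retransmission after a retry,
 * QUEUED and SYNC cover requests stalled on flow/generation resources,
 * and RNR_NAK parks a request until the responder has resources again.
 */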

struct tid_rdma_request {
	struct rvt_qp *qp;
	struct hfi1_ctxtdata *rcd;
	union {
		struct rvt_swqe *swqe;
		struct rvt_ack_entry *ack;
	} e;

	struct tid_rdma_flow *flows;	/* array of tid flows */
	struct rvt_sge_state ss;	/* SGE state for TID RDMA requests */
	u16 n_flows;		/* size of the flow buffer window */
	u16 setup_head;		/* flow index we are setting up */
	u16 clear_tail;		/* flow index we are clearing */
	u16 flow_idx;		/* flow index most recently set up */
	u16 acked_tail;

	u32 seg_len;
	u32 total_len;
	u32 r_ack_psn;		/* next expected ack PSN */
	u32 r_flow_psn;		/* IB PSN of next segment start */
	u32 r_last_acked;	/* IB PSN of last ACK'ed packet */
	u32 s_next_psn;		/* IB PSN of next segment start for read */

	u32 total_segs;		/* segments required to complete a request */
	u32 cur_seg;		/* index of current segment */
	u32 comp_seg;		/* index of last completed segment */
	u32 ack_seg;		/* index of last ack'ed segment */
	u32 alloc_seg;		/* index of next segment to be allocated */
	u32 isge;		/* index of "current" sge */
	u32 ack_pending;	/* num acks pending for this request */

	enum tid_rdma_req_state state;
};
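
/*
 * The ring indices above (setup_head, flow_idx, clear_tail, acked_tail)
 * walk the circular flows[] array using the CIRC_* helpers. A minimal
 * sketch, assuming a hypothetical power-of-two ring size MAX_FLOWS:
 *
 *	req->setup_head = CIRC_NEXT(req->setup_head, MAX_FLOWS);
 *	while (CIRC_CNT(req->setup_head, req->clear_tail, MAX_FLOWS))
 *		...;
 */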

/*
 * When header suppression is used, PSNs associated with a "flow" are
 * relevant (and not the PSNs maintained by verbs). Track per-flow
 * PSNs here for a TID RDMA segment.
 */
struct flow_state {
	u32 flags;
	u32 resp_ib_psn;	/* The IB PSN of the response for this flow */
	u32 generation;		/* generation of flow */
	u32 spsn;		/* starting PSN in TID space */
	u32 lpsn;		/* last PSN in TID space */
	u32 r_next_psn;		/* next PSN to be received (in TID space) */

	/* For tid rdma read */
	u32 ib_spsn;		/* starting PSN in Verbs space */
	u32 ib_lpsn;		/* last PSN in Verbs space */
};
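
/*
 * For TID RDMA READ, response packets advance the TID and verbs PSN
 * spaces in lockstep (one TID packet per IB packet), so a verbs PSN
 * can be recovered by offset. A sketch, assuming @fs points at the
 * flow_state of the segment:
 *
 *	u32 ib_psn = fs->ib_spsn + (tid_psn - fs->spsn);
 */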

struct tid_rdma_pageset {
	dma_addr_t addr : 48;	/* Only needed for the first page */
	u8 idx : 8;
	u8 count : 7;
	u8 mapped : 1;
};
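
/*
 * The bitfields pack one pageset descriptor into a single 64-bit word:
 * a 48-bit DMA address, an 8-bit index, a 7-bit page count, and a
 * mapped bit. A compile-time check of that assumption (a sketch; it
 * presumes the 64-bit dma_addr_t this driver is built with):
 *
 *	static_assert(sizeof(struct tid_rdma_pageset) == sizeof(u64));
 */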

/**
 * kern_tid_node - used for managing TID's in TID groups
 *
 * @grp: the rcd relative tid_group this node programs
 * @map: grp->map captured prior to programming this TID group in HW
 * @cnt: Only @cnt of available group entries are actually programmed
 */
struct kern_tid_node {
	struct tid_group *grp;
	u8 map;
	u8 cnt;
};

/* Overall info for a TID RDMA segment */
struct tid_rdma_flow {
	/*
	 * While a TID RDMA segment is being transferred, it uses a QP number
	 * from the "KDETH section of QP numbers" (which is different from the
	 * QP number that originated the request). Bits 11-15 of these QP
	 * numbers identify the "TID flow" for the segment; see the extraction
	 * example after this struct.
	 */
	struct flow_state flow_state;
	struct tid_rdma_request *req;
	u32 tid_qpn;
	u32 tid_offset;
	u32 length;
	u32 sent;
	u8 tnode_cnt;
	u8 tidcnt;
	u8 tid_idx;
	u8 idx;
	u8 npagesets;
	u8 npkts;
	u8 pkt;
	u8 resync_npkts;
	struct kern_tid_node tnode[TID_RDMA_MAX_PAGES];
	struct tid_rdma_pageset pagesets[TID_RDMA_MAX_PAGES];
	u32 tid_entry[TID_RDMA_MAX_PAGES];
};
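
/*
 * Illustrative only: given the layout described in the comment above,
 * the 5-bit TID flow index can be recovered from the KDETH QP number
 * with
 *
 *	u8 flow_idx = (flow->tid_qpn >> 11) & 0x1f;
 */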

enum tid_rnr_nak_state {
	TID_RNR_NAK_INIT = 0,
	TID_RNR_NAK_SEND,
	TID_RNR_NAK_SENT,
};

bool tid_rdma_conn_req(struct rvt_qp *qp, u64 *data);
bool tid_rdma_conn_reply(struct rvt_qp *qp, u64 data);
bool tid_rdma_conn_resp(struct rvt_qp *qp, u64 *data);
void tid_rdma_conn_error(struct rvt_qp *qp);
void tid_rdma_opfn_init(struct rvt_qp *qp, struct tid_rdma_params *p);

int hfi1_kern_exp_rcv_init(struct hfi1_ctxtdata *rcd, int reinit);
int hfi1_kern_exp_rcv_setup(struct tid_rdma_request *req,
			    struct rvt_sge_state *ss, bool *last);
int hfi1_kern_exp_rcv_clear(struct tid_rdma_request *req);
void hfi1_kern_exp_rcv_clear_all(struct tid_rdma_request *req);
void __trdma_clean_swqe(struct rvt_qp *qp, struct rvt_swqe *wqe);

/**
 * trdma_clean_swqe - clean flows for swqe if large send queue
 * @qp: the qp
 * @wqe: the send wqe
 */
static inline void trdma_clean_swqe(struct rvt_qp *qp, struct rvt_swqe *wqe)
{
	if (!wqe->priv)
		return;
	__trdma_clean_swqe(qp, wqe);
}

void hfi1_kern_read_tid_flow_free(struct rvt_qp *qp);

int hfi1_qp_priv_init(struct rvt_dev_info *rdi, struct rvt_qp *qp,
		      struct ib_qp_init_attr *init_attr);
void hfi1_qp_priv_tid_free(struct rvt_dev_info *rdi, struct rvt_qp *qp);

void hfi1_tid_rdma_flush_wait(struct rvt_qp *qp);

int hfi1_kern_setup_hw_flow(struct hfi1_ctxtdata *rcd, struct rvt_qp *qp);
void hfi1_kern_clear_hw_flow(struct hfi1_ctxtdata *rcd, struct rvt_qp *qp);
void hfi1_kern_init_ctxt_generations(struct hfi1_ctxtdata *rcd);

struct cntr_entry;
u64 hfi1_access_sw_tid_wait(const struct cntr_entry *entry,
			    void *context, int vl, int mode, u64 data);

u32 hfi1_build_tid_rdma_read_packet(struct rvt_swqe *wqe,
				    struct ib_other_headers *ohdr,
				    u32 *bth1, u32 *bth2, u32 *len);
u32 hfi1_build_tid_rdma_read_req(struct rvt_qp *qp, struct rvt_swqe *wqe,
				 struct ib_other_headers *ohdr, u32 *bth1,
				 u32 *bth2, u32 *len);
void hfi1_rc_rcv_tid_rdma_read_req(struct hfi1_packet *packet);
u32 hfi1_build_tid_rdma_read_resp(struct rvt_qp *qp, struct rvt_ack_entry *e,
				  struct ib_other_headers *ohdr, u32 *bth0,
				  u32 *bth1, u32 *bth2, u32 *len, bool *last);
void hfi1_rc_rcv_tid_rdma_read_resp(struct hfi1_packet *packet);
bool hfi1_handle_kdeth_eflags(struct hfi1_ctxtdata *rcd,
			      struct hfi1_pportdata *ppd,
			      struct hfi1_packet *packet);
void hfi1_tid_rdma_restart_req(struct rvt_qp *qp, struct rvt_swqe *wqe,
			       u32 *bth2);
void hfi1_qp_kern_exp_rcv_clear_all(struct rvt_qp *qp);
bool hfi1_tid_rdma_wqe_interlock(struct rvt_qp *qp, struct rvt_swqe *wqe);

void setup_tid_rdma_wqe(struct rvt_qp *qp, struct rvt_swqe *wqe);
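
/**
 * hfi1_setup_tid_rdma_wqe - inline filter in front of setup_tid_rdma_wqe()
 * @qp: the qp
 * @wqe: the send wqe
 *
 * Only a wqe that carries a priv and is an RDMA READ or RDMA WRITE of
 * at least TID_RDMA_MIN_SEGMENT_SIZE is considered for conversion to
 * a TID RDMA request.
 */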
static inline void hfi1_setup_tid_rdma_wqe(struct rvt_qp *qp,
					   struct rvt_swqe *wqe)
{
	if (wqe->priv &&
	    (wqe->wr.opcode == IB_WR_RDMA_READ ||
	     wqe->wr.opcode == IB_WR_RDMA_WRITE) &&
	    wqe->length >= TID_RDMA_MIN_SEGMENT_SIZE)
		setup_tid_rdma_wqe(qp, wqe);
}

u32 hfi1_build_tid_rdma_write_req(struct rvt_qp *qp, struct rvt_swqe *wqe,
				  struct ib_other_headers *ohdr,
				  u32 *bth1, u32 *bth2, u32 *len);

void hfi1_compute_tid_rdma_flow_wt(void);

void hfi1_rc_rcv_tid_rdma_write_req(struct hfi1_packet *packet);

u32 hfi1_build_tid_rdma_write_resp(struct rvt_qp *qp, struct rvt_ack_entry *e,
				   struct ib_other_headers *ohdr, u32 *bth1,
				   u32 bth2, u32 *len,
				   struct rvt_sge_state **ss);

void hfi1_del_tid_reap_timer(struct rvt_qp *qp);

void hfi1_rc_rcv_tid_rdma_write_resp(struct hfi1_packet *packet);

bool hfi1_build_tid_rdma_packet(struct rvt_swqe *wqe,
				struct ib_other_headers *ohdr,
				u32 *bth1, u32 *bth2, u32 *len);

void hfi1_rc_rcv_tid_rdma_write_data(struct hfi1_packet *packet);

u32 hfi1_build_tid_rdma_write_ack(struct rvt_qp *qp, struct rvt_ack_entry *e,
				  struct ib_other_headers *ohdr, u16 iflow,
				  u32 *bth1, u32 *bth2);

void hfi1_rc_rcv_tid_rdma_ack(struct hfi1_packet *packet);

void hfi1_add_tid_retry_timer(struct rvt_qp *qp);
void hfi1_del_tid_retry_timer(struct rvt_qp *qp);

u32 hfi1_build_tid_rdma_resync(struct rvt_qp *qp, struct rvt_swqe *wqe,
			       struct ib_other_headers *ohdr, u32 *bth1,
			       u32 *bth2, u16 fidx);

void hfi1_rc_rcv_tid_rdma_resync(struct hfi1_packet *packet);

struct hfi1_pkt_state;
int hfi1_make_tid_rdma_pkt(struct rvt_qp *qp, struct hfi1_pkt_state *ps);

void _hfi1_do_tid_send(struct work_struct *work);

bool hfi1_schedule_tid_send(struct rvt_qp *qp);

bool hfi1_tid_rdma_ack_interlock(struct rvt_qp *qp, struct rvt_ack_entry *e);

#endif /* HFI1_TID_RDMA_H */