/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
/*
 * Copyright(c) 2018 Intel Corporation.
 *
 */
#ifndef HFI1_TID_RDMA_H
#define HFI1_TID_RDMA_H

#include <linux/circ_buf.h>
#include "common.h"

/* Convenience helpers for circular buffer indices */
#define CIRC_ADD(val, add, size) (((val) + (add)) & ((size) - 1))
#define CIRC_NEXT(val, size) CIRC_ADD(val, 1, size)
#define CIRC_PREV(val, size) CIRC_ADD(val, -1, size)
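
/*
 * These helpers assume a power-of-two ring size, so the AND with
 * (size - 1) performs the wrap-around.  Two worked examples
 * (illustrative only):
 *
 *	CIRC_NEXT(7, 8) == (7 + 1) & 7 == 0	(wraps to the start)
 *	CIRC_PREV(0, 8) == (0 - 1) & 7 == 7	(wraps to the end)
 */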

#define TID_RDMA_MIN_SEGMENT_SIZE	BIT(18)   /* 256 KiB (for now) */
#define TID_RDMA_MAX_SEGMENT_SIZE	BIT(18)   /* 256 KiB (for now) */
#define TID_RDMA_MAX_PAGES		(BIT(18) >> PAGE_SHIFT)

/*
 * Bit definitions for priv->s_flags.
 * These bit flags overload the bit flags defined for the QP's s_flags.
 * Because these bits are used only in the QP priv s_flags, there are
 * no collisions.
 *
 * HFI1_S_TID_WAIT_INTERLCK - QP is waiting for requester interlock
 * HFI1_R_TID_WAIT_INTERLCK - QP is waiting for responder interlock
 */
#define HFI1_S_TID_BUSY_SET		BIT(0)
/* BIT(1) reserved for RVT_S_BUSY. */
#define HFI1_R_TID_RSC_TIMER		BIT(2)
/* BIT(3) reserved for RVT_S_RESP_PENDING. */
/* BIT(4) reserved for RVT_S_ACK_PENDING. */
#define HFI1_S_TID_WAIT_INTERLCK	BIT(5)
#define HFI1_R_TID_WAIT_INTERLCK	BIT(6)
/* BIT(7) - BIT(15) reserved for RVT_S_WAIT_*. */
/* BIT(16) reserved for RVT_S_SEND_ONE */
#define HFI1_S_TID_RETRY_TIMER		BIT(17)
/* BIT(18) reserved for RVT_S_ECN. */
#define HFI1_R_TID_SW_PSN		BIT(19)
/* BIT(26) reserved for HFI1_S_WAIT_HALT */
/* BIT(27) reserved for HFI1_S_WAIT_TID_RESP */
/* BIT(28) reserved for HFI1_S_WAIT_TID_SPACE */

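/*
 * Illustrative use only (a sketch, not code taken from the driver):
 * because the HFI1_* bits above are laid out around the reserved
 * RVT_S_* bits, the QP private s_flags can be tested just like
 * qp->s_flags, e.g.
 *
 *	struct hfi1_qp_priv *priv = qp->priv;
 *
 *	if (priv->s_flags & HFI1_S_TID_WAIT_INTERLCK)
 *		return;		(still waiting for the requester interlock)
 */
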
/*
 * Unlike regular IB RDMA VERBS, which do not require an entry
 * in the s_ack_queue, TID RDMA WRITE requests do because they
 * generate responses.
 * Therefore, the s_ack_queue needs to be extended by a certain
 * amount. The key point is that the queue needs to be extended
 * without letting the "user" know, so the user doesn't end up
 * using these extra entries.
 */
#define HFI1_TID_RDMA_WRITE_CNT 8

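/*
 * Sizing sketch (illustrative only, not the allocation code): if the
 * user negotiates room for N responder entries, the storage behind
 * s_ack_queue would hold N + HFI1_TID_RDMA_WRITE_CNT entries, with the
 * extra entries never reported to the user.
 */
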
struct tid_rdma_params {
	struct rcu_head rcu_head;
	u32 qp;
	u32 max_len;
	u16 jkey;
	u8 max_read;
	u8 max_write;
	u8 timeout;
	u8 urg;
	u8 version;
};

struct tid_rdma_qp_params {
	struct work_struct trigger_work;
	struct tid_rdma_params local;
	struct tid_rdma_params __rcu *remote;
};

/* Track state for each hardware flow */
struct tid_flow_state {
	u32 generation;
	u32 psn;
	u8 index;
	u8 last_index;
};

enum tid_rdma_req_state {
	TID_REQUEST_INACTIVE = 0,
	TID_REQUEST_INIT,
	TID_REQUEST_INIT_RESEND,
	TID_REQUEST_ACTIVE,
	TID_REQUEST_RESEND,
	TID_REQUEST_RESEND_ACTIVE,
	TID_REQUEST_QUEUED,
	TID_REQUEST_SYNC,
	TID_REQUEST_RNR_NAK,
	TID_REQUEST_COMPLETE,
};

struct tid_rdma_request {
	struct rvt_qp *qp;
	struct hfi1_ctxtdata *rcd;
	union {
		struct rvt_swqe *swqe;
		struct rvt_ack_entry *ack;
	} e;

	struct tid_rdma_flow *flows;	/* array of tid flows */
	struct rvt_sge_state ss; /* SGE state for TID RDMA requests */
	u16 n_flows;		/* size of the flow buffer window */
	u16 setup_head;		/* flow index we are setting up */
	u16 clear_tail;		/* flow index we are clearing */
	u16 flow_idx;		/* flow index most recently set up */
	u16 acked_tail;

	u32 seg_len;
	u32 total_len;
	u32 r_ack_psn;		/* next expected ack PSN */
	u32 r_flow_psn;		/* IB PSN of next segment start */
	u32 r_last_acked;	/* IB PSN of last ACK'ed packet */
	u32 s_next_psn;		/* IB PSN of next segment start for read */

	u32 total_segs;		/* segments required to complete a request */
	u32 cur_seg;		/* index of current segment */
	u32 comp_seg;		/* index of last completed segment */
	u32 ack_seg;		/* index of last ack'ed segment */
	u32 alloc_seg;		/* index of next segment to be allocated */
	u32 isge;		/* index of "current" sge */
	u32 ack_pending;	/* num acks pending for this request */

	enum tid_rdma_req_state state;
};

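/*
 * The flow indices above walk the flows[] array as a ring.  A minimal
 * sketch of how such an index would advance, using the CIRC_* helpers
 * defined at the top of this file and CIRC_CNT() from linux/circ_buf.h
 * (MAX_FLOWS is a stand-in for the ring size, which must be a power of
 * two):
 *
 *	req->setup_head = CIRC_NEXT(req->setup_head, MAX_FLOWS);
 *	if (CIRC_CNT(req->setup_head, req->clear_tail, MAX_FLOWS) == 0)
 *		the ring holds no flows being set up
 */
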
/*
 * When header suppression is used, PSNs associated with a "flow" are
 * relevant (and not the PSNs maintained by verbs). Track per-flow
 * PSNs here for a TID RDMA segment.
 */
struct flow_state {
	u32 flags;
	u32 resp_ib_psn; /* The IB PSN of the response for this flow */
	u32 generation; /* generation of flow */
	u32 spsn; /* starting PSN in TID space */
	u32 lpsn; /* last PSN in TID space */
	u32 r_next_psn; /* next PSN to be received (in TID space) */

	/* For tid rdma read */
	u32 ib_spsn; /* starting PSN in Verbs space */
	u32 ib_lpsn; /* last PSN in Verbs space */
};

struct tid_rdma_pageset {
	dma_addr_t addr : 48; /* Only needed for the first page */
	u8 idx : 8;
	u8 count : 7;
	u8 mapped : 1;
};

/**
 * kern_tid_node - used for managing TIDs in TID groups
 *
 * @grp: the TID group referenced by this node
 * @map: grp->map captured prior to programming this TID group in HW
 * @cnt: Only @cnt of available group entries are actually programmed
 */
struct kern_tid_node {
	struct tid_group *grp;
	u8 map;
	u8 cnt;
};

/* Overall info for a TID RDMA segment */
struct tid_rdma_flow {
	/*
	 * While a TID RDMA segment is being transferred, it uses a QP number
	 * from the "KDETH section of QP numbers" (which is different from the
	 * QP number that originated the request). Bits 11-15 of these QP
	 * numbers identify the "TID flow" for the segment.
	 */
	struct flow_state flow_state;
	struct tid_rdma_request *req;
	u32 tid_qpn;
	u32 tid_offset;
	u32 length;
	u32 sent;
	u8 tnode_cnt;
	u8 tidcnt;
	u8 tid_idx;
	u8 idx;
	u8 npagesets;
	u8 npkts;
	u8 pkt;
	u8 resync_npkts;
	struct kern_tid_node tnode[TID_RDMA_MAX_PAGES];
	struct tid_rdma_pageset pagesets[TID_RDMA_MAX_PAGES];
	u32 tid_entry[TID_RDMA_MAX_PAGES];
};

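/*
 * Illustrative only: given the layout described above (bits 11-15 of
 * the KDETH QP number carry the TID flow), the flow for a segment
 * could be recovered from tid_qpn with something like the following
 * (the macro name is made up for this sketch):
 *
 *	#define TID_FLOW_FROM_QPN(qpn) (((qpn) >> 11) & 0x1f)
 */
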
enum tid_rnr_nak_state {
	TID_RNR_NAK_INIT = 0,
	TID_RNR_NAK_SEND,
	TID_RNR_NAK_SENT,
};

bool tid_rdma_conn_req(struct rvt_qp *qp, u64 *data);
bool tid_rdma_conn_reply(struct rvt_qp *qp, u64 data);
bool tid_rdma_conn_resp(struct rvt_qp *qp, u64 *data);
void tid_rdma_conn_error(struct rvt_qp *qp);
void tid_rdma_opfn_init(struct rvt_qp *qp, struct tid_rdma_params *p);

int hfi1_kern_exp_rcv_init(struct hfi1_ctxtdata *rcd, int reinit);
int hfi1_kern_exp_rcv_setup(struct tid_rdma_request *req,
			    struct rvt_sge_state *ss, bool *last);
int hfi1_kern_exp_rcv_clear(struct tid_rdma_request *req);
void hfi1_kern_exp_rcv_clear_all(struct tid_rdma_request *req);
void __trdma_clean_swqe(struct rvt_qp *qp, struct rvt_swqe *wqe);

/**
 * trdma_clean_swqe - clean flows for swqe if large send queue
 * @qp: the qp
 * @wqe: the send wqe
 */
static inline void trdma_clean_swqe(struct rvt_qp *qp, struct rvt_swqe *wqe)
{
	if (!wqe->priv)
		return;
	__trdma_clean_swqe(qp, wqe);
}

void hfi1_kern_read_tid_flow_free(struct rvt_qp *qp);

int hfi1_qp_priv_init(struct rvt_dev_info *rdi, struct rvt_qp *qp,
		      struct ib_qp_init_attr *init_attr);
void hfi1_qp_priv_tid_free(struct rvt_dev_info *rdi, struct rvt_qp *qp);

void hfi1_tid_rdma_flush_wait(struct rvt_qp *qp);

int hfi1_kern_setup_hw_flow(struct hfi1_ctxtdata *rcd, struct rvt_qp *qp);
void hfi1_kern_clear_hw_flow(struct hfi1_ctxtdata *rcd, struct rvt_qp *qp);
void hfi1_kern_init_ctxt_generations(struct hfi1_ctxtdata *rcd);

struct cntr_entry;
u64 hfi1_access_sw_tid_wait(const struct cntr_entry *entry,
			    void *context, int vl, int mode, u64 data);

u32 hfi1_build_tid_rdma_read_packet(struct rvt_swqe *wqe,
				    struct ib_other_headers *ohdr,
				    u32 *bth1, u32 *bth2, u32 *len);
u32 hfi1_build_tid_rdma_read_req(struct rvt_qp *qp, struct rvt_swqe *wqe,
				 struct ib_other_headers *ohdr, u32 *bth1,
				 u32 *bth2, u32 *len);
void hfi1_rc_rcv_tid_rdma_read_req(struct hfi1_packet *packet);
u32 hfi1_build_tid_rdma_read_resp(struct rvt_qp *qp, struct rvt_ack_entry *e,
				  struct ib_other_headers *ohdr, u32 *bth0,
				  u32 *bth1, u32 *bth2, u32 *len, bool *last);
void hfi1_rc_rcv_tid_rdma_read_resp(struct hfi1_packet *packet);
bool hfi1_handle_kdeth_eflags(struct hfi1_ctxtdata *rcd,
			      struct hfi1_pportdata *ppd,
			      struct hfi1_packet *packet);
void hfi1_tid_rdma_restart_req(struct rvt_qp *qp, struct rvt_swqe *wqe,
			       u32 *bth2);
void hfi1_qp_kern_exp_rcv_clear_all(struct rvt_qp *qp);
bool hfi1_tid_rdma_wqe_interlock(struct rvt_qp *qp, struct rvt_swqe *wqe);

void setup_tid_rdma_wqe(struct rvt_qp *qp, struct rvt_swqe *wqe);
static inline void hfi1_setup_tid_rdma_wqe(struct rvt_qp *qp,
					   struct rvt_swqe *wqe)
{
	if (wqe->priv &&
	    (wqe->wr.opcode == IB_WR_RDMA_READ ||
	     wqe->wr.opcode == IB_WR_RDMA_WRITE) &&
	    wqe->length >= TID_RDMA_MIN_SEGMENT_SIZE)
		setup_tid_rdma_wqe(qp, wqe);
}

u32 hfi1_build_tid_rdma_write_req(struct rvt_qp *qp, struct rvt_swqe *wqe,
				  struct ib_other_headers *ohdr,
				  u32 *bth1, u32 *bth2, u32 *len);

void hfi1_compute_tid_rdma_flow_wt(void);

void hfi1_rc_rcv_tid_rdma_write_req(struct hfi1_packet *packet);

u32 hfi1_build_tid_rdma_write_resp(struct rvt_qp *qp, struct rvt_ack_entry *e,
				   struct ib_other_headers *ohdr, u32 *bth1,
				   u32 bth2, u32 *len,
				   struct rvt_sge_state **ss);

void hfi1_del_tid_reap_timer(struct rvt_qp *qp);

void hfi1_rc_rcv_tid_rdma_write_resp(struct hfi1_packet *packet);

bool hfi1_build_tid_rdma_packet(struct rvt_swqe *wqe,
				struct ib_other_headers *ohdr,
				u32 *bth1, u32 *bth2, u32 *len);

void hfi1_rc_rcv_tid_rdma_write_data(struct hfi1_packet *packet);

u32 hfi1_build_tid_rdma_write_ack(struct rvt_qp *qp, struct rvt_ack_entry *e,
				  struct ib_other_headers *ohdr, u16 iflow,
				  u32 *bth1, u32 *bth2);

void hfi1_rc_rcv_tid_rdma_ack(struct hfi1_packet *packet);

void hfi1_add_tid_retry_timer(struct rvt_qp *qp);
void hfi1_del_tid_retry_timer(struct rvt_qp *qp);

u32 hfi1_build_tid_rdma_resync(struct rvt_qp *qp, struct rvt_swqe *wqe,
			       struct ib_other_headers *ohdr, u32 *bth1,
			       u32 *bth2, u16 fidx);

void hfi1_rc_rcv_tid_rdma_resync(struct hfi1_packet *packet);

struct hfi1_pkt_state;
int hfi1_make_tid_rdma_pkt(struct rvt_qp *qp, struct hfi1_pkt_state *ps);

void _hfi1_do_tid_send(struct work_struct *work);

bool hfi1_schedule_tid_send(struct rvt_qp *qp);

bool hfi1_tid_rdma_ack_interlock(struct rvt_qp *qp, struct rvt_ack_entry *e);

#endif /* HFI1_TID_RDMA_H */