/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
 * Copyright(c) 2016 - 2020 Intel Corporation.
 */

#ifndef DEF_RDMAVT_INCQP_H
#define DEF_RDMAVT_INCQP_H

#include <rdma/rdma_vt.h>
#include <rdma/ib_pack.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdmavt_cq.h>
#include <rdma/rvt-abi.h>

/*
 * Atomic bit definitions for r_aflags.
 */
#define RVT_R_WRID_VALID	0
#define RVT_R_REWIND_SGE	1

/*
 * Bit definitions for r_flags.
 */
#define RVT_R_REUSE_SGE	0x01
#define RVT_R_RDMAR_SEQ	0x02
#define RVT_R_RSP_NAK	0x04
#define RVT_R_RSP_SEND	0x08
#define RVT_R_COMM_EST	0x10

| 29 | /* |
Gary Leshner | 84e3b19 | 2020-05-11 12:06:00 -0400 | [diff] [blame] | 30 | * If a packet's QP[23:16] bits match this value, then it is |
| 31 | * a PSM packet and the hardware will expect a KDETH header |
| 32 | * following the BTH. |
| 33 | */ |
| 34 | #define RVT_KDETH_QP_PREFIX 0x80 |
| 35 | #define RVT_KDETH_QP_SUFFIX 0xffff |
| 36 | #define RVT_KDETH_QP_PREFIX_MASK 0x00ff0000 |
| 37 | #define RVT_KDETH_QP_PREFIX_SHIFT 16 |
| 38 | #define RVT_KDETH_QP_BASE (u32)(RVT_KDETH_QP_PREFIX << \ |
| 39 | RVT_KDETH_QP_PREFIX_SHIFT) |
| 40 | #define RVT_KDETH_QP_MAX (u32)(RVT_KDETH_QP_BASE + RVT_KDETH_QP_SUFFIX) |
| 41 | |
/*
 * If a packet's LNH == BTH and DEST QPN[23:16] in the BTH match this
 * prefix value, then it is an AIP packet with a DETH containing the entropy
 * value in byte 4 following the BTH.
 */
#define RVT_AIP_QP_PREFIX		0x81
#define RVT_AIP_QP_SUFFIX		0xffff
#define RVT_AIP_QP_PREFIX_MASK		0x00ff0000
#define RVT_AIP_QP_PREFIX_SHIFT		16
#define RVT_AIP_QP_BASE			(u32)(RVT_AIP_QP_PREFIX << \
					      RVT_AIP_QP_PREFIX_SHIFT)
#define RVT_AIP_QPN_MAX			BIT(RVT_AIP_QP_PREFIX_SHIFT)
#define RVT_AIP_QP_MAX			(u32)(RVT_AIP_QP_BASE + RVT_AIP_QPN_MAX - 1)

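/*
 * Illustrative sketch (hypothetical helpers, not exported rdmavt API):
 * how a driver might classify a QPN against the KDETH and AIP prefixes
 * above, using the prefix mask and base values.
 */
static inline bool rvt_example_qpn_is_kdeth(u32 qpn)
{
	return (qpn & RVT_KDETH_QP_PREFIX_MASK) == RVT_KDETH_QP_BASE;
}

static inline bool rvt_example_qpn_is_aip(u32 qpn)
{
	return (qpn & RVT_AIP_QP_PREFIX_MASK) == RVT_AIP_QP_BASE;
}
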
/*
 * Bit definitions for s_flags.
 *
 * RVT_S_SIGNAL_REQ_WR - set if QP send WRs contain completion signaled
 * RVT_S_BUSY - send tasklet is processing the QP
 * RVT_S_TIMER - the RC retry timer is active
 * RVT_S_ACK_PENDING - an ACK is waiting to be sent after RDMA read/atomics
 * RVT_S_WAIT_FENCE - waiting for all prior RDMA read or atomic SWQEs
 *                    before processing the next SWQE
 * RVT_S_WAIT_RDMAR - waiting for an RDMA read or atomic SWQE to complete
 *                    before processing the next SWQE
 * RVT_S_WAIT_RNR - waiting for RNR timeout
 * RVT_S_WAIT_SSN_CREDIT - waiting for RC credits to process next SWQE
 * RVT_S_WAIT_DMA - waiting for send DMA queue to drain before generating
 *                  next send completion entry not via send DMA
 * RVT_S_WAIT_PIO - waiting for a send buffer to be available
 * RVT_S_WAIT_TX - waiting for a struct verbs_txreq to be available
 * RVT_S_WAIT_DMA_DESC - waiting for DMA descriptors to be available
 * RVT_S_WAIT_KMEM - waiting for kernel memory to be available
 * RVT_S_WAIT_PSN - waiting for a packet to exit the send DMA queue
 * RVT_S_WAIT_ACK - waiting for an ACK packet before sending more requests
 * RVT_S_SEND_ONE - send one packet, request ACK, then wait for ACK
 * RVT_S_ECN - a BECN was queued to the send engine
 * RVT_S_MAX_BIT_MASK - the max bit that can be used by rdmavt
 */
#define RVT_S_SIGNAL_REQ_WR	0x0001
#define RVT_S_BUSY		0x0002
#define RVT_S_TIMER		0x0004
#define RVT_S_RESP_PENDING	0x0008
#define RVT_S_ACK_PENDING	0x0010
#define RVT_S_WAIT_FENCE	0x0020
#define RVT_S_WAIT_RDMAR	0x0040
#define RVT_S_WAIT_RNR		0x0080
#define RVT_S_WAIT_SSN_CREDIT	0x0100
#define RVT_S_WAIT_DMA		0x0200
#define RVT_S_WAIT_PIO		0x0400
#define RVT_S_WAIT_TX		0x0800
#define RVT_S_WAIT_DMA_DESC	0x1000
#define RVT_S_WAIT_KMEM		0x2000
#define RVT_S_WAIT_PSN		0x4000
#define RVT_S_WAIT_ACK		0x8000
#define RVT_S_SEND_ONE		0x10000
#define RVT_S_UNLIMITED_CREDIT	0x20000
#define RVT_S_ECN		0x40000
#define RVT_S_MAX_BIT_MASK	0x800000

/*
 * Drivers should use s_flags starting with bit 31 down to the bit next to
 * RVT_S_MAX_BIT_MASK
 */

/*
 * Wait flags that would prevent any packet type from being sent.
 */
#define RVT_S_ANY_WAIT_IO \
	(RVT_S_WAIT_PIO | RVT_S_WAIT_TX | \
	 RVT_S_WAIT_DMA_DESC | RVT_S_WAIT_KMEM)

/*
 * Wait flags that would prevent send work requests from making progress.
 */
#define RVT_S_ANY_WAIT_SEND (RVT_S_WAIT_FENCE | RVT_S_WAIT_RDMAR | \
	RVT_S_WAIT_RNR | RVT_S_WAIT_SSN_CREDIT | RVT_S_WAIT_DMA | \
	RVT_S_WAIT_PSN | RVT_S_WAIT_ACK)

#define RVT_S_ANY_WAIT (RVT_S_ANY_WAIT_IO | RVT_S_ANY_WAIT_SEND)

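/*
 * Illustrative sketch: a driver's send engine typically gates progress
 * on these masks while holding qp->s_lock, e.g.
 *
 *	if (qp->s_flags & RVT_S_ANY_WAIT)
 *		return;
 */
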
/* Number of bits to pay attention to in the opcode for checking qp type */
#define RVT_OPCODE_QP_MASK	0xE0

/* Flags for checking QP state (see ib_rvt_state_ops[]) */
#define RVT_POST_SEND_OK		0x01
#define RVT_POST_RECV_OK		0x02
#define RVT_PROCESS_RECV_OK		0x04
#define RVT_PROCESS_SEND_OK		0x08
#define RVT_PROCESS_NEXT_SEND_OK	0x10
#define RVT_FLUSH_SEND			0x20
#define RVT_FLUSH_RECV			0x40
#define RVT_PROCESS_OR_FLUSH_SEND \
	(RVT_PROCESS_SEND_OK | RVT_FLUSH_SEND)
#define RVT_SEND_OR_FLUSH_OR_RECV_OK \
	(RVT_PROCESS_SEND_OK | RVT_FLUSH_SEND | RVT_PROCESS_RECV_OK)

/*
 * Internal send flags
 */
#define RVT_SEND_RESERVE_USED		IB_SEND_RESERVED_START
#define RVT_SEND_COMPLETION_ONLY	(IB_SEND_RESERVED_START << 1)

/**
 * rvt_ud_wr - IB UD work plus AH cache
 * @wr: valid IB work request
 * @attr: pointer to an allocated AH attribute
 *
 * Special case the UD WR so we can keep track of the AH attributes.
 *
 * NOTE: This data structure is strictly ordered wr then attr. I.e. the attr
 * MUST come after wr.  The ib_ud_wr is sized and copied in rvt_post_one_wr.
 * The copy assumes that wr is first.
 */
struct rvt_ud_wr {
	struct ib_ud_wr wr;
	struct rdma_ah_attr *attr;
};

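/*
 * Illustrative sketch (hypothetical helper, not the rvt_post_one_wr
 * implementation): the ordering NOTE above is what makes a sized copy
 * of an ib_ud_wr into the head of a struct rvt_ud_wr valid.
 */
static inline void rvt_example_cache_ud_wr(struct rvt_ud_wr *dst,
					   const struct ib_ud_wr *src)
{
	/* wr is first in rvt_ud_wr, so the copy lands at the start */
	memcpy(&dst->wr, src, sizeof(dst->wr));
}
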
/*
 * Send work request queue entry.
 * The size of the sg_list is determined when the QP is created and stored
 * in qp->s_max_sge.
 */
struct rvt_swqe {
	union {
		struct ib_send_wr wr;	/* don't use wr.sg_list */
		struct rvt_ud_wr ud_wr;
		struct ib_reg_wr reg_wr;
		struct ib_rdma_wr rdma_wr;
		struct ib_atomic_wr atomic_wr;
	};
	u32 psn;		/* first packet sequence number */
	u32 lpsn;		/* last packet sequence number */
	u32 ssn;		/* send sequence number */
	u32 length;		/* total length of data in sg_list */
	void *priv;		/* driver dependent field */
	struct rvt_sge sg_list[];
};

/**
 * struct rvt_krwq - kernel struct receive work request
 * @p_lock: lock to protect the producer side of the kernel buffer
 * @head: index of next entry to fill
 * @c_lock: lock to protect the consumer side of the kernel buffer
 * @tail: index of next entry to pull
 * @count: approximate count of total receive entries posted
 * @wq: the receive work request queue entries
 *
 * This structure is used to contain the head pointer,
 * tail pointer and receive work queue entries for kernel
 * mode users.
 */
struct rvt_krwq {
	spinlock_t p_lock;	/* protect producer */
	u32 head;		/* new work requests posted to the head */

	/* protect consumer */
	spinlock_t c_lock ____cacheline_aligned_in_smp;
	u32 tail;		/* receives pull requests from here. */
	u32 count;		/* approx count of receive entries posted */
	struct rvt_rwqe *curr_wq;
	struct rvt_rwqe wq[];
};

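/*
 * Illustrative sketch (hypothetical helper): the split p_lock/c_lock
 * design lets posting and consuming run concurrently. A producer-side
 * head advance might look like this; filling the entry is elided.
 */
static inline void rvt_example_advance_head(struct rvt_krwq *kwq, u32 size)
{
	spin_lock(&kwq->p_lock);
	/* ... write the next rwqe at kwq->head here ... */
	if (++kwq->head >= size)
		kwq->head = 0;
	spin_unlock(&kwq->p_lock);
}
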
/**
 * rvt_get_swqe_ah - Return the pointer to the struct rvt_ah
 * @swqe: valid Send WQE
 *
 */
static inline struct rvt_ah *rvt_get_swqe_ah(struct rvt_swqe *swqe)
{
	return ibah_to_rvtah(swqe->ud_wr.wr.ah);
}

/**
 * rvt_get_swqe_ah_attr - Return the cached ah attribute information
 * @swqe: valid Send WQE
 *
 */
static inline struct rdma_ah_attr *rvt_get_swqe_ah_attr(struct rvt_swqe *swqe)
{
	return swqe->ud_wr.attr;
}

/**
 * rvt_get_swqe_remote_qpn - Access the remote QPN value
 * @swqe: valid Send WQE
 *
 */
static inline u32 rvt_get_swqe_remote_qpn(struct rvt_swqe *swqe)
{
	return swqe->ud_wr.wr.remote_qpn;
}

/**
 * rvt_get_swqe_remote_qkey - Access the remote qkey value
 * @swqe: valid Send WQE
 *
 */
static inline u32 rvt_get_swqe_remote_qkey(struct rvt_swqe *swqe)
{
	return swqe->ud_wr.wr.remote_qkey;
}

/**
 * rvt_get_swqe_pkey_index - Access the pkey index
 * @swqe: valid Send WQE
 *
 */
static inline u16 rvt_get_swqe_pkey_index(struct rvt_swqe *swqe)
{
	return swqe->ud_wr.wr.pkey_index;
}

struct rvt_rq {
	struct rvt_rwq *wq;
	struct rvt_krwq *kwq;
	u32 size;		/* size of RWQE array */
	u8 max_sge;
	/* protect changes in this struct */
	spinlock_t lock ____cacheline_aligned_in_smp;
};

/**
 * rvt_get_rq_count - count the number of receive work queue entries
 * in the circular buffer
 * @rq: data structure for the request queue entry
 * @head: head index of the circular buffer
 * @tail: tail index of the circular buffer
 *
 * Return - total number of entries in the receive queue
 */
static inline u32 rvt_get_rq_count(struct rvt_rq *rq, u32 head, u32 tail)
{
	u32 count = head - tail;

	if ((s32)count < 0)
		count += rq->size;
	return count;
}

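/*
 * Illustrative sketch (hypothetical helper): computing the current
 * occupancy of a kernel receive queue. In real use the head and tail
 * reads would be done under the appropriate kwq lock.
 */
static inline u32 rvt_example_rq_occupancy(struct rvt_rq *rq)
{
	return rvt_get_rq_count(rq, rq->kwq->head, rq->kwq->tail);
}
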
/*
 * This structure holds the information that the send tasklet needs
 * to send an RDMA read response or atomic operation.
 */
struct rvt_ack_entry {
	struct rvt_sge rdma_sge;
	u64 atomic_data;
	u32 psn;
	u32 lpsn;
	u8 opcode;
	u8 sent;
	void *priv;
};

#define	RC_QP_SCALING_INTERVAL	5

#define RVT_OPERATION_PRIV		0x00000001
#define RVT_OPERATION_ATOMIC		0x00000002
#define RVT_OPERATION_ATOMIC_SGE	0x00000004
#define RVT_OPERATION_LOCAL		0x00000008
#define RVT_OPERATION_USE_RESERVE	0x00000010
#define RVT_OPERATION_IGN_RNR_CNT	0x00000020

#define RVT_OPERATION_MAX (IB_WR_RESERVED10 + 1)

/**
 * rvt_operation_params - op table entry
 * @length - the length to copy into the swqe entry
 * @qpt_support - a bit mask indicating QP type support
 * @flags - RVT_OPERATION flags (see above)
 *
 * This supports table-driven post send so that
 * drivers can each support a potentially different
 * set of operations.
 *
 **/
struct rvt_operation_params {
	size_t length;
	u32 qpt_support;
	u32 flags;
};

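/*
 * Illustrative sketch (assumed values, not copied from any driver): an
 * operation table entry a driver might supply for IB_WR_RDMA_WRITE,
 * sized for struct ib_rdma_wr and restricted to UC and RC QPs.
 */
static const struct rvt_operation_params rvt_example_post_parms[RVT_OPERATION_MAX] = {
	[IB_WR_RDMA_WRITE] = {
		.length = sizeof(struct ib_rdma_wr),
		.qpt_support = BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
	},
};
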
/*
 * Common variables are protected by both r_rq.lock and s_lock in that order
 * which only happens in modify_qp() or changing the QP 'state'.
 */
struct rvt_qp {
	struct ib_qp ibqp;
	void *priv;		/* Driver private data */
	/* read mostly fields above and below */
	struct rdma_ah_attr remote_ah_attr;
	struct rdma_ah_attr alt_ah_attr;
	struct rvt_qp __rcu *next;	/* link list for QPN hash table */
	struct rvt_swqe *s_wq;	/* send work queue */
	struct rvt_mmap_info *ip;

	unsigned long timeout_jiffies;	/* computed from timeout */

	int srate_mbps;		/* s_srate (below) converted to Mbit/s */
	pid_t pid;		/* pid for user mode QPs */
	u32 remote_qpn;
	u32 qkey;		/* QKEY for this QP (for UD or RD) */
	u32 s_size;		/* send work queue size */

	u16 pmtu;		/* decoded from path_mtu */
	u8 log_pmtu;		/* shift for pmtu */
	u8 state;		/* QP state */
	u8 allowed_ops;		/* high order bits of allowed opcodes */
	u8 qp_access_flags;
	u8 alt_timeout;		/* Alternate path timeout for this QP */
	u8 timeout;		/* Timeout for this QP */
	u8 s_srate;
	u8 s_mig_state;
	u8 port_num;
	u8 s_pkey_index;	/* PKEY index to use */
	u8 s_alt_pkey_index;	/* Alternate path PKEY index to use */
	u8 r_max_rd_atomic;	/* max number of RDMA read/atomic to receive */
	u8 s_max_rd_atomic;	/* max number of RDMA read/atomic to send */
	u8 s_retry_cnt;		/* number of times to retry */
	u8 s_rnr_retry_cnt;
	u8 r_min_rnr_timer;	/* retry timeout value for RNR NAKs */
	u8 s_max_sge;		/* size of s_wq->sg_list */
	u8 s_draining;

	/* start of read/write fields */
	atomic_t refcount ____cacheline_aligned_in_smp;
	wait_queue_head_t wait;

	struct rvt_ack_entry *s_ack_queue;
	struct rvt_sge_state s_rdma_read_sge;

	spinlock_t r_lock ____cacheline_aligned_in_smp;	/* used for APM */
	u32 r_psn;		/* expected rcv packet sequence number */
	unsigned long r_aflags;
	u64 r_wr_id;		/* ID for current receive WQE */
	u32 r_ack_psn;		/* PSN for next ACK or atomic ACK */
	u32 r_len;		/* total length of r_sge */
	u32 r_rcv_len;		/* receive data len processed */
	u32 r_msn;		/* message sequence number */

	u8 r_state;		/* opcode of last packet received */
	u8 r_flags;
	u8 r_head_ack_queue;	/* index into s_ack_queue[] */
	u8 r_adefered;		/* deferred ack count */

	struct list_head rspwait;	/* link for waiting to respond */

	struct rvt_sge_state r_sge;	/* current receive data */
	struct rvt_rq r_rq;		/* receive work queue */

	/* post send line */
	spinlock_t s_hlock ____cacheline_aligned_in_smp;
	u32 s_head;		/* new entries added here */
	u32 s_next_psn;		/* PSN for next request */
	u32 s_avail;		/* number of entries avail */
	u32 s_ssn;		/* SSN of tail entry */
	atomic_t s_reserved_used;	/* reserved entries in use */

	spinlock_t s_lock ____cacheline_aligned_in_smp;
	u32 s_flags;
	struct rvt_sge_state *s_cur_sge;
	struct rvt_swqe *s_wqe;
	struct rvt_sge_state s_sge;	/* current send request data */
	struct rvt_mregion *s_rdma_mr;
	u32 s_len;		/* total length of s_sge */
	u32 s_rdma_read_len;	/* total length of s_rdma_read_sge */
	u32 s_last_psn;		/* last response PSN processed */
	u32 s_sending_psn;	/* lowest PSN that is being sent */
	u32 s_sending_hpsn;	/* highest PSN that is being sent */
	u32 s_psn;		/* current packet sequence number */
	u32 s_ack_rdma_psn;	/* PSN for sending RDMA read responses */
	u32 s_ack_psn;		/* PSN for acking sends and RDMA writes */
	u32 s_tail;		/* next entry to process */
	u32 s_cur;		/* current work queue entry */
	u32 s_acked;		/* last un-ACK'ed entry */
	u32 s_last;		/* last completed entry */
	u32 s_lsn;		/* limit sequence number (credit) */
	u32 s_ahgpsn;		/* set to the psn in the copy of the header */
	u16 s_cur_size;		/* size of send packet in bytes */
	u16 s_rdma_ack_cnt;
	u8 s_hdrwords;		/* size of s_hdr in 32 bit words */
	s8 s_ahgidx;
	u8 s_state;		/* opcode of last packet sent */
	u8 s_ack_state;		/* opcode of packet to ACK */
	u8 s_nak_state;		/* non-zero if NAK is pending */
	u8 r_nak_state;		/* non-zero if NAK is pending */
	u8 s_retry;		/* requester retry counter */
	u8 s_rnr_retry;		/* requester RNR retry counter */
	u8 s_num_rd_atomic;	/* number of RDMA read/atomic pending */
	u8 s_tail_ack_queue;	/* index into s_ack_queue[] */
	u8 s_acked_ack_queue;	/* index into s_ack_queue[] */

	struct rvt_sge_state s_ack_rdma_sge;
	struct timer_list s_timer;
	struct hrtimer s_rnr_timer;

	atomic_t local_ops_pending;	/* number of fast_reg/local_inv reqs */

	/*
	 * This sge list MUST be last. Do not add anything below here.
	 */
	struct rvt_sge *r_sg_list /* verified SGEs */
		____cacheline_aligned_in_smp;
};

struct rvt_srq {
	struct ib_srq ibsrq;
	struct rvt_rq rq;
	struct rvt_mmap_info *ip;
	/* send signal when number of RWQEs < limit */
	u32 limit;
};

static inline struct rvt_srq *ibsrq_to_rvtsrq(struct ib_srq *ibsrq)
{
	return container_of(ibsrq, struct rvt_srq, ibsrq);
}

static inline struct rvt_qp *ibqp_to_rvtqp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct rvt_qp, ibqp);
}

#define RVT_QPN_MAX		BIT(24)
#define RVT_QPNMAP_ENTRIES	(RVT_QPN_MAX / PAGE_SIZE / BITS_PER_BYTE)
#define RVT_BITS_PER_PAGE	(PAGE_SIZE * BITS_PER_BYTE)
#define RVT_BITS_PER_PAGE_MASK	(RVT_BITS_PER_PAGE - 1)
#define RVT_QPN_MASK		IB_QPN_MASK

/*
 * QPN-map pages start out as NULL, they get allocated upon
 * first use and are never deallocated. This way,
 * large bitmaps are not allocated unless large numbers of QPs are used.
 */
struct rvt_qpn_map {
	void *page;
};

struct rvt_qpn_table {
	spinlock_t lock;	/* protect changes to the qp table */
	unsigned flags;		/* flags for QP0/1 allocated for each port */
	u32 last;		/* last QP number allocated */
	u32 nmaps;		/* size of the map table */
	u16 limit;
	u8 incr;
	/* bit map of free QP numbers other than 0/1 */
	struct rvt_qpn_map map[RVT_QPNMAP_ENTRIES];
};

struct rvt_qp_ibdev {
	u32 qp_table_size;
	u32 qp_table_bits;
	struct rvt_qp __rcu **qp_table;
	spinlock_t qpt_lock;	/* qptable lock */
	struct rvt_qpn_table qpn_table;
};

/*
 * There is one struct rvt_mcast for each multicast GID.
 * All attached QPs are then stored as a list of
 * struct rvt_mcast_qp.
 */
struct rvt_mcast_qp {
	struct list_head list;
	struct rvt_qp *qp;
};

struct rvt_mcast_addr {
	union ib_gid mgid;
	u16 lid;
};

struct rvt_mcast {
	struct rb_node rb_node;
	struct rvt_mcast_addr mcast_addr;
	struct list_head qp_list;
	wait_queue_head_t wait;
	atomic_t refcount;
	int n_attached;
};

/*
 * Since struct rvt_swqe is not a fixed size, we can't simply index into
 * struct rvt_qp.s_wq. This function does the array index computation.
 */
static inline struct rvt_swqe *rvt_get_swqe_ptr(struct rvt_qp *qp,
						unsigned n)
{
	return (struct rvt_swqe *)((char *)qp->s_wq +
				   (sizeof(struct rvt_swqe) +
				    qp->s_max_sge *
				    sizeof(struct rvt_sge)) * n);
}

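/*
 * Illustrative sketch (hypothetical helper): because each entry is
 * sizeof(struct rvt_swqe) plus s_max_sge SGEs, walking the send queue
 * has to go through rvt_get_swqe_ptr() rather than array indexing.
 */
static inline u32 rvt_example_ring_bytes(struct rvt_qp *qp)
{
	u32 i, bytes = 0;

	for (i = 0; i < qp->s_size; i++)
		bytes += rvt_get_swqe_ptr(qp, i)->length;
	return bytes;
}
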
/*
 * Since struct rvt_rwqe is not a fixed size, we can't simply index into
 * struct rvt_rwq.wq. This function does the array index computation.
 */
static inline struct rvt_rwqe *rvt_get_rwqe_ptr(struct rvt_rq *rq, unsigned n)
{
	return (struct rvt_rwqe *)
		((char *)rq->kwq->curr_wq +
		 (sizeof(struct rvt_rwqe) +
		  rq->max_sge * sizeof(struct ib_sge)) * n);
}

/**
 * rvt_is_user_qp - return whether this is a user mode QP
 * @qp - the target QP
 */
static inline bool rvt_is_user_qp(struct rvt_qp *qp)
{
	return !!qp->pid;
}

/**
 * rvt_get_qp - get a QP reference
 * @qp - the QP to hold
 */
static inline void rvt_get_qp(struct rvt_qp *qp)
{
	atomic_inc(&qp->refcount);
}

/**
 * rvt_put_qp - release a QP reference
 * @qp - the QP to release
 */
static inline void rvt_put_qp(struct rvt_qp *qp)
{
	if (qp && atomic_dec_and_test(&qp->refcount))
		wake_up(&qp->wait);
}

/**
 * rvt_put_swqe - drop mr refs held by swqe
 * @wqe - the send wqe
 *
 * This drops any mr references held by the swqe
 */
static inline void rvt_put_swqe(struct rvt_swqe *wqe)
{
	int i;

	for (i = 0; i < wqe->wr.num_sge; i++) {
		struct rvt_sge *sge = &wqe->sg_list[i];

		rvt_put_mr(sge->mr);
	}
}

/**
 * rvt_qp_wqe_reserve - reserve operation
 * @qp - the rvt qp
 * @wqe - the send wqe
 *
 * This routine is used in post send to record
 * a wqe relative reserved operation use.
 */
static inline void rvt_qp_wqe_reserve(
	struct rvt_qp *qp,
	struct rvt_swqe *wqe)
{
	atomic_inc(&qp->s_reserved_used);
}

/**
 * rvt_qp_wqe_unreserve - clean reserved operation
 * @qp - the rvt qp
 * @flags - send wqe flags
 *
 * This decrements the reserve use count.
 *
 * This call MUST precede the change to
 * s_last to ensure that post send sees a stable
 * s_avail.
 *
 * An smp_mb__after_atomic() is used to ensure
 * the compiler does not juggle the order of the s_last
 * ring index and the decrementing of s_reserved_used.
 */
static inline void rvt_qp_wqe_unreserve(struct rvt_qp *qp, int flags)
{
	if (unlikely(flags & RVT_SEND_RESERVE_USED)) {
		atomic_dec(&qp->s_reserved_used);
		/* ensure no compiler re-order up to s_last change */
		smp_mb__after_atomic();
	}
}

extern const enum ib_wc_opcode ib_rvt_wc_opcode[];

/*
 * Compare the lower 24 bits of the msn values.
 * Returns an integer <, ==, or > than zero.
 */
static inline int rvt_cmp_msn(u32 a, u32 b)
{
	return (((int)a) - ((int)b)) << 8;
}

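/*
 * Worked example (illustrative): the << 8 makes the sign reflect the
 * 24-bit circular distance, so an MSN of 0x000001 compares as newer
 * than 0xffffff:
 *
 *	rvt_cmp_msn(0x000001, 0xffffff)
 *		= ((1 - 16777215) << 8)
 *		= 0x00000200 as a 32-bit value, which is > 0
 */
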
__be32 rvt_compute_aeth(struct rvt_qp *qp);

void rvt_get_credit(struct rvt_qp *qp, u32 aeth);

u32 rvt_restart_sge(struct rvt_sge_state *ss, struct rvt_swqe *wqe, u32 len);

/**
 * rvt_div_round_up_mtu - round up divide
 * @qp - the qp pair
 * @len - the length
 *
 * Perform a shift based mtu round up divide
 */
static inline u32 rvt_div_round_up_mtu(struct rvt_qp *qp, u32 len)
{
	return (len + qp->pmtu - 1) >> qp->log_pmtu;
}

/**
 * rvt_div_mtu - mtu divide
 * @qp - the qp pair
 * @len - the length
 *
 * Perform a shift based mtu divide
 */
static inline u32 rvt_div_mtu(struct rvt_qp *qp, u32 len)
{
	return len >> qp->log_pmtu;
}

/**
 * rvt_timeout_to_jiffies - Convert a ULP timeout input into jiffies
 * @timeout - timeout input (0 - 31).
 *
 * Return a timeout value in jiffies.
 */
static inline unsigned long rvt_timeout_to_jiffies(u8 timeout)
{
	if (timeout > 31)
		timeout = 31;

	return usecs_to_jiffies(1U << timeout) * 4096UL / 1000UL;
}

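/*
 * Worked example (illustrative): the IBTA timeout encoding is
 * 4.096 usec * 2^timeout, which is what the expression above computes.
 * For timeout = 14:
 *
 *	4.096 usec * 2^14 = 16384 * 4096 / 1000 usec ~= 67.1 msec
 */
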
/**
 * rvt_lookup_qpn - return the QP with the given QPN
 * @rdi: the rvt device info
 * @rvp: the ibport
 * @qpn: the QP number to look up
 *
 * The caller must hold the rcu_read_lock(), and keep the lock until
 * the returned qp is no longer in use.
 */
static inline struct rvt_qp *rvt_lookup_qpn(struct rvt_dev_info *rdi,
					    struct rvt_ibport *rvp,
					    u32 qpn) __must_hold(RCU)
{
	struct rvt_qp *qp = NULL;

	if (unlikely(qpn <= 1)) {
		qp = rcu_dereference(rvp->qp[qpn]);
	} else {
		u32 n = hash_32(qpn, rdi->qp_dev->qp_table_bits);

		for (qp = rcu_dereference(rdi->qp_dev->qp_table[n]); qp;
			qp = rcu_dereference(qp->next))
			if (qp->ibqp.qp_num == qpn)
				break;
	}
	return qp;
}

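/*
 * Illustrative sketch (hypothetical helper): the usual pattern is to
 * take a QP reference inside the RCU read-side critical section so
 * the lookup result can safely outlive it.
 */
static inline struct rvt_qp *rvt_example_get_qp(struct rvt_dev_info *rdi,
						struct rvt_ibport *rvp,
						u32 qpn)
{
	struct rvt_qp *qp;

	rcu_read_lock();
	qp = rvt_lookup_qpn(rdi, rvp, qpn);
	if (qp)
		rvt_get_qp(qp);	/* hold the QP past rcu_read_unlock() */
	rcu_read_unlock();
	return qp;	/* caller does rvt_put_qp() when done */
}
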
/**
 * rvt_mod_retry_timer_ext - mod a retry timer
 * @qp - the QP
 * @shift - timeout shift to wait for multiple packets
 *
 * Modify a potentially already running retry timer
 */
static inline void rvt_mod_retry_timer_ext(struct rvt_qp *qp, u8 shift)
{
	struct ib_qp *ibqp = &qp->ibqp;
	struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);

	lockdep_assert_held(&qp->s_lock);
	qp->s_flags |= RVT_S_TIMER;
	/* 4.096 usec. * (1 << qp->timeout) */
	mod_timer(&qp->s_timer, jiffies + rdi->busy_jiffies +
		  (qp->timeout_jiffies << shift));
}

static inline void rvt_mod_retry_timer(struct rvt_qp *qp)
{
	return rvt_mod_retry_timer_ext(qp, 0);
}

/**
 * rvt_put_qp_swqe - drop refs held by swqe
 * @qp: the send qp
 * @wqe: the send wqe
 *
 * This drops any references held by the swqe
 */
static inline void rvt_put_qp_swqe(struct rvt_qp *qp, struct rvt_swqe *wqe)
{
	rvt_put_swqe(wqe);
	if (qp->allowed_ops == IB_OPCODE_UD)
		rdma_destroy_ah_attr(wqe->ud_wr.attr);
}

/**
 * rvt_qp_swqe_incr - increment ring index
 * @qp: the qp
 * @val: the starting value
 *
 * Return: the new value wrapping as appropriate
 */
static inline u32
rvt_qp_swqe_incr(struct rvt_qp *qp, u32 val)
{
	if (++val >= qp->s_size)
		val = 0;
	return val;
}

int rvt_error_qp(struct rvt_qp *qp, enum ib_wc_status err);

/**
 * rvt_recv_cq - add a new entry to the completion queue
 * of the receive queue
 * @qp: receive queue
 * @wc: work completion entry to add
 * @solicited: true if @wc is solicited
 *
 * This is a wrapper function for the rvt_cq_enter() call on the
 * receive queue. If rvt_cq_enter() returns false, the cq is full
 * and the qp is put into the error state.
 */
static inline void rvt_recv_cq(struct rvt_qp *qp, struct ib_wc *wc,
			       bool solicited)
{
	struct rvt_cq *cq = ibcq_to_rvtcq(qp->ibqp.recv_cq);

	if (unlikely(!rvt_cq_enter(cq, wc, solicited)))
		rvt_error_qp(qp, IB_WC_LOC_QP_OP_ERR);
}

/**
 * rvt_send_cq - add a new entry to the completion queue
 * of the send queue
 * @qp: send queue
 * @wc: work completion entry to add
 * @solicited: true if @wc is solicited
 *
 * This is a wrapper function for the rvt_cq_enter() call on the
 * send queue. If rvt_cq_enter() returns false, the cq is full
 * and the qp is put into the error state.
 */
static inline void rvt_send_cq(struct rvt_qp *qp, struct ib_wc *wc,
			       bool solicited)
{
	struct rvt_cq *cq = ibcq_to_rvtcq(qp->ibqp.send_cq);

	if (unlikely(!rvt_cq_enter(cq, wc, solicited)))
		rvt_error_qp(qp, IB_WC_LOC_QP_OP_ERR);
}

/**
 * rvt_qp_complete_swqe - insert send completion
 * @qp - the qp
 * @wqe - the send wqe
 * @opcode - wc operation (driver dependent)
 * @status - completion status
 *
 * Update the s_last information, and then insert a send
 * completion into the completion queue if the qp
 * indicates it should be done.
 *
 * See IBTA 10.7.3.1 for info on completion
 * control.
 *
 * Return: new last
 */
static inline u32
rvt_qp_complete_swqe(struct rvt_qp *qp,
		     struct rvt_swqe *wqe,
		     enum ib_wc_opcode opcode,
		     enum ib_wc_status status)
{
	bool need_completion;
	u64 wr_id;
	u32 byte_len, last;
	int flags = wqe->wr.send_flags;

	rvt_qp_wqe_unreserve(qp, flags);
	rvt_put_qp_swqe(qp, wqe);

	need_completion =
		!(flags & RVT_SEND_RESERVE_USED) &&
		(!(qp->s_flags & RVT_S_SIGNAL_REQ_WR) ||
		(flags & IB_SEND_SIGNALED) ||
		status != IB_WC_SUCCESS);
	if (need_completion) {
		wr_id = wqe->wr.wr_id;
		byte_len = wqe->length;
		/* above fields required before writing s_last */
	}
	last = rvt_qp_swqe_incr(qp, qp->s_last);
	/* see rvt_qp_is_avail() */
	smp_store_release(&qp->s_last, last);
	if (need_completion) {
		struct ib_wc w = {
			.wr_id = wr_id,
			.status = status,
			.opcode = opcode,
			.qp = &qp->ibqp,
			.byte_len = byte_len,
		};
		rvt_send_cq(qp, &w, status != IB_WC_SUCCESS);
	}
	return last;
}

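/*
 * Illustrative sketch (hypothetical helper): how a driver's completion
 * path might retire the oldest send wqe with rvt_qp_complete_swqe().
 * qp->s_lock must be held; ib_rvt_wc_opcode maps the posted opcode to
 * a wc opcode.
 */
static inline u32 rvt_example_complete_oldest(struct rvt_qp *qp)
{
	struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, qp->s_last);

	return rvt_qp_complete_swqe(qp, wqe,
				    ib_rvt_wc_opcode[wqe->wr.opcode],
				    IB_WC_SUCCESS);
}
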
extern const int ib_rvt_state_ops[];

struct rvt_dev_info;
int rvt_get_rwqe(struct rvt_qp *qp, bool wr_id_only);
void rvt_comm_est(struct rvt_qp *qp);
void rvt_rc_error(struct rvt_qp *qp, enum ib_wc_status err);
unsigned long rvt_rnr_tbl_to_usec(u32 index);
enum hrtimer_restart rvt_rc_rnr_retry(struct hrtimer *t);
void rvt_add_rnr_timer(struct rvt_qp *qp, u32 aeth);
void rvt_del_timers_sync(struct rvt_qp *qp);
void rvt_stop_rc_timers(struct rvt_qp *qp);
void rvt_add_retry_timer_ext(struct rvt_qp *qp, u8 shift);
static inline void rvt_add_retry_timer(struct rvt_qp *qp)
{
	rvt_add_retry_timer_ext(qp, 0);
}

void rvt_copy_sge(struct rvt_qp *qp, struct rvt_sge_state *ss,
		  void *data, u32 length,
		  bool release, bool copy_last);
void rvt_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe,
		       enum ib_wc_status status);
void rvt_ruc_loopback(struct rvt_qp *qp);

/**
 * struct rvt_qp_iter - the iterator for QPs
 * @qp - the current QP
 *
 * This structure defines the current iterator
 * state for sequenced access to all QPs relative
 * to an rvt_dev_info.
 */
struct rvt_qp_iter {
	struct rvt_qp *qp;
	/* private: backpointer */
	struct rvt_dev_info *rdi;
	/* private: callback routine */
	void (*cb)(struct rvt_qp *qp, u64 v);
	/* private: for arg to callback routine */
	u64 v;
	/* private: number of SMI,GSI QPs for device */
	int specials;
	/* private: current iterator index */
	int n;
};

/**
 * ib_cq_tail - Return tail index of cq buffer
 * @send_cq - The cq for send
 *
 * This is called in qp_iter_print to get tail
 * of cq buffer.
 */
static inline u32 ib_cq_tail(struct ib_cq *send_cq)
{
	struct rvt_cq *cq = ibcq_to_rvtcq(send_cq);

	return cq->ip ?
	       RDMA_READ_UAPI_ATOMIC(cq->queue->tail) :
	       cq->kqueue->tail;
}

/**
 * ib_cq_head - Return head index of cq buffer
 * @send_cq - The cq for send
 *
 * This is called in qp_iter_print to get head
 * of cq buffer.
 */
static inline u32 ib_cq_head(struct ib_cq *send_cq)
{
	struct rvt_cq *cq = ibcq_to_rvtcq(send_cq);

	return cq->ip ?
	       RDMA_READ_UAPI_ATOMIC(cq->queue->head) :
	       cq->kqueue->head;
}

/**
 * rvt_free_rq - free memory allocated for rvt_rq struct
 * @rq: request queue data structure
 *
 * This function should only be called if the rvt_mmap_info()
 * has not succeeded.
 */
static inline void rvt_free_rq(struct rvt_rq *rq)
{
	kvfree(rq->kwq);
	rq->kwq = NULL;
	vfree(rq->wq);
	rq->wq = NULL;
}

/**
 * rvt_to_iport - Get the ibport pointer
 * @qp: the qp pointer
 *
 * This function returns the ibport pointer from the qp pointer.
 */
static inline struct rvt_ibport *rvt_to_iport(struct rvt_qp *qp)
{
	struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);

	return rdi->ports[qp->port_num - 1];
}

/**
 * rvt_rc_credit_avail - Check if there are enough RC credits for the request
 * @qp: the qp
 * @wqe: the request
 *
 * This function returns false when there are not enough credits for the given
 * request and true otherwise.
 */
static inline bool rvt_rc_credit_avail(struct rvt_qp *qp, struct rvt_swqe *wqe)
{
	lockdep_assert_held(&qp->s_lock);
	if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT) &&
	    rvt_cmp_msn(wqe->ssn, qp->s_lsn + 1) > 0) {
		struct rvt_ibport *rvp = rvt_to_iport(qp);

		qp->s_flags |= RVT_S_WAIT_SSN_CREDIT;
		rvp->n_rc_crwaits++;
		return false;
	}
	return true;
}

struct rvt_qp_iter *rvt_qp_iter_init(struct rvt_dev_info *rdi,
				     u64 v,
				     void (*cb)(struct rvt_qp *qp, u64 v));
int rvt_qp_iter_next(struct rvt_qp_iter *iter);
void rvt_qp_iter(struct rvt_dev_info *rdi,
		 u64 v,
		 void (*cb)(struct rvt_qp *qp, u64 v));
void rvt_qp_mr_clean(struct rvt_qp *qp, u32 lkey);
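
/*
 * Illustrative sketch (hypothetical callback): walking every QP on a
 * device with rvt_qp_iter(), e.g. from a debugfs dump routine.
 *
 *	static void example_dump_qp(struct rvt_qp *qp, u64 v)
 *	{
 *		pr_info("qpn 0x%x state %u\n", qp->ibqp.qp_num, qp->state);
 *	}
 *
 *	rvt_qp_iter(rdi, 0, example_dump_qp);
 */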
#endif          /* DEF_RDMAVT_INCQP_H */