| 1 | // SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB |
| 2 | /* Copyright (c) 2015 - 2021 Intel Corporation */ |
| 3 | #include "main.h" |
| 4 | |
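| | /* QP count limits selectable via rf->limits_sel; irdma_hmc_setup() uses the chosen entry to size HMC resources */ |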
| 5 | static struct irdma_rsrc_limits rsrc_limits_table[] = { |
| 6 | [0] = { |
| 7 | .qplimit = SZ_128, |
| 8 | }, |
| 9 | [1] = { |
| 10 | .qplimit = SZ_1K, |
| 11 | }, |
| 12 | [2] = { |
| 13 | .qplimit = SZ_2K, |
| 14 | }, |
| 15 | [3] = { |
| 16 | .qplimit = SZ_4K, |
| 17 | }, |
| 18 | [4] = { |
| 19 | .qplimit = SZ_16K, |
| 20 | }, |
| 21 | [5] = { |
| 22 | .qplimit = SZ_64K, |
| 23 | }, |
| 24 | [6] = { |
| 25 | .qplimit = SZ_128K, |
| 26 | }, |
| 27 | [7] = { |
| 28 | .qplimit = SZ_256K, |
| 29 | }, |
| 30 | }; |
| 31 | |
| 32 | /* types of hmc objects */ |
| 33 | static enum irdma_hmc_rsrc_type iw_hmc_obj_types[] = { |
| 34 | IRDMA_HMC_IW_QP, |
| 35 | IRDMA_HMC_IW_CQ, |
| 36 | IRDMA_HMC_IW_HTE, |
| 37 | IRDMA_HMC_IW_ARP, |
| 38 | IRDMA_HMC_IW_APBVT_ENTRY, |
| 39 | IRDMA_HMC_IW_MR, |
| 40 | IRDMA_HMC_IW_XF, |
| 41 | IRDMA_HMC_IW_XFFL, |
| 42 | IRDMA_HMC_IW_Q1, |
| 43 | IRDMA_HMC_IW_Q1FL, |
| 44 | IRDMA_HMC_IW_TIMER, |
| 45 | IRDMA_HMC_IW_FSIMC, |
| 46 | IRDMA_HMC_IW_FSIAV, |
| 47 | IRDMA_HMC_IW_RRF, |
| 48 | IRDMA_HMC_IW_RRFFL, |
| 49 | IRDMA_HMC_IW_HDR, |
| 50 | IRDMA_HMC_IW_MD, |
| 51 | IRDMA_HMC_IW_OOISC, |
| 52 | IRDMA_HMC_IW_OOISCFFL, |
| 53 | }; |
| 54 | |
| 55 | /** |
| 56 | * irdma_iwarp_ce_handler - handle iwarp completions |
| 57 | * @iwcq: iwarp cq receiving event |
| 58 | */ |
| 59 | static void irdma_iwarp_ce_handler(struct irdma_sc_cq *iwcq) |
| 60 | { |
| 61 | struct irdma_cq *cq = iwcq->back_cq; |
| 62 | |
| 63 | if (cq->ibcq.comp_handler) |
| 64 | cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context); |
| 65 | } |
| 66 | |
| 67 | /** |
| 68 | * irdma_puda_ce_handler - handle puda completion events |
| 69 | * @rf: RDMA PCI function |
| 70 | * @cq: puda completion q for event |
| 71 | */ |
| 72 | static void irdma_puda_ce_handler(struct irdma_pci_f *rf, |
| 73 | struct irdma_sc_cq *cq) |
| 74 | { |
| 75 | struct irdma_sc_dev *dev = &rf->sc_dev; |
| 76 | enum irdma_status_code status; |
| 77 | u32 compl_error; |
| 78 | |
| 79 | do { |
| 80 | status = irdma_puda_poll_cmpl(dev, cq, &compl_error); |
| 81 | if (status == IRDMA_ERR_Q_EMPTY) |
| 82 | break; |
| 83 | if (status) { |
| 84 | ibdev_dbg(to_ibdev(dev), "ERR: puda status = %d\n", status); |
| 85 | break; |
| 86 | } |
| 87 | if (compl_error) { |
| 88 | ibdev_dbg(to_ibdev(dev), "ERR: puda compl_err = 0x%x\n", |
| 89 | compl_error); |
| 90 | break; |
| 91 | } |
| 92 | } while (1); |
| 93 | |
| 94 | irdma_sc_ccq_arm(cq); |
| 95 | } |
| 96 | |
| 97 | /** |
| 98 | * irdma_process_ceq - handle ceq for completions |
| 99 | * @rf: RDMA PCI function |
| 100 | * @ceq: ceq having cq for completion |
| 101 | */ |
| 102 | static void irdma_process_ceq(struct irdma_pci_f *rf, struct irdma_ceq *ceq) |
| 103 | { |
| 104 | struct irdma_sc_dev *dev = &rf->sc_dev; |
| 105 | struct irdma_sc_ceq *sc_ceq; |
| 106 | struct irdma_sc_cq *cq; |
| 107 | unsigned long flags; |
| 108 | |
| 109 | sc_ceq = &ceq->sc_ceq; |
| 110 | do { |
| 111 | spin_lock_irqsave(&ceq->ce_lock, flags); |
| 112 | cq = irdma_sc_process_ceq(dev, sc_ceq); |
| 113 | if (!cq) { |
| 114 | spin_unlock_irqrestore(&ceq->ce_lock, flags); |
| 115 | break; |
| 116 | } |
| 117 | |
| 118 | if (cq->cq_type == IRDMA_CQ_TYPE_IWARP) |
| 119 | irdma_iwarp_ce_handler(cq); |
| 120 | |
| 121 | spin_unlock_irqrestore(&ceq->ce_lock, flags); |
| 122 | |
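| | /* CQP and PUDA completions are handled outside ce_lock: CQP work is deferred to a workqueue, ILQ/IEQ CQs are polled here */ |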
| 123 | if (cq->cq_type == IRDMA_CQ_TYPE_CQP) |
| 124 | queue_work(rf->cqp_cmpl_wq, &rf->cqp_cmpl_work); |
| 125 | else if (cq->cq_type == IRDMA_CQ_TYPE_ILQ || |
| 126 | cq->cq_type == IRDMA_CQ_TYPE_IEQ) |
| 127 | irdma_puda_ce_handler(rf, cq); |
| 128 | } while (1); |
| 129 | } |
| 130 | |
| 131 | static void irdma_set_flush_fields(struct irdma_sc_qp *qp, |
| 132 | struct irdma_aeqe_info *info) |
| 133 | { |
| 134 | qp->sq_flush_code = info->sq; |
| 135 | qp->rq_flush_code = info->rq; |
| 136 | qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC; |
| 137 | |
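| | /* default to a catastrophic QP event; the switch below refines the flush and event codes per AE */ |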
| 138 | switch (info->ae_id) { |
| 139 | case IRDMA_AE_AMP_UNALLOCATED_STAG: |
| 140 | case IRDMA_AE_AMP_BOUNDS_VIOLATION: |
| 141 | case IRDMA_AE_AMP_INVALID_STAG: |
| 142 | qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR; |
| 143 | fallthrough; |
| 144 | case IRDMA_AE_AMP_BAD_PD: |
| 145 | case IRDMA_AE_UDA_XMIT_BAD_PD: |
| 146 | qp->flush_code = FLUSH_PROT_ERR; |
| 147 | break; |
| 148 | case IRDMA_AE_AMP_BAD_QP: |
| 149 | qp->flush_code = FLUSH_LOC_QP_OP_ERR; |
| 150 | break; |
| 151 | case IRDMA_AE_AMP_BAD_STAG_KEY: |
| 152 | case IRDMA_AE_AMP_BAD_STAG_INDEX: |
| 153 | case IRDMA_AE_AMP_TO_WRAP: |
| 154 | case IRDMA_AE_AMP_RIGHTS_VIOLATION: |
| 155 | case IRDMA_AE_AMP_INVALIDATE_NO_REMOTE_ACCESS_RIGHTS: |
| 156 | case IRDMA_AE_PRIV_OPERATION_DENIED: |
| 157 | case IRDMA_AE_IB_INVALID_REQUEST: |
| 158 | case IRDMA_AE_IB_REMOTE_ACCESS_ERROR: |
| 159 | case IRDMA_AE_IB_REMOTE_OP_ERROR: |
| 160 | qp->flush_code = FLUSH_REM_ACCESS_ERR; |
| 161 | qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR; |
| 162 | break; |
| 163 | case IRDMA_AE_LLP_SEGMENT_TOO_SMALL: |
| 164 | case IRDMA_AE_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER: |
| 165 | case IRDMA_AE_UDA_XMIT_DGRAM_TOO_LONG: |
| 166 | case IRDMA_AE_UDA_XMIT_DGRAM_TOO_SHORT: |
| 167 | case IRDMA_AE_UDA_L4LEN_INVALID: |
| 168 | case IRDMA_AE_ROCE_RSP_LENGTH_ERROR: |
| 169 | qp->flush_code = FLUSH_LOC_LEN_ERR; |
| 170 | break; |
| 171 | case IRDMA_AE_LCE_QP_CATASTROPHIC: |
| 172 | qp->flush_code = FLUSH_FATAL_ERR; |
| 173 | break; |
| 174 | case IRDMA_AE_DDP_UBE_INVALID_MO: |
| 175 | case IRDMA_AE_IB_RREQ_AND_Q1_FULL: |
| 176 | case IRDMA_AE_LLP_RECEIVED_MPA_CRC_ERROR: |
| 177 | qp->flush_code = FLUSH_GENERAL_ERR; |
| 178 | break; |
| 179 | case IRDMA_AE_LLP_TOO_MANY_RETRIES: |
| 180 | qp->flush_code = FLUSH_RETRY_EXC_ERR; |
| 181 | break; |
| 182 | case IRDMA_AE_AMP_MWBIND_INVALID_RIGHTS: |
| 183 | case IRDMA_AE_AMP_MWBIND_BIND_DISABLED: |
| 184 | case IRDMA_AE_AMP_MWBIND_INVALID_BOUNDS: |
| 185 | qp->flush_code = FLUSH_MW_BIND_ERR; |
| 186 | break; |
| 187 | default: |
| 188 | qp->flush_code = FLUSH_FATAL_ERR; |
| 189 | break; |
| 190 | } |
| 191 | } |
| 192 | |
| 193 | /** |
| 194 | * irdma_process_aeq - handle aeq events |
| 195 | * @rf: RDMA PCI function |
| 196 | */ |
| 197 | static void irdma_process_aeq(struct irdma_pci_f *rf) |
| 198 | { |
| 199 | struct irdma_sc_dev *dev = &rf->sc_dev; |
| 200 | struct irdma_aeq *aeq = &rf->aeq; |
| 201 | struct irdma_sc_aeq *sc_aeq = &aeq->sc_aeq; |
| 202 | struct irdma_aeqe_info aeinfo; |
| 203 | struct irdma_aeqe_info *info = &aeinfo; |
| 204 | int ret; |
| 205 | struct irdma_qp *iwqp = NULL; |
| 206 | struct irdma_sc_cq *cq = NULL; |
| 207 | struct irdma_cq *iwcq = NULL; |
| 208 | struct irdma_sc_qp *qp = NULL; |
| 209 | struct irdma_qp_host_ctx_info *ctx_info = NULL; |
| 210 | struct irdma_device *iwdev = rf->iwdev; |
| 211 | unsigned long flags; |
| 212 | |
| 213 | u32 aeqcnt = 0; |
| 214 | |
| 215 | if (!sc_aeq->size) |
| 216 | return; |
| 217 | |
| 218 | do { |
| 219 | memset(info, 0, sizeof(*info)); |
| 220 | ret = irdma_sc_get_next_aeqe(sc_aeq, info); |
| 221 | if (ret) |
| 222 | break; |
| 223 | |
| 224 | aeqcnt++; |
| 225 | ibdev_dbg(&iwdev->ibdev, |
| 226 | "AEQ: ae_id = 0x%x bool qp=%d qp_id = %d tcp_state=%d iwarp_state=%d ae_src=%d\n", |
| 227 | info->ae_id, info->qp, info->qp_cq_id, info->tcp_state, |
| 228 | info->iwarp_state, info->ae_src); |
| 229 | |
| 230 | if (info->qp) { |
| 231 | spin_lock_irqsave(&rf->qptable_lock, flags); |
| 232 | iwqp = rf->qp_table[info->qp_cq_id]; |
| 233 | if (!iwqp) { |
| 234 | spin_unlock_irqrestore(&rf->qptable_lock, |
| 235 | flags); |
| 236 | if (info->ae_id == IRDMA_AE_QP_SUSPEND_COMPLETE) { |
| 237 | atomic_dec(&iwdev->vsi.qp_suspend_reqs); |
| 238 | wake_up(&iwdev->suspend_wq); |
| 239 | continue; |
| 240 | } |
| 241 | ibdev_dbg(&iwdev->ibdev, "AEQ: qp_id %d is already freed\n", |
| 242 | info->qp_cq_id); |
| 243 | continue; |
| 244 | } |
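| | /* take a QP reference for the duration of AE processing; dropped at the bottom of the loop */ |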
| 245 | irdma_qp_add_ref(&iwqp->ibqp); |
| 246 | spin_unlock_irqrestore(&rf->qptable_lock, flags); |
| 247 | qp = &iwqp->sc_qp; |
| 248 | spin_lock_irqsave(&iwqp->lock, flags); |
| 249 | iwqp->hw_tcp_state = info->tcp_state; |
| 250 | iwqp->hw_iwarp_state = info->iwarp_state; |
| 251 | if (info->ae_id != IRDMA_AE_QP_SUSPEND_COMPLETE) |
| 252 | iwqp->last_aeq = info->ae_id; |
| 253 | spin_unlock_irqrestore(&iwqp->lock, flags); |
| 254 | ctx_info = &iwqp->ctx_info; |
| 255 | if (rdma_protocol_roce(&iwqp->iwdev->ibdev, 1)) |
| 256 | ctx_info->roce_info->err_rq_idx_valid = true; |
| 257 | else |
| 258 | ctx_info->iwarp_info->err_rq_idx_valid = true; |
| 259 | } else { |
| 260 | if (info->ae_id != IRDMA_AE_CQ_OPERATION_ERROR) |
| 261 | continue; |
| 262 | } |
| 263 | |
| 264 | switch (info->ae_id) { |
| 265 | struct irdma_cm_node *cm_node; |
| 266 | case IRDMA_AE_LLP_CONNECTION_ESTABLISHED: |
| 267 | cm_node = iwqp->cm_node; |
| 268 | if (cm_node->accept_pend) { |
| 269 | atomic_dec(&cm_node->listener->pend_accepts_cnt); |
| 270 | cm_node->accept_pend = 0; |
| 271 | } |
| 272 | iwqp->rts_ae_rcvd = 1; |
| 273 | wake_up_interruptible(&iwqp->waitq); |
| 274 | break; |
| 275 | case IRDMA_AE_LLP_FIN_RECEIVED: |
| 276 | case IRDMA_AE_RDMAP_ROE_BAD_LLP_CLOSE: |
| 277 | if (qp->term_flags) |
| 278 | break; |
| 279 | if (atomic_inc_return(&iwqp->close_timer_started) == 1) { |
| 280 | iwqp->hw_tcp_state = IRDMA_TCP_STATE_CLOSE_WAIT; |
| 281 | if (iwqp->hw_tcp_state == IRDMA_TCP_STATE_CLOSE_WAIT && |
| 282 | iwqp->ibqp_state == IB_QPS_RTS) { |
| 283 | irdma_next_iw_state(iwqp, |
| 284 | IRDMA_QP_STATE_CLOSING, |
| 285 | 0, 0, 0); |
| 286 | irdma_cm_disconn(iwqp); |
| 287 | } |
| 288 | irdma_schedule_cm_timer(iwqp->cm_node, |
| 289 | (struct irdma_puda_buf *)iwqp, |
| 290 | IRDMA_TIMER_TYPE_CLOSE, |
| 291 | 1, 0); |
| 292 | } |
| 293 | break; |
| 294 | case IRDMA_AE_LLP_CLOSE_COMPLETE: |
| 295 | if (qp->term_flags) |
| 296 | irdma_terminate_done(qp, 0); |
| 297 | else |
| 298 | irdma_cm_disconn(iwqp); |
| 299 | break; |
| 300 | case IRDMA_AE_BAD_CLOSE: |
| 301 | case IRDMA_AE_RESET_SENT: |
| 302 | irdma_next_iw_state(iwqp, IRDMA_QP_STATE_ERROR, 1, 0, |
| 303 | 0); |
| 304 | irdma_cm_disconn(iwqp); |
| 305 | break; |
| 306 | case IRDMA_AE_LLP_CONNECTION_RESET: |
| 307 | if (atomic_read(&iwqp->close_timer_started)) |
| 308 | break; |
| 309 | irdma_cm_disconn(iwqp); |
| 310 | break; |
| 311 | case IRDMA_AE_QP_SUSPEND_COMPLETE: |
| 312 | if (iwqp->iwdev->vsi.tc_change_pending) { |
| 313 | atomic_dec(&iwqp->sc_qp.vsi->qp_suspend_reqs); |
| 314 | wake_up(&iwqp->iwdev->suspend_wq); |
| 315 | } |
| 316 | break; |
| 317 | case IRDMA_AE_TERMINATE_SENT: |
| 318 | irdma_terminate_send_fin(qp); |
| 319 | break; |
| 320 | case IRDMA_AE_LLP_TERMINATE_RECEIVED: |
| 321 | irdma_terminate_received(qp, info); |
| 322 | break; |
| 323 | case IRDMA_AE_CQ_OPERATION_ERROR: |
| 324 | ibdev_err(&iwdev->ibdev, |
| 325 | "Processing an iWARP related AE for CQ misc = 0x%04X\n", |
| 326 | info->ae_id); |
| 327 | cq = (struct irdma_sc_cq *)(unsigned long) |
| 328 | info->compl_ctx; |
| 329 | |
| 330 | iwcq = cq->back_cq; |
| 331 | |
| 332 | if (iwcq->ibcq.event_handler) { |
| 333 | struct ib_event ibevent; |
| 334 | |
| 335 | ibevent.device = iwcq->ibcq.device; |
| 336 | ibevent.event = IB_EVENT_CQ_ERR; |
| 337 | ibevent.element.cq = &iwcq->ibcq; |
| 338 | iwcq->ibcq.event_handler(&ibevent, |
| 339 | iwcq->ibcq.cq_context); |
| 340 | } |
| 341 | break; |
| 342 | case IRDMA_AE_RESET_NOT_SENT: |
| 343 | case IRDMA_AE_LLP_DOUBT_REACHABILITY: |
| 344 | case IRDMA_AE_RESOURCE_EXHAUSTION: |
| 345 | break; |
| 346 | case IRDMA_AE_PRIV_OPERATION_DENIED: |
| 347 | case IRDMA_AE_STAG_ZERO_INVALID: |
| 348 | case IRDMA_AE_IB_RREQ_AND_Q1_FULL: |
| 349 | case IRDMA_AE_DDP_UBE_INVALID_DDP_VERSION: |
| 350 | case IRDMA_AE_DDP_UBE_INVALID_MO: |
| 351 | case IRDMA_AE_DDP_UBE_INVALID_QN: |
| 352 | case IRDMA_AE_DDP_NO_L_BIT: |
| 353 | case IRDMA_AE_RDMAP_ROE_INVALID_RDMAP_VERSION: |
| 354 | case IRDMA_AE_RDMAP_ROE_UNEXPECTED_OPCODE: |
| 355 | case IRDMA_AE_ROE_INVALID_RDMA_READ_REQUEST: |
| 356 | case IRDMA_AE_ROE_INVALID_RDMA_WRITE_OR_READ_RESP: |
| 357 | case IRDMA_AE_INVALID_ARP_ENTRY: |
| 358 | case IRDMA_AE_INVALID_TCP_OPTION_RCVD: |
| 359 | case IRDMA_AE_STALE_ARP_ENTRY: |
| 360 | case IRDMA_AE_LLP_RECEIVED_MPA_CRC_ERROR: |
| 361 | case IRDMA_AE_LLP_SEGMENT_TOO_SMALL: |
| 362 | case IRDMA_AE_LLP_SYN_RECEIVED: |
| 363 | case IRDMA_AE_LLP_TOO_MANY_RETRIES: |
| 364 | case IRDMA_AE_LCE_QP_CATASTROPHIC: |
| 365 | case IRDMA_AE_LCE_FUNCTION_CATASTROPHIC: |
| 366 | case IRDMA_AE_LCE_CQ_CATASTROPHIC: |
| 367 | case IRDMA_AE_UDA_XMIT_DGRAM_TOO_LONG: |
| 368 | if (rdma_protocol_roce(&iwdev->ibdev, 1)) |
| 369 | ctx_info->roce_info->err_rq_idx_valid = false; |
| 370 | else |
| 371 | ctx_info->iwarp_info->err_rq_idx_valid = false; |
| 372 | fallthrough; |
| 373 | default: |
| 374 | ibdev_err(&iwdev->ibdev, "abnormal ae_id = 0x%x bool qp=%d qp_id = %d\n", |
| 375 | info->ae_id, info->qp, info->qp_cq_id); |
| 376 | if (rdma_protocol_roce(&iwdev->ibdev, 1)) { |
| 377 | if (!info->sq && ctx_info->roce_info->err_rq_idx_valid) { |
| 378 | ctx_info->roce_info->err_rq_idx = info->wqe_idx; |
| 379 | irdma_sc_qp_setctx_roce(&iwqp->sc_qp, iwqp->host_ctx.va, |
| 380 | ctx_info); |
| 381 | } |
| 382 | irdma_set_flush_fields(qp, info); |
| 383 | irdma_cm_disconn(iwqp); |
| 384 | break; |
| 385 | } |
| 386 | if (!info->sq && ctx_info->iwarp_info->err_rq_idx_valid) { |
| 387 | ctx_info->iwarp_info->err_rq_idx = info->wqe_idx; |
| 388 | ctx_info->tcp_info_valid = false; |
| 389 | ctx_info->iwarp_info_valid = true; |
| 390 | irdma_sc_qp_setctx(&iwqp->sc_qp, iwqp->host_ctx.va, |
| 391 | ctx_info); |
| 392 | } |
| 393 | if (iwqp->hw_iwarp_state != IRDMA_QP_STATE_RTS && |
| 394 | iwqp->hw_iwarp_state != IRDMA_QP_STATE_TERMINATE) { |
| 395 | irdma_next_iw_state(iwqp, IRDMA_QP_STATE_ERROR, 1, 0, 0); |
| 396 | irdma_cm_disconn(iwqp); |
| 397 | } else { |
| 398 | irdma_terminate_connection(qp, info); |
| 399 | } |
| 400 | break; |
| 401 | } |
| 402 | if (info->qp) |
| 403 | irdma_qp_rem_ref(&iwqp->ibqp); |
| 404 | } while (1); |
| 405 | |
| 406 | if (aeqcnt) |
| 407 | irdma_sc_repost_aeq_entries(dev, aeqcnt); |
| 408 | } |
| 409 | |
| 410 | /** |
| 411 | * irdma_ena_intr - set up device interrupts |
| 412 | * @dev: hardware control device structure |
| 413 | * @msix_id: id of the interrupt to be enabled |
| 414 | */ |
| 415 | static void irdma_ena_intr(struct irdma_sc_dev *dev, u32 msix_id) |
| 416 | { |
| 417 | dev->irq_ops->irdma_en_irq(dev, msix_id); |
| 418 | } |
| 419 | |
| 420 | /** |
| 421 | * irdma_dpc - tasklet for aeq and ceq 0 |
| 422 | * @t: tasklet_struct ptr |
| 423 | */ |
| 424 | static void irdma_dpc(struct tasklet_struct *t) |
| 425 | { |
| 426 | struct irdma_pci_f *rf = from_tasklet(rf, t, dpc_tasklet); |
| 427 | |
| 428 | if (rf->msix_shared) |
| 429 | irdma_process_ceq(rf, rf->ceqlist); |
| 430 | irdma_process_aeq(rf); |
| 431 | irdma_ena_intr(&rf->sc_dev, rf->iw_msixtbl[0].idx); |
| 432 | } |
| 433 | |
| 434 | /** |
| 435 | * irdma_ceq_dpc - dpc handler for CEQ |
| 436 | * @t: tasklet_struct ptr |
| 437 | */ |
| 438 | static void irdma_ceq_dpc(struct tasklet_struct *t) |
| 439 | { |
| 440 | struct irdma_ceq *iwceq = from_tasklet(iwceq, t, dpc_tasklet); |
| 441 | struct irdma_pci_f *rf = iwceq->rf; |
| 442 | |
| 443 | irdma_process_ceq(rf, iwceq); |
| 444 | irdma_ena_intr(&rf->sc_dev, iwceq->msix_idx); |
| 445 | } |
| 446 | |
| 447 | /** |
| 448 | * irdma_save_msix_info - copy msix vector information to iwarp device |
| 449 | * @rf: RDMA PCI function |
| 450 | * |
| 451 | * Allocate iwdev msix table and copy the msix info to the table |
| 452 | * Return 0 if successful, otherwise return error |
| 453 | */ |
| 454 | static enum irdma_status_code irdma_save_msix_info(struct irdma_pci_f *rf) |
| 455 | { |
| 456 | struct irdma_qvlist_info *iw_qvlist; |
| 457 | struct irdma_qv_info *iw_qvinfo; |
| 458 | struct msix_entry *pmsix; |
| 459 | u32 ceq_idx; |
| 460 | u32 i; |
| 461 | size_t size; |
| 462 | |
| 463 | if (!rf->msix_count) |
| 464 | return IRDMA_ERR_NO_INTR; |
| 465 | |
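| | /* a single allocation holds the msix vector table followed by the qvlist info */ |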
| 466 | size = sizeof(struct irdma_msix_vector) * rf->msix_count; |
| 467 | size += struct_size(iw_qvlist, qv_info, rf->msix_count); |
| 468 | rf->iw_msixtbl = kzalloc(size, GFP_KERNEL); |
| 469 | if (!rf->iw_msixtbl) |
| 470 | return IRDMA_ERR_NO_MEMORY; |
| 471 | |
| 472 | rf->iw_qvlist = (struct irdma_qvlist_info *) |
| 473 | (&rf->iw_msixtbl[rf->msix_count]); |
| 474 | iw_qvlist = rf->iw_qvlist; |
| 475 | iw_qvinfo = iw_qvlist->qv_info; |
| 476 | iw_qvlist->num_vectors = rf->msix_count; |
| 477 | if (rf->msix_count <= num_online_cpus()) |
| 478 | rf->msix_shared = true; |
| 479 | |
| 480 | pmsix = rf->msix_entries; |
| 481 | for (i = 0, ceq_idx = 0; i < rf->msix_count; i++, iw_qvinfo++) { |
| 482 | rf->iw_msixtbl[i].idx = pmsix->entry; |
| 483 | rf->iw_msixtbl[i].irq = pmsix->vector; |
| 484 | rf->iw_msixtbl[i].cpu_affinity = ceq_idx; |
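| | /* vector 0 always carries the AEQ and, when vectors are shared, CEQ 0 as well */ |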
| 485 | if (!i) { |
| 486 | iw_qvinfo->aeq_idx = 0; |
| 487 | if (rf->msix_shared) |
| 488 | iw_qvinfo->ceq_idx = ceq_idx++; |
| 489 | else |
| 490 | iw_qvinfo->ceq_idx = IRDMA_Q_INVALID_IDX; |
| 491 | } else { |
| 492 | iw_qvinfo->aeq_idx = IRDMA_Q_INVALID_IDX; |
| 493 | iw_qvinfo->ceq_idx = ceq_idx++; |
| 494 | } |
| 495 | iw_qvinfo->itr_idx = 3; |
| 496 | iw_qvinfo->v_idx = rf->iw_msixtbl[i].idx; |
| 497 | pmsix++; |
| 498 | } |
| 499 | |
| 500 | return 0; |
| 501 | } |
| 502 | |
| 503 | /** |
| 504 | * irdma_irq_handler - interrupt handler for aeq and ceq0 |
| 505 | * @irq: Interrupt request number |
| 506 | * @data: RDMA PCI function |
| 507 | */ |
| 508 | static irqreturn_t irdma_irq_handler(int irq, void *data) |
| 509 | { |
| 510 | struct irdma_pci_f *rf = data; |
| 511 | |
| 512 | tasklet_schedule(&rf->dpc_tasklet); |
| 513 | |
| 514 | return IRQ_HANDLED; |
| 515 | } |
| 516 | |
| 517 | /** |
| 518 | * irdma_ceq_handler - interrupt handler for ceq |
| 519 | * @irq: interrupt request number |
| 520 | * @data: ceq pointer |
| 521 | */ |
| 522 | static irqreturn_t irdma_ceq_handler(int irq, void *data) |
| 523 | { |
| 524 | struct irdma_ceq *iwceq = data; |
| 525 | |
| 526 | if (iwceq->irq != irq) |
| 527 | ibdev_err(to_ibdev(&iwceq->rf->sc_dev), "expected irq = %d received irq = %d\n", |
| 528 | iwceq->irq, irq); |
| 529 | tasklet_schedule(&iwceq->dpc_tasklet); |
| 530 | |
| 531 | return IRQ_HANDLED; |
| 532 | } |
| 533 | |
| 534 | /** |
| 535 | * irdma_destroy_irq - destroy device interrupts |
| 536 | * @rf: RDMA PCI function |
| 537 | * @msix_vec: msix vector to disable irq |
| 538 | * @dev_id: parameter to pass to free_irq (used during irq setup) |
| 539 | * |
| 540 | * The function is called when destroying aeq/ceq |
| 541 | */ |
| 542 | static void irdma_destroy_irq(struct irdma_pci_f *rf, |
| 543 | struct irdma_msix_vector *msix_vec, void *dev_id) |
| 544 | { |
| 545 | struct irdma_sc_dev *dev = &rf->sc_dev; |
| 546 | |
| 547 | dev->irq_ops->irdma_dis_irq(dev, msix_vec->idx); |
| 548 | irq_set_affinity_hint(msix_vec->irq, NULL); |
| 549 | free_irq(msix_vec->irq, dev_id); |
| 550 | } |
| 551 | |
| 552 | /** |
| 553 | * irdma_destroy_cqp - destroy control qp |
| 554 | * @rf: RDMA PCI function |
| 555 | * @free_hwcqp: true if hw cqp should be freed |
| 556 | * |
| 557 | * Issue destroy cqp request and |
| 558 | * free the resources associated with the cqp |
| 559 | */ |
| 560 | static void irdma_destroy_cqp(struct irdma_pci_f *rf, bool free_hwcqp) |
| 561 | { |
| 562 | enum irdma_status_code status = 0; |
| 563 | struct irdma_sc_dev *dev = &rf->sc_dev; |
| 564 | struct irdma_cqp *cqp = &rf->cqp; |
| 565 | |
| 566 | if (rf->cqp_cmpl_wq) |
| 567 | destroy_workqueue(rf->cqp_cmpl_wq); |
| 568 | if (free_hwcqp) |
| 569 | status = irdma_sc_cqp_destroy(dev->cqp); |
| 570 | if (status) |
| 571 | ibdev_dbg(to_ibdev(dev), "ERR: Destroy CQP failed %d\n", status); |
| 572 | |
| 573 | irdma_cleanup_pending_cqp_op(rf); |
| 574 | dma_free_coherent(dev->hw->device, cqp->sq.size, cqp->sq.va, |
| 575 | cqp->sq.pa); |
| 576 | cqp->sq.va = NULL; |
| 577 | kfree(cqp->scratch_array); |
| 578 | cqp->scratch_array = NULL; |
| 579 | kfree(cqp->cqp_requests); |
| 580 | cqp->cqp_requests = NULL; |
| 581 | } |
| 582 | |
| 583 | static void irdma_destroy_virt_aeq(struct irdma_pci_f *rf) |
| 584 | { |
| 585 | struct irdma_aeq *aeq = &rf->aeq; |
| 586 | u32 pg_cnt = DIV_ROUND_UP(aeq->mem.size, PAGE_SIZE); |
| 587 | dma_addr_t *pg_arr = (dma_addr_t *)aeq->palloc.level1.addr; |
| 588 | |
| 589 | irdma_unmap_vm_page_list(&rf->hw, pg_arr, pg_cnt); |
| 590 | irdma_free_pble(rf->pble_rsrc, &aeq->palloc); |
| 591 | vfree(aeq->mem.va); |
| 592 | } |
| 593 | |
| 594 | /** |
| 595 | * irdma_destroy_aeq - destroy aeq |
| 596 | * @rf: RDMA PCI function |
| 597 | * |
| 598 | * Issue a destroy aeq request and |
| 599 | * free the resources associated with the aeq |
| 600 | * The function is called during driver unload |
| 601 | */ |
| 602 | static void irdma_destroy_aeq(struct irdma_pci_f *rf) |
| 603 | { |
| 604 | enum irdma_status_code status = IRDMA_ERR_NOT_READY; |
| 605 | struct irdma_sc_dev *dev = &rf->sc_dev; |
| 606 | struct irdma_aeq *aeq = &rf->aeq; |
| 607 | |
| 608 | if (!rf->msix_shared) { |
| 609 | rf->sc_dev.irq_ops->irdma_cfg_aeq(&rf->sc_dev, rf->iw_msixtbl->idx, false); |
| 610 | irdma_destroy_irq(rf, rf->iw_msixtbl, rf); |
| 611 | } |
| 612 | if (rf->reset) |
| 613 | goto exit; |
| 614 | |
| 615 | aeq->sc_aeq.size = 0; |
| 616 | status = irdma_cqp_aeq_cmd(dev, &aeq->sc_aeq, IRDMA_OP_AEQ_DESTROY); |
| 617 | if (status) |
| 618 | ibdev_dbg(to_ibdev(dev), "ERR: Destroy AEQ failed %d\n", status); |
| 619 | |
| 620 | exit: |
| 621 | if (aeq->virtual_map) { |
| 622 | irdma_destroy_virt_aeq(rf); |
| 623 | } else { |
| 624 | dma_free_coherent(dev->hw->device, aeq->mem.size, aeq->mem.va, |
| 625 | aeq->mem.pa); |
| 626 | aeq->mem.va = NULL; |
| 627 | } |
| 628 | } |
| 629 | |
| 630 | /** |
| 631 | * irdma_destroy_ceq - destroy ceq |
| 632 | * @rf: RDMA PCI function |
| 633 | * @iwceq: ceq to be destroyed |
| 634 | * |
| 635 | * Issue a destroy ceq request and |
| 636 | * free the resources associated with the ceq |
| 637 | */ |
| 638 | static void irdma_destroy_ceq(struct irdma_pci_f *rf, struct irdma_ceq *iwceq) |
| 639 | { |
| 640 | enum irdma_status_code status; |
| 641 | struct irdma_sc_dev *dev = &rf->sc_dev; |
| 642 | |
| 643 | if (rf->reset) |
| 644 | goto exit; |
| 645 | |
| 646 | status = irdma_sc_ceq_destroy(&iwceq->sc_ceq, 0, 1); |
| 647 | if (status) { |
| 648 | ibdev_dbg(to_ibdev(dev), "ERR: CEQ destroy command failed %d\n", status); |
| 649 | goto exit; |
| 650 | } |
| 651 | |
| 652 | status = irdma_sc_cceq_destroy_done(&iwceq->sc_ceq); |
| 653 | if (status) |
| 654 | ibdev_dbg(to_ibdev(dev), "ERR: CEQ destroy completion failed %d\n", |
| 655 | status); |
| 656 | exit: |
| 657 | dma_free_coherent(dev->hw->device, iwceq->mem.size, iwceq->mem.va, |
| 658 | iwceq->mem.pa); |
| 659 | iwceq->mem.va = NULL; |
| 660 | } |
| 661 | |
| 662 | /** |
| 663 | * irdma_del_ceq_0 - destroy ceq 0 |
| 664 | * @rf: RDMA PCI function |
| 665 | * |
| 666 | * Disable the ceq 0 interrupt and destroy the ceq 0 |
| 667 | */ |
| 668 | static void irdma_del_ceq_0(struct irdma_pci_f *rf) |
| 669 | { |
| 670 | struct irdma_ceq *iwceq = rf->ceqlist; |
| 671 | struct irdma_msix_vector *msix_vec; |
| 672 | |
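| | /* dev_id passed to free_irq must match request_irq: rf for the shared AEQ/CEQ vector, iwceq otherwise */ |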
| 673 | if (rf->msix_shared) { |
| 674 | msix_vec = &rf->iw_msixtbl[0]; |
| 675 | rf->sc_dev.irq_ops->irdma_cfg_ceq(&rf->sc_dev, |
| 676 | msix_vec->ceq_id, |
| 677 | msix_vec->idx, false); |
| 678 | irdma_destroy_irq(rf, msix_vec, rf); |
| 679 | } else { |
| 680 | msix_vec = &rf->iw_msixtbl[1]; |
| 681 | irdma_destroy_irq(rf, msix_vec, iwceq); |
| 682 | } |
| 683 | |
| 684 | irdma_destroy_ceq(rf, iwceq); |
| 685 | rf->sc_dev.ceq_valid = false; |
| 686 | rf->ceqs_count = 0; |
| 687 | } |
| 688 | |
| 689 | /** |
| 690 | * irdma_del_ceqs - destroy all ceqs except CEQ 0 |
| 691 | * @rf: RDMA PCI function |
| 692 | * |
| 693 | * Go through all of the device ceqs, except 0, and for each |
| 694 | * ceq disable the ceq interrupt and destroy the ceq |
| 695 | */ |
| 696 | static void irdma_del_ceqs(struct irdma_pci_f *rf) |
| 697 | { |
| 698 | struct irdma_ceq *iwceq = &rf->ceqlist[1]; |
| 699 | struct irdma_msix_vector *msix_vec; |
| 700 | u32 i = 0; |
| 701 | |
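| | /* CEQ 1 maps to vector 1 when vector 0 is shared with the AEQ, otherwise to vector 2 */ |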
| 702 | if (rf->msix_shared) |
| 703 | msix_vec = &rf->iw_msixtbl[1]; |
| 704 | else |
| 705 | msix_vec = &rf->iw_msixtbl[2]; |
| 706 | |
| 707 | for (i = 1; i < rf->ceqs_count; i++, msix_vec++, iwceq++) { |
| 708 | rf->sc_dev.irq_ops->irdma_cfg_ceq(&rf->sc_dev, msix_vec->ceq_id, |
| 709 | msix_vec->idx, false); |
| 710 | irdma_destroy_irq(rf, msix_vec, iwceq); |
| 711 | irdma_cqp_ceq_cmd(&rf->sc_dev, &iwceq->sc_ceq, |
| 712 | IRDMA_OP_CEQ_DESTROY); |
| 713 | dma_free_coherent(rf->sc_dev.hw->device, iwceq->mem.size, |
| 714 | iwceq->mem.va, iwceq->mem.pa); |
| 715 | iwceq->mem.va = NULL; |
| 716 | } |
| 717 | rf->ceqs_count = 1; |
| 718 | } |
| 719 | |
| 720 | /** |
| 721 | * irdma_destroy_ccq - destroy control cq |
| 722 | * @rf: RDMA PCI function |
| 723 | * |
| 724 | * Issue destroy ccq request and |
| 725 | * free the resources associated with the ccq |
| 726 | */ |
| 727 | static void irdma_destroy_ccq(struct irdma_pci_f *rf) |
| 728 | { |
| 729 | struct irdma_sc_dev *dev = &rf->sc_dev; |
| 730 | struct irdma_ccq *ccq = &rf->ccq; |
| 731 | enum irdma_status_code status = 0; |
| 732 | |
| 733 | if (!rf->reset) |
| 734 | status = irdma_sc_ccq_destroy(dev->ccq, 0, true); |
| 735 | if (status) |
| 736 | ibdev_dbg(to_ibdev(dev), "ERR: CCQ destroy failed %d\n", status); |
| 737 | dma_free_coherent(dev->hw->device, ccq->mem_cq.size, ccq->mem_cq.va, |
| 738 | ccq->mem_cq.pa); |
| 739 | ccq->mem_cq.va = NULL; |
| 740 | } |
| 741 | |
| 742 | /** |
| 743 | * irdma_close_hmc_objects_type - delete hmc objects of a given type |
| 744 | * @dev: iwarp device |
| 745 | * @obj_type: the hmc object type to be deleted |
| 746 | * @hmc_info: host memory info struct |
| 747 | * @privileged: permission to close HMC objects |
| 748 | * @reset: true if called before reset |
| 749 | */ |
| 750 | static void irdma_close_hmc_objects_type(struct irdma_sc_dev *dev, |
| 751 | enum irdma_hmc_rsrc_type obj_type, |
| 752 | struct irdma_hmc_info *hmc_info, |
| 753 | bool privileged, bool reset) |
| 754 | { |
| 755 | struct irdma_hmc_del_obj_info info = {}; |
| 756 | |
| 757 | info.hmc_info = hmc_info; |
| 758 | info.rsrc_type = obj_type; |
| 759 | info.count = hmc_info->hmc_obj[obj_type].cnt; |
| 760 | info.privileged = privileged; |
| 761 | if (irdma_sc_del_hmc_obj(dev, &info, reset)) |
| 762 | ibdev_dbg(to_ibdev(dev), "ERR: del HMC obj of type %d failed\n", |
| 763 | obj_type); |
| 764 | } |
| 765 | |
| 766 | /** |
| 767 | * irdma_del_hmc_objects - remove all device hmc objects |
| 768 | * @dev: iwarp device |
| 769 | * @hmc_info: hmc_info to free |
| 770 | * @privileged: permission to delete HMC objects |
| 771 | * @reset: true if called before reset |
| 772 | * @vers: hardware version |
| 773 | */ |
| 774 | static void irdma_del_hmc_objects(struct irdma_sc_dev *dev, |
| 775 | struct irdma_hmc_info *hmc_info, bool privileged, |
| 776 | bool reset, enum irdma_vers vers) |
| 777 | { |
| 778 | unsigned int i; |
| 779 | |
| 780 | for (i = 0; i < IW_HMC_OBJ_TYPE_NUM; i++) { |
| 781 | if (dev->hmc_info->hmc_obj[iw_hmc_obj_types[i]].cnt) |
| 782 | irdma_close_hmc_objects_type(dev, iw_hmc_obj_types[i], |
| 783 | hmc_info, privileged, reset); |
| 784 | if (vers == IRDMA_GEN_1 && i == IRDMA_HMC_IW_TIMER) |
| 785 | break; |
| 786 | } |
| 787 | } |
| 788 | |
| 789 | /** |
| 790 | * irdma_create_hmc_obj_type - create hmc object of a given type |
| 791 | * @dev: hardware control device structure |
| 792 | * @info: information for the hmc object to create |
| 793 | */ |
| 794 | static enum irdma_status_code |
| 795 | irdma_create_hmc_obj_type(struct irdma_sc_dev *dev, |
| 796 | struct irdma_hmc_create_obj_info *info) |
| 797 | { |
| 798 | return irdma_sc_create_hmc_obj(dev, info); |
| 799 | } |
| 800 | |
| 801 | /** |
| 802 | * irdma_create_hmc_objs - create all hmc objects for the device |
| 803 | * @rf: RDMA PCI function |
| 804 | * @privileged: permission to create HMC objects |
| 805 | * @vers: HW version |
| 806 | * |
| 807 | * Create the device hmc objects and allocate hmc pages |
| 808 | * Return 0 if successful, otherwise clean up and return error |
| 809 | */ |
| 810 | static enum irdma_status_code |
| 811 | irdma_create_hmc_objs(struct irdma_pci_f *rf, bool privileged, enum irdma_vers vers) |
| 812 | { |
| 813 | struct irdma_sc_dev *dev = &rf->sc_dev; |
| 814 | struct irdma_hmc_create_obj_info info = {}; |
| 815 | enum irdma_status_code status = 0; |
| 816 | int i; |
| 817 | |
| 818 | info.hmc_info = dev->hmc_info; |
| 819 | info.privileged = privileged; |
| 820 | info.entry_type = rf->sd_type; |
| 821 | |
| 822 | for (i = 0; i < IW_HMC_OBJ_TYPE_NUM; i++) { |
| 823 | if (dev->hmc_info->hmc_obj[iw_hmc_obj_types[i]].cnt) { |
| 824 | info.rsrc_type = iw_hmc_obj_types[i]; |
| 825 | info.count = dev->hmc_info->hmc_obj[info.rsrc_type].cnt; |
| 826 | info.add_sd_cnt = 0; |
| 827 | status = irdma_create_hmc_obj_type(dev, &info); |
| 828 | if (status) { |
| 829 | ibdev_dbg(to_ibdev(dev), |
| 830 | "ERR: create obj type %d status = %d\n", |
| 831 | iw_hmc_obj_types[i], status); |
| 832 | break; |
| 833 | } |
| 834 | } |
| 835 | if (vers == IRDMA_GEN_1 && i == IRDMA_HMC_IW_TIMER) |
| 836 | break; |
| 837 | } |
| 838 | |
| 839 | if (!status) |
| 840 | return irdma_sc_static_hmc_pages_allocated(dev->cqp, 0, dev->hmc_fn_id, |
| 841 | true, true); |
| 842 | |
| 843 | while (i) { |
| 844 | i--; |
| 845 | /* destroy the hmc objects of a given type */ |
| 846 | if (dev->hmc_info->hmc_obj[iw_hmc_obj_types[i]].cnt) |
| 847 | irdma_close_hmc_objects_type(dev, iw_hmc_obj_types[i], |
| 848 | dev->hmc_info, privileged, |
| 849 | false); |
| 850 | } |
| 851 | |
| 852 | return status; |
| 853 | } |
| 854 | |
| 855 | /** |
| 856 | * irdma_obj_aligned_mem - get aligned memory from device allocated memory |
| 857 | * @rf: RDMA PCI function |
| 858 | * @memptr: points to the memory addresses |
| 859 | * @size: size of memory needed |
| 860 | * @mask: mask for the aligned memory |
| 861 | * |
| 862 | * Get aligned memory of the requested size and |
| 863 | * update the memptr to point to the new aligned memory |
| 864 | * Return 0 if successful, otherwise return no memory error |
| 865 | */ |
| 866 | static enum irdma_status_code |
| 867 | irdma_obj_aligned_mem(struct irdma_pci_f *rf, struct irdma_dma_mem *memptr, |
| 868 | u32 size, u32 mask) |
| 869 | { |
| 870 | unsigned long va, newva; |
| 871 | unsigned long extra; |
| 872 | |
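| | /* carve the next aligned chunk out of the pre-allocated obj_mem region and advance obj_next */ |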
| 873 | va = (unsigned long)rf->obj_next.va; |
| 874 | newva = va; |
| 875 | if (mask) |
| 876 | newva = ALIGN(va, (unsigned long)mask + 1ULL); |
| 877 | extra = newva - va; |
| 878 | memptr->va = (u8 *)va + extra; |
| 879 | memptr->pa = rf->obj_next.pa + extra; |
| 880 | memptr->size = size; |
| 881 | if (((u8 *)memptr->va + size) > ((u8 *)rf->obj_mem.va + rf->obj_mem.size)) |
| 882 | return IRDMA_ERR_NO_MEMORY; |
| 883 | |
| 884 | rf->obj_next.va = (u8 *)memptr->va + size; |
| 885 | rf->obj_next.pa = memptr->pa + size; |
| 886 | |
| 887 | return 0; |
| 888 | } |
| 889 | |
| 890 | /** |
| 891 | * irdma_create_cqp - create control qp |
| 892 | * @rf: RDMA PCI function |
| 893 | * |
| 894 | * Return 0, if the cqp and all the resources associated with it |
| 895 | * are successfully created, otherwise return error |
| 896 | */ |
| 897 | static enum irdma_status_code irdma_create_cqp(struct irdma_pci_f *rf) |
| 898 | { |
| 899 | enum irdma_status_code status; |
| 900 | u32 sqsize = IRDMA_CQP_SW_SQSIZE_2048; |
| 901 | struct irdma_dma_mem mem; |
| 902 | struct irdma_sc_dev *dev = &rf->sc_dev; |
| 903 | struct irdma_cqp_init_info cqp_init_info = {}; |
| 904 | struct irdma_cqp *cqp = &rf->cqp; |
| 905 | u16 maj_err, min_err; |
| 906 | int i; |
| 907 | |
| 908 | cqp->cqp_requests = kcalloc(sqsize, sizeof(*cqp->cqp_requests), GFP_KERNEL); |
| 909 | if (!cqp->cqp_requests) |
| 910 | return IRDMA_ERR_NO_MEMORY; |
| 911 | |
| 912 | cqp->scratch_array = kcalloc(sqsize, sizeof(*cqp->scratch_array), GFP_KERNEL); |
| 913 | if (!cqp->scratch_array) { |
| 914 | kfree(cqp->cqp_requests); |
| 915 | return IRDMA_ERR_NO_MEMORY; |
| 916 | } |
| 917 | |
| 918 | dev->cqp = &cqp->sc_cqp; |
| 919 | dev->cqp->dev = dev; |
| 920 | cqp->sq.size = ALIGN(sizeof(struct irdma_cqp_sq_wqe) * sqsize, |
| 921 | IRDMA_CQP_ALIGNMENT); |
| 922 | cqp->sq.va = dma_alloc_coherent(dev->hw->device, cqp->sq.size, |
| 923 | &cqp->sq.pa, GFP_KERNEL); |
| 924 | if (!cqp->sq.va) { |
| 925 | kfree(cqp->scratch_array); |
| 926 | kfree(cqp->cqp_requests); |
| 927 | return IRDMA_ERR_NO_MEMORY; |
| 928 | } |
| 929 | |
| 930 | status = irdma_obj_aligned_mem(rf, &mem, sizeof(struct irdma_cqp_ctx), |
| 931 | IRDMA_HOST_CTX_ALIGNMENT_M); |
| 932 | if (status) |
| 933 | goto exit; |
| 934 | |
| 935 | dev->cqp->host_ctx_pa = mem.pa; |
| 936 | dev->cqp->host_ctx = mem.va; |
| 937 | /* populate the cqp init info */ |
| 938 | cqp_init_info.dev = dev; |
| 939 | cqp_init_info.sq_size = sqsize; |
| 940 | cqp_init_info.sq = cqp->sq.va; |
| 941 | cqp_init_info.sq_pa = cqp->sq.pa; |
| 942 | cqp_init_info.host_ctx_pa = mem.pa; |
| 943 | cqp_init_info.host_ctx = mem.va; |
| 944 | cqp_init_info.hmc_profile = rf->rsrc_profile; |
| 945 | cqp_init_info.scratch_array = cqp->scratch_array; |
| 946 | cqp_init_info.protocol_used = rf->protocol_used; |
| 947 | |
| 948 | switch (rf->rdma_ver) { |
| 949 | case IRDMA_GEN_1: |
| 950 | cqp_init_info.hw_maj_ver = IRDMA_CQPHC_HW_MAJVER_GEN_1; |
| 951 | break; |
| 952 | case IRDMA_GEN_2: |
| 953 | cqp_init_info.hw_maj_ver = IRDMA_CQPHC_HW_MAJVER_GEN_2; |
| 954 | break; |
| 955 | } |
| 956 | status = irdma_sc_cqp_init(dev->cqp, &cqp_init_info); |
| 957 | if (status) { |
| 958 | ibdev_dbg(to_ibdev(dev), "ERR: cqp init status %d\n", status); |
| 959 | goto exit; |
| 960 | } |
| 961 | |
| 962 | spin_lock_init(&cqp->req_lock); |
| 963 | spin_lock_init(&cqp->compl_lock); |
| 964 | |
| 965 | status = irdma_sc_cqp_create(dev->cqp, &maj_err, &min_err); |
| 966 | if (status) { |
| 967 | ibdev_dbg(to_ibdev(dev), |
| 968 | "ERR: cqp create failed - status %d maj_err %d min_err %d\n", |
| 969 | status, maj_err, min_err); |
| 970 | goto exit; |
| 971 | } |
| 972 | |
| 973 | INIT_LIST_HEAD(&cqp->cqp_avail_reqs); |
| 974 | INIT_LIST_HEAD(&cqp->cqp_pending_reqs); |
| 975 | |
| 976 | /* init the waitqueue of the cqp_requests and add them to the list */ |
| 977 | for (i = 0; i < sqsize; i++) { |
| 978 | init_waitqueue_head(&cqp->cqp_requests[i].waitq); |
| 979 | list_add_tail(&cqp->cqp_requests[i].list, &cqp->cqp_avail_reqs); |
| 980 | } |
| 981 | init_waitqueue_head(&cqp->remove_wq); |
| 982 | return 0; |
| 983 | |
| 984 | exit: |
| 985 | irdma_destroy_cqp(rf, false); |
| 986 | |
| 987 | return status; |
| 988 | } |
| 989 | |
| 990 | /** |
| 991 | * irdma_create_ccq - create control cq |
| 992 | * @rf: RDMA PCI function |
| 993 | * |
| 994 | * Return 0, if the ccq and the resources associated with it |
| 995 | * are successfully created, otherwise return error |
| 996 | */ |
| 997 | static enum irdma_status_code irdma_create_ccq(struct irdma_pci_f *rf) |
| 998 | { |
| 999 | struct irdma_sc_dev *dev = &rf->sc_dev; |
| 1000 | enum irdma_status_code status; |
| 1001 | struct irdma_ccq_init_info info = {}; |
| 1002 | struct irdma_ccq *ccq = &rf->ccq; |
| 1003 | |
| 1004 | dev->ccq = &ccq->sc_cq; |
| 1005 | dev->ccq->dev = dev; |
| 1006 | info.dev = dev; |
| 1007 | ccq->shadow_area.size = sizeof(struct irdma_cq_shadow_area); |
| 1008 | ccq->mem_cq.size = ALIGN(sizeof(struct irdma_cqe) * IW_CCQ_SIZE, |
| 1009 | IRDMA_CQ0_ALIGNMENT); |
| 1010 | ccq->mem_cq.va = dma_alloc_coherent(dev->hw->device, ccq->mem_cq.size, |
| 1011 | &ccq->mem_cq.pa, GFP_KERNEL); |
| 1012 | if (!ccq->mem_cq.va) |
| 1013 | return IRDMA_ERR_NO_MEMORY; |
| 1014 | |
| 1015 | status = irdma_obj_aligned_mem(rf, &ccq->shadow_area, |
| 1016 | ccq->shadow_area.size, |
| 1017 | IRDMA_SHADOWAREA_M); |
| 1018 | if (status) |
| 1019 | goto exit; |
| 1020 | |
| 1021 | ccq->sc_cq.back_cq = ccq; |
| 1022 | /* populate the ccq init info */ |
| 1023 | info.cq_base = ccq->mem_cq.va; |
| 1024 | info.cq_pa = ccq->mem_cq.pa; |
| 1025 | info.num_elem = IW_CCQ_SIZE; |
| 1026 | info.shadow_area = ccq->shadow_area.va; |
| 1027 | info.shadow_area_pa = ccq->shadow_area.pa; |
| 1028 | info.ceqe_mask = false; |
| 1029 | info.ceq_id_valid = true; |
| 1030 | info.shadow_read_threshold = 16; |
| 1031 | info.vsi = &rf->default_vsi; |
| 1032 | status = irdma_sc_ccq_init(dev->ccq, &info); |
| 1033 | if (!status) |
| 1034 | status = irdma_sc_ccq_create(dev->ccq, 0, true, true); |
| 1035 | exit: |
| 1036 | if (status) { |
| 1037 | dma_free_coherent(dev->hw->device, ccq->mem_cq.size, |
| 1038 | ccq->mem_cq.va, ccq->mem_cq.pa); |
| 1039 | ccq->mem_cq.va = NULL; |
| 1040 | } |
| 1041 | |
| 1042 | return status; |
| 1043 | } |
| 1044 | |
| 1045 | /** |
| 1046 | * irdma_alloc_set_mac - set up a mac address table entry |
| 1047 | * @iwdev: irdma device |
| 1048 | * |
| 1049 | * Allocate a mac ip entry and add it to the hw table. Return 0 |
| 1050 | * if successful, otherwise return error |
| 1051 | */ |
| 1052 | static enum irdma_status_code irdma_alloc_set_mac(struct irdma_device *iwdev) |
| 1053 | { |
| 1054 | enum irdma_status_code status; |
| 1055 | |
| 1056 | status = irdma_alloc_local_mac_entry(iwdev->rf, |
| 1057 | &iwdev->mac_ip_table_idx); |
| 1058 | if (!status) { |
| 1059 | status = irdma_add_local_mac_entry(iwdev->rf, |
| 1060 | (const u8 *)iwdev->netdev->dev_addr, |
| 1061 | (u8)iwdev->mac_ip_table_idx); |
| 1062 | if (status) |
| 1063 | irdma_del_local_mac_entry(iwdev->rf, |
| 1064 | (u8)iwdev->mac_ip_table_idx); |
| 1065 | } |
| 1066 | return status; |
| 1067 | } |
| 1068 | |
| 1069 | /** |
| 1070 | * irdma_cfg_ceq_vector - set up the msix interrupt vector for |
| 1071 | * ceq |
| 1072 | * @rf: RDMA PCI function |
| 1073 | * @iwceq: ceq associated with the vector |
| 1074 | * @ceq_id: the id number of the iwceq |
| 1075 | * @msix_vec: interrupt vector information |
| 1076 | * |
| 1077 | * Allocate interrupt resources and enable irq handling |
| 1078 | * Return 0 if successful, otherwise return error |
| 1079 | */ |
| 1080 | static enum irdma_status_code |
| 1081 | irdma_cfg_ceq_vector(struct irdma_pci_f *rf, struct irdma_ceq *iwceq, |
| 1082 | u32 ceq_id, struct irdma_msix_vector *msix_vec) |
| 1083 | { |
| 1084 | int status; |
| 1085 | |
| 1086 | if (rf->msix_shared && !ceq_id) { |
| 1087 | tasklet_setup(&rf->dpc_tasklet, irdma_dpc); |
| 1088 | status = request_irq(msix_vec->irq, irdma_irq_handler, 0, |
| 1089 | "AEQCEQ", rf); |
| 1090 | } else { |
| 1091 | tasklet_setup(&iwceq->dpc_tasklet, irdma_ceq_dpc); |
| 1092 | |
| 1093 | status = request_irq(msix_vec->irq, irdma_ceq_handler, 0, |
| 1094 | "CEQ", iwceq); |
| 1095 | } |
| 1096 | cpumask_clear(&msix_vec->mask); |
| 1097 | cpumask_set_cpu(msix_vec->cpu_affinity, &msix_vec->mask); |
| 1098 | irq_set_affinity_hint(msix_vec->irq, &msix_vec->mask); |
| 1099 | if (status) { |
| 1100 | ibdev_dbg(&rf->iwdev->ibdev, "ERR: ceq irq config fail\n"); |
| 1101 | return IRDMA_ERR_CFG; |
| 1102 | } |
| 1103 | |
| 1104 | msix_vec->ceq_id = ceq_id; |
| 1105 | rf->sc_dev.irq_ops->irdma_cfg_ceq(&rf->sc_dev, ceq_id, msix_vec->idx, true); |
| 1106 | |
| 1107 | return 0; |
| 1108 | } |
| 1109 | |
| 1110 | /** |
| 1111 | * irdma_cfg_aeq_vector - set up the msix vector for aeq |
| 1112 | * @rf: RDMA PCI function |
| 1113 | * |
| 1114 | * Allocate interrupt resources and enable irq handling |
| 1115 | * Return 0 if successful, otherwise return error |
| 1116 | */ |
| 1117 | static enum irdma_status_code irdma_cfg_aeq_vector(struct irdma_pci_f *rf) |
| 1118 | { |
| 1119 | struct irdma_msix_vector *msix_vec = rf->iw_msixtbl; |
| 1120 | u32 ret = 0; |
| 1121 | |
| 1122 | if (!rf->msix_shared) { |
| 1123 | tasklet_setup(&rf->dpc_tasklet, irdma_dpc); |
| 1124 | ret = request_irq(msix_vec->irq, irdma_irq_handler, 0, |
| 1125 | "irdma", rf); |
| 1126 | } |
| 1127 | if (ret) { |
| 1128 | ibdev_dbg(&rf->iwdev->ibdev, "ERR: aeq irq config fail\n"); |
| 1129 | return IRDMA_ERR_CFG; |
| 1130 | } |
| 1131 | |
| 1132 | rf->sc_dev.irq_ops->irdma_cfg_aeq(&rf->sc_dev, msix_vec->idx, true); |
| 1133 | |
| 1134 | return 0; |
| 1135 | } |
| 1136 | |
| 1137 | /** |
| 1138 | * irdma_create_ceq - create completion event queue |
| 1139 | * @rf: RDMA PCI function |
| 1140 | * @iwceq: pointer to the ceq resources to be created |
| 1141 | * @ceq_id: the id number of the iwceq |
| 1142 | * @vsi: SC vsi struct |
| 1143 | * |
| 1144 | * Return 0, if the ceq and the resources associated with it |
| 1145 | * are successfully created, otherwise return error |
| 1146 | */ |
| 1147 | static enum irdma_status_code irdma_create_ceq(struct irdma_pci_f *rf, |
| 1148 | struct irdma_ceq *iwceq, |
| 1149 | u32 ceq_id, |
| 1150 | struct irdma_sc_vsi *vsi) |
| 1151 | { |
| 1152 | enum irdma_status_code status; |
| 1153 | struct irdma_ceq_init_info info = {}; |
| 1154 | struct irdma_sc_dev *dev = &rf->sc_dev; |
| 1155 | u64 scratch; |
| 1156 | u32 ceq_size; |
| 1157 | |
| 1158 | info.ceq_id = ceq_id; |
| 1159 | iwceq->rf = rf; |
| 1160 | ceq_size = min(rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].cnt, |
| 1161 | dev->hw_attrs.max_hw_ceq_size); |
| 1162 | iwceq->mem.size = ALIGN(sizeof(struct irdma_ceqe) * ceq_size, |
| 1163 | IRDMA_CEQ_ALIGNMENT); |
| 1164 | iwceq->mem.va = dma_alloc_coherent(dev->hw->device, iwceq->mem.size, |
| 1165 | &iwceq->mem.pa, GFP_KERNEL); |
| 1166 | if (!iwceq->mem.va) |
| 1167 | return IRDMA_ERR_NO_MEMORY; |
| 1168 | |
| 1169 | info.ceq_id = ceq_id; |
| 1170 | info.ceqe_base = iwceq->mem.va; |
| 1171 | info.ceqe_pa = iwceq->mem.pa; |
| 1172 | info.elem_cnt = ceq_size; |
| 1173 | iwceq->sc_ceq.ceq_id = ceq_id; |
| 1174 | info.dev = dev; |
| 1175 | info.vsi = vsi; |
| 1176 | scratch = (uintptr_t)&rf->cqp.sc_cqp; |
| 1177 | status = irdma_sc_ceq_init(&iwceq->sc_ceq, &info); |
| 1178 | if (!status) { |
| 1179 | if (dev->ceq_valid) |
| 1180 | status = irdma_cqp_ceq_cmd(&rf->sc_dev, &iwceq->sc_ceq, |
| 1181 | IRDMA_OP_CEQ_CREATE); |
| 1182 | else |
| 1183 | status = irdma_sc_cceq_create(&iwceq->sc_ceq, scratch); |
| 1184 | } |
| 1185 | |
| 1186 | if (status) { |
| 1187 | dma_free_coherent(dev->hw->device, iwceq->mem.size, |
| 1188 | iwceq->mem.va, iwceq->mem.pa); |
| 1189 | iwceq->mem.va = NULL; |
| 1190 | } |
| 1191 | |
| 1192 | return status; |
| 1193 | } |
| 1194 | |
| 1195 | /** |
| 1196 | * irdma_setup_ceq_0 - create CEQ 0 and its interrupt resource |
| 1197 | * @rf: RDMA PCI function |
| 1198 | * |
| 1199 | * Allocate a list for all device completion event queues |
| 1200 | * Create the ceq 0 and configure its msix interrupt vector |
| 1201 | * Return 0, if successfully set up, otherwise return error |
| 1202 | */ |
| 1203 | static enum irdma_status_code irdma_setup_ceq_0(struct irdma_pci_f *rf) |
| 1204 | { |
| 1205 | struct irdma_ceq *iwceq; |
| 1206 | struct irdma_msix_vector *msix_vec; |
| 1207 | u32 i; |
| 1208 | enum irdma_status_code status = 0; |
| 1209 | u32 num_ceqs; |
| 1210 | |
| 1211 | num_ceqs = min(rf->msix_count, rf->sc_dev.hmc_fpm_misc.max_ceqs); |
| 1212 | rf->ceqlist = kcalloc(num_ceqs, sizeof(*rf->ceqlist), GFP_KERNEL); |
| 1213 | if (!rf->ceqlist) { |
| 1214 | status = IRDMA_ERR_NO_MEMORY; |
| 1215 | goto exit; |
| 1216 | } |
| 1217 | |
| 1218 | iwceq = &rf->ceqlist[0]; |
| 1219 | status = irdma_create_ceq(rf, iwceq, 0, &rf->default_vsi); |
| 1220 | if (status) { |
| 1221 | ibdev_dbg(&rf->iwdev->ibdev, "ERR: create ceq status = %d\n", |
| 1222 | status); |
| 1223 | goto exit; |
| 1224 | } |
| 1225 | |
| 1226 | spin_lock_init(&iwceq->ce_lock); |
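| | /* CEQ 0 shares vector 0 with the AEQ when msix_shared is set, otherwise it uses vector 1 */ |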
| 1227 | i = rf->msix_shared ? 0 : 1; |
| 1228 | msix_vec = &rf->iw_msixtbl[i]; |
| 1229 | iwceq->irq = msix_vec->irq; |
| 1230 | iwceq->msix_idx = msix_vec->idx; |
| 1231 | status = irdma_cfg_ceq_vector(rf, iwceq, 0, msix_vec); |
| 1232 | if (status) { |
| 1233 | irdma_destroy_ceq(rf, iwceq); |
| 1234 | goto exit; |
| 1235 | } |
| 1236 | |
| 1237 | irdma_ena_intr(&rf->sc_dev, msix_vec->idx); |
| 1238 | rf->ceqs_count++; |
| 1239 | |
| 1240 | exit: |
| 1241 | if (status && !rf->ceqs_count) { |
| 1242 | kfree(rf->ceqlist); |
| 1243 | rf->ceqlist = NULL; |
| 1244 | return status; |
| 1245 | } |
| 1246 | rf->sc_dev.ceq_valid = true; |
| 1247 | |
| 1248 | return 0; |
| 1249 | } |
| 1250 | |
| 1251 | /** |
| 1252 | * irdma_setup_ceqs - manage the device ceqs and their interrupt resources |
| 1253 | * @rf: RDMA PCI function |
| 1254 | * @vsi: VSI structure for this CEQ |
| 1255 | * |
| 1256 | * Allocate a list for all device completion event queues |
| 1257 | * Create the ceqs and configure their msix interrupt vectors |
| 1258 | * Return 0, if ceqs are successfully set up, otherwise return error |
| 1259 | */ |
| 1260 | static enum irdma_status_code irdma_setup_ceqs(struct irdma_pci_f *rf, |
| 1261 | struct irdma_sc_vsi *vsi) |
| 1262 | { |
| 1263 | u32 i; |
| 1264 | u32 ceq_id; |
| 1265 | struct irdma_ceq *iwceq; |
| 1266 | struct irdma_msix_vector *msix_vec; |
| 1267 | enum irdma_status_code status; |
| 1268 | u32 num_ceqs; |
| 1269 | |
| 1270 | num_ceqs = min(rf->msix_count, rf->sc_dev.hmc_fpm_misc.max_ceqs); |
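| | /* secondary CEQs start after the vector(s) consumed by the AEQ and CEQ 0 */ |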
| 1271 | i = (rf->msix_shared) ? 1 : 2; |
| 1272 | for (ceq_id = 1; i < num_ceqs; i++, ceq_id++) { |
| 1273 | iwceq = &rf->ceqlist[ceq_id]; |
| 1274 | status = irdma_create_ceq(rf, iwceq, ceq_id, vsi); |
| 1275 | if (status) { |
| 1276 | ibdev_dbg(&rf->iwdev->ibdev, |
| 1277 | "ERR: create ceq status = %d\n", status); |
| 1278 | goto del_ceqs; |
| 1279 | } |
| 1280 | spin_lock_init(&iwceq->ce_lock); |
| 1281 | msix_vec = &rf->iw_msixtbl[i]; |
| 1282 | iwceq->irq = msix_vec->irq; |
| 1283 | iwceq->msix_idx = msix_vec->idx; |
| 1284 | status = irdma_cfg_ceq_vector(rf, iwceq, ceq_id, msix_vec); |
| 1285 | if (status) { |
| 1286 | irdma_destroy_ceq(rf, iwceq); |
| 1287 | goto del_ceqs; |
| 1288 | } |
| 1289 | irdma_ena_intr(&rf->sc_dev, msix_vec->idx); |
| 1290 | rf->ceqs_count++; |
| 1291 | } |
| 1292 | |
| 1293 | return 0; |
| 1294 | |
| 1295 | del_ceqs: |
| 1296 | irdma_del_ceqs(rf); |
| 1297 | |
| 1298 | return status; |
| 1299 | } |
| 1300 | |
| 1301 | static enum irdma_status_code irdma_create_virt_aeq(struct irdma_pci_f *rf, |
| 1302 | u32 size) |
| 1303 | { |
| 1304 | enum irdma_status_code status = IRDMA_ERR_NO_MEMORY; |
| 1305 | struct irdma_aeq *aeq = &rf->aeq; |
| 1306 | dma_addr_t *pg_arr; |
| 1307 | u32 pg_cnt; |
| 1308 | |
| 1309 | if (rf->rdma_ver < IRDMA_GEN_2) |
| 1310 | return IRDMA_NOT_SUPPORTED; |
| 1311 | |
| 1312 | aeq->mem.size = sizeof(struct irdma_sc_aeqe) * size; |
| 1313 | aeq->mem.va = vzalloc(aeq->mem.size); |
| 1314 | |
| 1315 | if (!aeq->mem.va) |
| 1316 | return status; |
| 1317 | |
| 1318 | pg_cnt = DIV_ROUND_UP(aeq->mem.size, PAGE_SIZE); |
| 1319 | status = irdma_get_pble(rf->pble_rsrc, &aeq->palloc, pg_cnt, true); |
| 1320 | if (status) { |
| 1321 | vfree(aeq->mem.va); |
| 1322 | return status; |
| 1323 | } |
| 1324 | |
| 1325 | pg_arr = (dma_addr_t *)aeq->palloc.level1.addr; |
| 1326 | status = irdma_map_vm_page_list(&rf->hw, aeq->mem.va, pg_arr, pg_cnt); |
| 1327 | if (status) { |
| 1328 | irdma_free_pble(rf->pble_rsrc, &aeq->palloc); |
| 1329 | vfree(aeq->mem.va); |
| 1330 | return status; |
| 1331 | } |
| 1332 | |
| 1333 | return 0; |
| 1334 | } |
| 1335 | |
| 1336 | /** |
| 1337 | * irdma_create_aeq - create async event queue |
| 1338 | * @rf: RDMA PCI function |
| 1339 | * |
| 1340 | * Return 0, if the aeq and the resources associated with it |
| 1341 | * are successfully created, otherwise return error |
| 1342 | */ |
| 1343 | static enum irdma_status_code irdma_create_aeq(struct irdma_pci_f *rf) |
| 1344 | { |
| 1345 | enum irdma_status_code status; |
| 1346 | struct irdma_aeq_init_info info = {}; |
| 1347 | struct irdma_sc_dev *dev = &rf->sc_dev; |
| 1348 | struct irdma_aeq *aeq = &rf->aeq; |
| 1349 | struct irdma_hmc_info *hmc_info = rf->sc_dev.hmc_info; |
| 1350 | u32 aeq_size; |
| 1351 | u8 multiplier = (rf->protocol_used == IRDMA_IWARP_PROTOCOL_ONLY) ? 2 : 1; |
| 1352 | |
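| | /* size the AEQ for all QPs (doubled for iWARP) plus all CQs, capped at the HW maximum */ |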
| 1353 | aeq_size = multiplier * hmc_info->hmc_obj[IRDMA_HMC_IW_QP].cnt + |
| 1354 | hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].cnt; |
| 1355 | aeq_size = min(aeq_size, dev->hw_attrs.max_hw_aeq_size); |
| 1356 | |
| 1357 | aeq->mem.size = ALIGN(sizeof(struct irdma_sc_aeqe) * aeq_size, |
| 1358 | IRDMA_AEQ_ALIGNMENT); |
| 1359 | aeq->mem.va = dma_alloc_coherent(dev->hw->device, aeq->mem.size, |
| 1360 | &aeq->mem.pa, |
| 1361 | GFP_KERNEL | __GFP_NOWARN); |
| 1362 | if (aeq->mem.va) |
| 1363 | goto skip_virt_aeq; |
| 1364 | |
| 1365 | /* physically mapped aeq failed. setup virtual aeq */ |
| 1366 | status = irdma_create_virt_aeq(rf, aeq_size); |
| 1367 | if (status) |
| 1368 | return status; |
| 1369 | |
| 1370 | info.virtual_map = true; |
| 1371 | aeq->virtual_map = info.virtual_map; |
| 1372 | info.pbl_chunk_size = 1; |
| 1373 | info.first_pm_pbl_idx = aeq->palloc.level1.idx; |
| 1374 | |
| 1375 | skip_virt_aeq: |
| 1376 | info.aeqe_base = aeq->mem.va; |
| 1377 | info.aeq_elem_pa = aeq->mem.pa; |
| 1378 | info.elem_cnt = aeq_size; |
| 1379 | info.dev = dev; |
| 1380 | info.msix_idx = rf->iw_msixtbl->idx; |
| 1381 | status = irdma_sc_aeq_init(&aeq->sc_aeq, &info); |
| 1382 | if (status) |
| 1383 | goto err; |
| 1384 | |
| 1385 | status = irdma_cqp_aeq_cmd(dev, &aeq->sc_aeq, IRDMA_OP_AEQ_CREATE); |
| 1386 | if (status) |
| 1387 | goto err; |
| 1388 | |
| 1389 | return 0; |
| 1390 | |
| 1391 | err: |
| 1392 | if (aeq->virtual_map) { |
| 1393 | irdma_destroy_virt_aeq(rf); |
| 1394 | } else { |
| 1395 | dma_free_coherent(dev->hw->device, aeq->mem.size, aeq->mem.va, |
| 1396 | aeq->mem.pa); |
| 1397 | aeq->mem.va = NULL; |
| 1398 | } |
| 1399 | |
| 1400 | return status; |
| 1401 | } |
| 1402 | |
| 1403 | /** |
| 1404 | * irdma_setup_aeq - set up the device aeq |
| 1405 | * @rf: RDMA PCI function |
| 1406 | * |
| 1407 | * Create the aeq and configure its msix interrupt vector |
| 1408 | * Return 0 if successful, otherwise return error |
| 1409 | */ |
| 1410 | static enum irdma_status_code irdma_setup_aeq(struct irdma_pci_f *rf) |
| 1411 | { |
| 1412 | struct irdma_sc_dev *dev = &rf->sc_dev; |
| 1413 | enum irdma_status_code status; |
| 1414 | |
| 1415 | status = irdma_create_aeq(rf); |
| 1416 | if (status) |
| 1417 | return status; |
| 1418 | |
| 1419 | status = irdma_cfg_aeq_vector(rf); |
| 1420 | if (status) { |
| 1421 | irdma_destroy_aeq(rf); |
| 1422 | return status; |
| 1423 | } |
| 1424 | |
| 1425 | if (!rf->msix_shared) |
| 1426 | irdma_ena_intr(dev, rf->iw_msixtbl[0].idx); |
| 1427 | |
| 1428 | return 0; |
| 1429 | } |
| 1430 | |
| 1431 | /** |
| 1432 | * irdma_initialize_ilq - create iwarp local queue for cm |
| 1433 | * @iwdev: irdma device |
| 1434 | * |
| 1435 | * Return 0 if successful, otherwise return error |
| 1436 | */ |
| 1437 | static enum irdma_status_code irdma_initialize_ilq(struct irdma_device *iwdev) |
| 1438 | { |
| 1439 | struct irdma_puda_rsrc_info info = {}; |
| 1440 | enum irdma_status_code status; |
| 1441 | |
| 1442 | info.type = IRDMA_PUDA_RSRC_TYPE_ILQ; |
| 1443 | info.cq_id = 1; |
| 1444 | info.qp_id = 1; |
| 1445 | info.count = 1; |
| 1446 | info.pd_id = 1; |
| 1447 | info.abi_ver = IRDMA_ABI_VER; |
| 1448 | info.sq_size = min(iwdev->rf->max_qp / 2, (u32)32768); |
| 1449 | info.rq_size = info.sq_size; |
| 1450 | info.buf_size = 1024; |
| 1451 | info.tx_buf_cnt = 2 * info.sq_size; |
| 1452 | info.receive = irdma_receive_ilq; |
| 1453 | info.xmit_complete = irdma_free_sqbuf; |
| 1454 | status = irdma_puda_create_rsrc(&iwdev->vsi, &info); |
| 1455 | if (status) |
| 1456 | ibdev_dbg(&iwdev->ibdev, "ERR: ilq create fail\n"); |
| 1457 | |
| 1458 | return status; |
| 1459 | } |
| 1460 | |
| 1461 | /** |
| 1462 | * irdma_initialize_ieq - create iwarp exception queue |
| 1463 | * @iwdev: irdma device |
| 1464 | * |
| 1465 | * Return 0 if successful, otherwise return error |
| 1466 | */ |
| 1467 | static enum irdma_status_code irdma_initialize_ieq(struct irdma_device *iwdev) |
| 1468 | { |
| 1469 | struct irdma_puda_rsrc_info info = {}; |
| 1470 | enum irdma_status_code status; |
| 1471 | |
| 1472 | info.type = IRDMA_PUDA_RSRC_TYPE_IEQ; |
| 1473 | info.cq_id = 2; |
| 1474 | info.qp_id = iwdev->vsi.exception_lan_q; |
| 1475 | info.count = 1; |
| 1476 | info.pd_id = 2; |
| 1477 | info.abi_ver = IRDMA_ABI_VER; |
| 1478 | info.sq_size = min(iwdev->rf->max_qp / 2, (u32)32768); |
| 1479 | info.rq_size = info.sq_size; |
| 1480 | info.buf_size = iwdev->vsi.mtu + IRDMA_IPV4_PAD; |
| 1481 | info.tx_buf_cnt = 4096; |
| 1482 | status = irdma_puda_create_rsrc(&iwdev->vsi, &info); |
| 1483 | if (status) |
| 1484 | ibdev_dbg(&iwdev->ibdev, "ERR: ieq create fail\n"); |
| 1485 | |
| 1486 | return status; |
| 1487 | } |
| 1488 | |
| 1489 | /** |
| 1490 | * irdma_reinitialize_ieq - destroy and re-create ieq |
| 1491 | * @vsi: VSI structure |
| 1492 | */ |
| 1493 | void irdma_reinitialize_ieq(struct irdma_sc_vsi *vsi) |
| 1494 | { |
| 1495 | struct irdma_device *iwdev = vsi->back_vsi; |
| 1496 | struct irdma_pci_f *rf = iwdev->rf; |
| 1497 | |
| 1498 | irdma_puda_dele_rsrc(vsi, IRDMA_PUDA_RSRC_TYPE_IEQ, false); |
| 1499 | if (irdma_initialize_ieq(iwdev)) { |
| 1500 | iwdev->rf->reset = true; |
| 1501 | rf->gen_ops.request_reset(rf); |
| 1502 | } |
| 1503 | } |
| 1504 | |
| 1505 | /** |
| 1506 | * irdma_hmc_setup - create hmc objects for the device |
| 1507 | * @rf: RDMA PCI function |
| 1508 | * |
| 1509 | * Set up the device private memory space for the number and size of |
| 1510 | * the hmc objects and create the objects |
| 1511 | * Return 0 if successful, otherwise return error |
| 1512 | */ |
| 1513 | static enum irdma_status_code irdma_hmc_setup(struct irdma_pci_f *rf) |
| 1514 | { |
| 1515 | enum irdma_status_code status; |
| 1516 | u32 qpcnt; |
| 1517 | |
| 1518 | if (rf->rdma_ver == IRDMA_GEN_1) |
| 1519 | qpcnt = rsrc_limits_table[rf->limits_sel].qplimit * 2; |
| 1520 | else |
| 1521 | qpcnt = rsrc_limits_table[rf->limits_sel].qplimit; |
| 1522 | |
| 1523 | rf->sd_type = IRDMA_SD_TYPE_DIRECT; |
| 1524 | status = irdma_cfg_fpm_val(&rf->sc_dev, qpcnt); |
| 1525 | if (status) |
| 1526 | return status; |
| 1527 | |
| 1528 | status = irdma_create_hmc_objs(rf, true, rf->rdma_ver); |
| 1529 | |
| 1530 | return status; |
| 1531 | } |
| 1532 | |
| 1533 | /** |
| 1534 | * irdma_del_init_mem - deallocate memory resources |
| 1535 | * @rf: RDMA PCI function |
| 1536 | */ |
| 1537 | static void irdma_del_init_mem(struct irdma_pci_f *rf) |
| 1538 | { |
| 1539 | struct irdma_sc_dev *dev = &rf->sc_dev; |
| 1540 | |
| 1541 | kfree(dev->hmc_info->sd_table.sd_entry); |
| 1542 | dev->hmc_info->sd_table.sd_entry = NULL; |
| 1543 | kfree(rf->mem_rsrc); |
| 1544 | rf->mem_rsrc = NULL; |
| 1545 | dma_free_coherent(rf->hw.device, rf->obj_mem.size, rf->obj_mem.va, |
| 1546 | rf->obj_mem.pa); |
| 1547 | rf->obj_mem.va = NULL; |
| 1548 | if (rf->rdma_ver != IRDMA_GEN_1) { |
| 1549 | kfree(rf->allocated_ws_nodes); |
| 1550 | rf->allocated_ws_nodes = NULL; |
| 1551 | } |
| 1552 | kfree(rf->ceqlist); |
| 1553 | rf->ceqlist = NULL; |
| 1554 | kfree(rf->iw_msixtbl); |
| 1555 | rf->iw_msixtbl = NULL; |
| 1556 | kfree(rf->hmc_info_mem); |
| 1557 | rf->hmc_info_mem = NULL; |
| 1558 | } |
| 1559 | |
| 1560 | /** |
| 1561 | * irdma_initialize_dev - initialize device |
| 1562 | * @rf: RDMA PCI function |
| 1563 | * |
| 1564 | * Allocate memory for the hmc objects and initialize iwdev |
| 1565 | * Return 0 if successful, otherwise clean up the resources |
| 1566 | * and return error |
| 1567 | */ |
| 1568 | static enum irdma_status_code irdma_initialize_dev(struct irdma_pci_f *rf) |
| 1569 | { |
| 1570 | enum irdma_status_code status; |
| 1571 | struct irdma_sc_dev *dev = &rf->sc_dev; |
| 1572 | struct irdma_device_init_info info = {}; |
| 1573 | struct irdma_dma_mem mem; |
| 1574 | u32 size; |
| 1575 | |
| 1576 | size = sizeof(struct irdma_hmc_pble_rsrc) + |
| 1577 | sizeof(struct irdma_hmc_info) + |
| 1578 | (sizeof(struct irdma_hmc_obj_info) * IRDMA_HMC_IW_MAX); |
| 1579 | |
| 1580 | rf->hmc_info_mem = kzalloc(size, GFP_KERNEL); |
| 1581 | if (!rf->hmc_info_mem) |
| 1582 | return IRDMA_ERR_NO_MEMORY; |
| 1583 | |
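| | /* The single hmc_info_mem allocation above is carved up here: the PBLE |
| | * resource struct first, then the HMC object info array used by hmc_info. |
| | */ |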
| 1584 | rf->pble_rsrc = (struct irdma_hmc_pble_rsrc *)rf->hmc_info_mem; |
| 1585 | dev->hmc_info = &rf->hw.hmc; |
| 1586 | dev->hmc_info->hmc_obj = (struct irdma_hmc_obj_info *) |
| 1587 | (rf->pble_rsrc + 1); |
| 1588 | |
| 1589 | status = irdma_obj_aligned_mem(rf, &mem, IRDMA_QUERY_FPM_BUF_SIZE, |
| 1590 | IRDMA_FPM_QUERY_BUF_ALIGNMENT_M); |
| 1591 | if (status) |
| 1592 | goto error; |
| 1593 | |
| 1594 | info.fpm_query_buf_pa = mem.pa; |
| 1595 | info.fpm_query_buf = mem.va; |
| 1596 | |
| 1597 | status = irdma_obj_aligned_mem(rf, &mem, IRDMA_COMMIT_FPM_BUF_SIZE, |
| 1598 | IRDMA_FPM_COMMIT_BUF_ALIGNMENT_M); |
| 1599 | if (status) |
| 1600 | goto error; |
| 1601 | |
| 1602 | info.fpm_commit_buf_pa = mem.pa; |
| 1603 | info.fpm_commit_buf = mem.va; |
| 1604 | |
| 1605 | info.bar0 = rf->hw.hw_addr; |
| 1606 | info.hmc_fn_id = PCI_FUNC(rf->pcidev->devfn); |
| 1607 | info.hw = &rf->hw; |
| 1608 | status = irdma_sc_dev_init(rf->rdma_ver, &rf->sc_dev, &info); |
| 1609 | if (status) |
| 1610 | goto error; |
| 1611 | |
| 1612 | return status; |
| 1613 | error: |
| 1614 | kfree(rf->hmc_info_mem); |
| 1615 | rf->hmc_info_mem = NULL; |
| 1616 | |
| 1617 | return status; |
| 1618 | } |
| 1619 | |
| 1620 | /** |
| 1621 | * irdma_rt_deinit_hw - clean up the irdma device resources |
| 1622 | * @iwdev: irdma device |
| 1623 | * |
| 1624 | * remove the mac ip entry and ipv4/ipv6 addresses, destroy the |
| 1625 | * device queues and free the pble and the hmc objects |
| 1626 | */ |
| 1627 | void irdma_rt_deinit_hw(struct irdma_device *iwdev) |
| 1628 | { |
| 1629 | ibdev_dbg(&iwdev->ibdev, "INIT: state = %d\n", iwdev->init_state); |
| 1630 | |
| 1631 | switch (iwdev->init_state) { |
| 1632 | case IP_ADDR_REGISTERED: |
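| | /* Only GEN_1 programs a local MAC table entry during bring-up (see |
| | * irdma_alloc_set_mac() in irdma_rt_init_hw), so only GEN_1 removes it here. |
| | */ |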
| 1633 | if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1) |
| 1634 | irdma_del_local_mac_entry(iwdev->rf, |
| 1635 | (u8)iwdev->mac_ip_table_idx); |
| 1636 | fallthrough; |
| 1637 | case AEQ_CREATED: |
| 1638 | case PBLE_CHUNK_MEM: |
| 1639 | case CEQS_CREATED: |
| 1640 | case IEQ_CREATED: |
| 1641 | if (!iwdev->roce_mode) |
| 1642 | irdma_puda_dele_rsrc(&iwdev->vsi, IRDMA_PUDA_RSRC_TYPE_IEQ, |
Sindhu Devale | 5b1e985 | 2021-09-16 14:12:19 -0500 | [diff] [blame] | 1643 | iwdev->rf->reset); |
Mustafa Ismail | 44d9e52 | 2021-06-02 15:51:24 -0500 | [diff] [blame] | 1644 | fallthrough; |
| 1645 | case ILQ_CREATED: |
| 1646 | if (!iwdev->roce_mode) |
| 1647 | irdma_puda_dele_rsrc(&iwdev->vsi, |
| 1648 | IRDMA_PUDA_RSRC_TYPE_ILQ, |
Sindhu Devale | 5b1e985 | 2021-09-16 14:12:19 -0500 | [diff] [blame] | 1649 | iwdev->rf->reset); |
Mustafa Ismail | 44d9e52 | 2021-06-02 15:51:24 -0500 | [diff] [blame] | 1650 | break; |
| 1651 | default: |
| 1652 | ibdev_warn(&iwdev->ibdev, "bad init_state = %d\n", iwdev->init_state); |
| 1653 | break; |
| 1654 | } |
| 1655 | |
| 1656 | irdma_cleanup_cm_core(&iwdev->cm_core); |
| 1657 | if (iwdev->vsi.pestat) { |
| 1658 | irdma_vsi_stats_free(&iwdev->vsi); |
| 1659 | kfree(iwdev->vsi.pestat); |
| 1660 | } |
| 1661 | if (iwdev->cleanup_wq) |
| 1662 | destroy_workqueue(iwdev->cleanup_wq); |
| 1663 | } |
| 1664 | |
| 1665 | static enum irdma_status_code irdma_setup_init_state(struct irdma_pci_f *rf) |
| 1666 | { |
| 1667 | enum irdma_status_code status; |
| 1668 | |
| 1669 | status = irdma_save_msix_info(rf); |
| 1670 | if (status) |
| 1671 | return status; |
| 1672 | |
| 1673 | rf->hw.device = &rf->pcidev->dev; |
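| | /* obj_mem is a small coherent DMA region; rf->obj_next tracks |
| | * sub-allocations from it for aligned objects such as the FPM query and |
| | * commit buffers (see irdma_initialize_dev()). |
| | */ |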
| 1674 | rf->obj_mem.size = ALIGN(8192, IRDMA_HW_PAGE_SIZE); |
| 1675 | rf->obj_mem.va = dma_alloc_coherent(rf->hw.device, rf->obj_mem.size, |
| 1676 | &rf->obj_mem.pa, GFP_KERNEL); |
| 1677 | if (!rf->obj_mem.va) { |
| 1678 | status = IRDMA_ERR_NO_MEMORY; |
| 1679 | goto clean_msixtbl; |
| 1680 | } |
| 1681 | |
| 1682 | rf->obj_next = rf->obj_mem; |
| 1683 | status = irdma_initialize_dev(rf); |
| 1684 | if (status) |
| 1685 | goto clean_obj_mem; |
| 1686 | |
| 1687 | return 0; |
| 1688 | |
| 1689 | clean_obj_mem: |
| 1690 | dma_free_coherent(rf->hw.device, rf->obj_mem.size, rf->obj_mem.va, |
| 1691 | rf->obj_mem.pa); |
| 1692 | rf->obj_mem.va = NULL; |
| 1693 | clean_msixtbl: |
| 1694 | kfree(rf->iw_msixtbl); |
| 1695 | rf->iw_msixtbl = NULL; |
| 1696 | return status; |
| 1697 | } |
| 1698 | |
| 1699 | /** |
| 1700 | * irdma_get_used_rsrc - determine resources used internally |
| 1701 | * @iwdev: irdma device |
| 1702 | * |
| 1703 | * Called at the end of open to get all internal allocations |
| 1704 | */ |
| 1705 | static void irdma_get_used_rsrc(struct irdma_device *iwdev) |
| 1706 | { |
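| | /* At this point internal allocations are contiguous from index 0, so the |
| | * first clear bit in each bitmap is also the number of entries in use. |
| | */ |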
| 1707 | iwdev->rf->used_pds = find_next_zero_bit(iwdev->rf->allocated_pds, |
| 1708 | iwdev->rf->max_pd, 0); |
| 1709 | iwdev->rf->used_qps = find_next_zero_bit(iwdev->rf->allocated_qps, |
| 1710 | iwdev->rf->max_qp, 0); |
| 1711 | iwdev->rf->used_cqs = find_next_zero_bit(iwdev->rf->allocated_cqs, |
| 1712 | iwdev->rf->max_cq, 0); |
| 1713 | iwdev->rf->used_mrs = find_next_zero_bit(iwdev->rf->allocated_mrs, |
| 1714 | iwdev->rf->max_mr, 0); |
| 1715 | } |
| 1716 | |
| 1717 | void irdma_ctrl_deinit_hw(struct irdma_pci_f *rf) |
| 1718 | { |
| 1719 | enum init_completion_state state = rf->init_state; |
| 1720 | |
| 1721 | rf->init_state = INVALID_STATE; |
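| | /* Runtime resources (AEQ, PBLE backing memory, CEQs) are torn down first, |
| | * in reverse order of their creation in irdma_rt_init_hw(). |
| | */ |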
| 1722 | if (rf->rsrc_created) { |
| 1723 | irdma_destroy_aeq(rf); |
| 1724 | irdma_destroy_pble_prm(rf->pble_rsrc); |
| 1725 | irdma_del_ceqs(rf); |
| 1726 | rf->rsrc_created = false; |
| 1727 | } |
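| | /* Each case falls through so that everything created in the earlier init |
| | * states is also destroyed. |
| | */ |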
| 1728 | switch (state) { |
| 1729 | case CEQ0_CREATED: |
| 1730 | irdma_del_ceq_0(rf); |
| 1731 | fallthrough; |
| 1732 | case CCQ_CREATED: |
| 1733 | irdma_destroy_ccq(rf); |
| 1734 | fallthrough; |
| 1735 | case HW_RSRC_INITIALIZED: |
| 1736 | case HMC_OBJS_CREATED: |
| 1737 | irdma_del_hmc_objects(&rf->sc_dev, rf->sc_dev.hmc_info, true, |
| 1738 | rf->reset, rf->rdma_ver); |
| 1739 | fallthrough; |
| 1740 | case CQP_CREATED: |
| 1741 | irdma_destroy_cqp(rf, true); |
| 1742 | fallthrough; |
| 1743 | case INITIAL_STATE: |
| 1744 | irdma_del_init_mem(rf); |
| 1745 | break; |
| 1746 | case INVALID_STATE: |
| 1747 | default: |
| 1748 | ibdev_warn(&rf->iwdev->ibdev, "bad init_state = %d\n", rf->init_state); |
| 1749 | break; |
| 1750 | } |
| 1751 | } |
| 1752 | |
| 1753 | /** |
| 1754 | * irdma_rt_init_hw - Initializes runtime portion of HW |
| 1755 | * @iwdev: irdma device |
| 1756 | * @l2params: qos, tc, mtu info from netdev driver |
| 1757 | * |
| 1758 | * Create device queues ILQ, IEQ, CEQs and PBLEs. Set up irdma |
| 1759 | * device resource objects. |
| 1760 | */ |
| 1761 | enum irdma_status_code irdma_rt_init_hw(struct irdma_device *iwdev, |
| 1762 | struct irdma_l2params *l2params) |
| 1763 | { |
| 1764 | struct irdma_pci_f *rf = iwdev->rf; |
| 1765 | struct irdma_sc_dev *dev = &rf->sc_dev; |
| 1766 | enum irdma_status_code status; |
| 1767 | struct irdma_vsi_init_info vsi_info = {}; |
| 1768 | struct irdma_vsi_stats_info stats_info = {}; |
| 1769 | |
| 1770 | vsi_info.dev = dev; |
| 1771 | vsi_info.back_vsi = iwdev; |
| 1772 | vsi_info.params = l2params; |
| 1773 | vsi_info.pf_data_vsi_num = iwdev->vsi_num; |
| 1774 | vsi_info.register_qset = rf->gen_ops.register_qset; |
| 1775 | vsi_info.unregister_qset = rf->gen_ops.unregister_qset; |
| 1776 | vsi_info.exception_lan_q = 2; |
| 1777 | irdma_sc_vsi_init(&iwdev->vsi, &vsi_info); |
| 1778 | |
| 1779 | status = irdma_setup_cm_core(iwdev, rf->rdma_ver); |
| 1780 | if (status) |
| 1781 | return status; |
| 1782 | |
| 1783 | stats_info.pestat = kzalloc(sizeof(*stats_info.pestat), GFP_KERNEL); |
| 1784 | if (!stats_info.pestat) { |
| 1785 | irdma_cleanup_cm_core(&iwdev->cm_core); |
| 1786 | return IRDMA_ERR_NO_MEMORY; |
| 1787 | } |
| 1788 | stats_info.fcn_id = dev->hmc_fn_id; |
| 1789 | status = irdma_vsi_stats_init(&iwdev->vsi, &stats_info); |
| 1790 | if (status) { |
| 1791 | irdma_cleanup_cm_core(&iwdev->cm_core); |
| 1792 | kfree(stats_info.pestat); |
| 1793 | return status; |
| 1794 | } |
| 1795 | |
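| | /* The do/while(0) block lets any failing step break out to the common |
| | * error handling below. |
| | */ |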
| 1796 | do { |
| 1797 | if (!iwdev->roce_mode) { |
| 1798 | status = irdma_initialize_ilq(iwdev); |
| 1799 | if (status) |
| 1800 | break; |
| 1801 | iwdev->init_state = ILQ_CREATED; |
| 1802 | status = irdma_initialize_ieq(iwdev); |
| 1803 | if (status) |
| 1804 | break; |
| 1805 | iwdev->init_state = IEQ_CREATED; |
| 1806 | } |
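| | /* CEQs, PBLEs and the AEQ belong to the PCI function (rf) and are only |
| | * created on the first pass; rsrc_created guards re-initialization. |
| | */ |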
| 1807 | if (!rf->rsrc_created) { |
| 1808 | status = irdma_setup_ceqs(rf, &iwdev->vsi); |
| 1809 | if (status) |
| 1810 | break; |
| 1811 | |
| 1812 | iwdev->init_state = CEQS_CREATED; |
| 1813 | |
| 1814 | status = irdma_hmc_init_pble(&rf->sc_dev, |
| 1815 | rf->pble_rsrc); |
| 1816 | if (status) { |
| 1817 | irdma_del_ceqs(rf); |
| 1818 | break; |
| 1819 | } |
| 1820 | |
| 1821 | iwdev->init_state = PBLE_CHUNK_MEM; |
| 1822 | |
| 1823 | status = irdma_setup_aeq(rf); |
| 1824 | if (status) { |
| 1825 | irdma_destroy_pble_prm(rf->pble_rsrc); |
| 1826 | irdma_del_ceqs(rf); |
| 1827 | break; |
| 1828 | } |
| 1829 | iwdev->init_state = AEQ_CREATED; |
| 1830 | rf->rsrc_created = true; |
| 1831 | } |
| 1832 | |
| 1833 | iwdev->device_cap_flags = IB_DEVICE_LOCAL_DMA_LKEY | |
| 1834 | IB_DEVICE_MEM_WINDOW | |
| 1835 | IB_DEVICE_MEM_MGT_EXTENSIONS; |
| 1836 | |
| 1837 | if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1) |
| 1838 | irdma_alloc_set_mac(iwdev); |
| 1839 | irdma_add_ip(iwdev); |
| 1840 | iwdev->init_state = IP_ADDR_REGISTERED; |
| 1841 | |
| 1842 | /* handles async cleanup tasks - disconnect CM, free qp, |
| 1843 | * free cq bufs |
| 1844 | */ |
| 1845 | iwdev->cleanup_wq = alloc_workqueue("irdma-cleanup-wq", |
| 1846 | WQ_UNBOUND, WQ_UNBOUND_MAX_ACTIVE); |
| 1847 | if (!iwdev->cleanup_wq) |
| 1848 | return IRDMA_ERR_NO_MEMORY; |
| 1849 | irdma_get_used_rsrc(iwdev); |
| 1850 | init_waitqueue_head(&iwdev->suspend_wq); |
| 1851 | |
| 1852 | return 0; |
| 1853 | } while (0); |
| 1854 | |
| 1855 | dev_err(&rf->pcidev->dev, "HW runtime init FAIL status = %d last cmpl = %d\n", |
| 1856 | status, iwdev->init_state); |
| 1857 | irdma_rt_deinit_hw(iwdev); |
| 1858 | |
| 1859 | return status; |
| 1860 | } |
| 1861 | |
| 1862 | /** |
| 1863 | * irdma_ctrl_init_hw - Initializes control portion of HW |
| 1864 | * @rf: RDMA PCI function |
| 1865 | * |
| 1866 | * Create admin queues, HMC objects and RF resource objects |
| 1867 | */ |
| 1868 | enum irdma_status_code irdma_ctrl_init_hw(struct irdma_pci_f *rf) |
| 1869 | { |
| 1870 | struct irdma_sc_dev *dev = &rf->sc_dev; |
| 1871 | enum irdma_status_code status; |
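| | |
| | /* As in irdma_rt_init_hw(), failures break out of the do/while(0) block to |
| | * the cleanup path below; init_state records the last completed step for |
| | * irdma_ctrl_deinit_hw() to unwind. |
| | */ |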
| 1872 | do { |
| 1873 | status = irdma_setup_init_state(rf); |
| 1874 | if (status) |
| 1875 | break; |
| 1876 | rf->init_state = INITIAL_STATE; |
| 1877 | |
| 1878 | status = irdma_create_cqp(rf); |
| 1879 | if (status) |
| 1880 | break; |
| 1881 | rf->init_state = CQP_CREATED; |
| 1882 | |
| 1883 | status = irdma_hmc_setup(rf); |
| 1884 | if (status) |
| 1885 | break; |
| 1886 | rf->init_state = HMC_OBJS_CREATED; |
| 1887 | |
| 1888 | status = irdma_initialize_hw_rsrc(rf); |
| 1889 | if (status) |
| 1890 | break; |
| 1891 | rf->init_state = HW_RSRC_INITIALIZED; |
| 1892 | |
| 1893 | status = irdma_create_ccq(rf); |
| 1894 | if (status) |
| 1895 | break; |
| 1896 | rf->init_state = CCQ_CREATED; |
| 1897 | |
| 1898 | dev->feature_info[IRDMA_FEATURE_FW_INFO] = IRDMA_FW_VER_DEFAULT; |
| 1899 | if (rf->rdma_ver != IRDMA_GEN_1) { |
| 1900 | status = irdma_get_rdma_features(dev); |
| 1901 | if (status) |
| 1902 | break; |
| 1903 | } |
| 1904 | |
| 1905 | status = irdma_setup_ceq_0(rf); |
| 1906 | if (status) |
| 1907 | break; |
| 1908 | rf->init_state = CEQ0_CREATED; |
| 1909 | /* Handles processing of CQP completions */ |
| 1910 | rf->cqp_cmpl_wq = alloc_ordered_workqueue("cqp_cmpl_wq", |
| 1911 | WQ_HIGHPRI | WQ_UNBOUND); |
| 1912 | if (!rf->cqp_cmpl_wq) { |
| 1913 | status = IRDMA_ERR_NO_MEMORY; |
| 1914 | break; |
| 1915 | } |
| 1916 | INIT_WORK(&rf->cqp_cmpl_work, cqp_compl_worker); |
| 1917 | irdma_sc_ccq_arm(dev->ccq); |
| 1918 | return 0; |
| 1919 | } while (0); |
| 1920 | |
| 1921 | dev_err(&rf->pcidev->dev, "IRDMA hardware initialization FAILED init_state=%d status=%d\n", |
| 1922 | rf->init_state, status); |
| 1923 | irdma_ctrl_deinit_hw(rf); |
| 1924 | return status; |
| 1925 | } |
| 1926 | |
| 1927 | /** |
| 1928 | * irdma_set_hw_rsrc - set hw memory resources. |
| 1929 | * @rf: RDMA PCI function |
| 1930 | */ |
Zhu Yanjun | 41f5fa9 | 2021-07-13 23:11:29 -0400 | [diff] [blame] | 1931 | static void irdma_set_hw_rsrc(struct irdma_pci_f *rf) |
Mustafa Ismail | 44d9e52 | 2021-06-02 15:51:24 -0500 | [diff] [blame] | 1932 | { |
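| | /* The bitmaps and the QP pointer table all live back to back inside the |
| | * single mem_rsrc allocation sized by irdma_calc_mem_rsrc_size(). |
| | */ |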
| 1933 | rf->allocated_qps = (void *)(rf->mem_rsrc + |
| 1934 | (sizeof(struct irdma_arp_entry) * rf->arp_table_size)); |
| 1935 | rf->allocated_cqs = &rf->allocated_qps[BITS_TO_LONGS(rf->max_qp)]; |
| 1936 | rf->allocated_mrs = &rf->allocated_cqs[BITS_TO_LONGS(rf->max_cq)]; |
| 1937 | rf->allocated_pds = &rf->allocated_mrs[BITS_TO_LONGS(rf->max_mr)]; |
| 1938 | rf->allocated_ahs = &rf->allocated_pds[BITS_TO_LONGS(rf->max_pd)]; |
| 1939 | rf->allocated_mcgs = &rf->allocated_ahs[BITS_TO_LONGS(rf->max_ah)]; |
| 1940 | rf->allocated_arps = &rf->allocated_mcgs[BITS_TO_LONGS(rf->max_mcg)]; |
| 1941 | rf->qp_table = (struct irdma_qp **) |
| 1942 | (&rf->allocated_arps[BITS_TO_LONGS(rf->arp_table_size)]); |
| 1943 | |
| 1944 | spin_lock_init(&rf->rsrc_lock); |
| 1945 | spin_lock_init(&rf->arp_lock); |
| 1946 | spin_lock_init(&rf->qptable_lock); |
| 1947 | spin_lock_init(&rf->qh_list_lock); |
Mustafa Ismail | 44d9e52 | 2021-06-02 15:51:24 -0500 | [diff] [blame] | 1948 | } |
| 1949 | |
| 1950 | /** |
| 1951 | * irdma_calc_mem_rsrc_size - calculate memory resources size. |
| 1952 | * @rf: RDMA PCI function |
| 1953 | */ |
| 1954 | static u32 irdma_calc_mem_rsrc_size(struct irdma_pci_f *rf) |
| 1955 | { |
| 1956 | u32 rsrc_size; |
| 1957 | |
| 1958 | rsrc_size = sizeof(struct irdma_arp_entry) * rf->arp_table_size; |
| 1959 | rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_qp); |
| 1960 | rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_mr); |
| 1961 | rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_cq); |
| 1962 | rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_pd); |
| 1963 | rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->arp_table_size); |
| 1964 | rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_ah); |
| 1965 | rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_mcg); |
| 1966 | rsrc_size += sizeof(struct irdma_qp **) * rf->max_qp; |
| 1967 | |
| 1968 | return rsrc_size; |
| 1969 | } |
| 1970 | |
| 1971 | /** |
| 1972 | * irdma_initialize_hw_rsrc - initialize hw resource tracking array |
| 1973 | * @rf: RDMA PCI function |
| 1974 | */ |
| 1975 | u32 irdma_initialize_hw_rsrc(struct irdma_pci_f *rf) |
| 1976 | { |
| 1977 | u32 rsrc_size; |
| 1978 | u32 mrdrvbits; |
| 1979 | u32 ret; |
| 1980 | |
| 1981 | if (rf->rdma_ver != IRDMA_GEN_1) { |
| 1982 | rf->allocated_ws_nodes = |
| 1983 | kcalloc(BITS_TO_LONGS(IRDMA_MAX_WS_NODES), |
| 1984 | sizeof(unsigned long), GFP_KERNEL); |
| 1985 | if (!rf->allocated_ws_nodes) |
| 1986 | return -ENOMEM; |
| 1987 | |
| 1988 | set_bit(0, rf->allocated_ws_nodes); |
| 1989 | rf->max_ws_node_id = IRDMA_MAX_WS_NODES; |
| 1990 | } |
| 1991 | rf->max_cqe = rf->sc_dev.hw_attrs.uk_attrs.max_hw_cq_size; |
| 1992 | rf->max_qp = rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_QP].cnt; |
| 1993 | rf->max_mr = rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_MR].cnt; |
| 1994 | rf->max_cq = rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].cnt; |
| 1995 | rf->max_pd = rf->sc_dev.hw_attrs.max_hw_pds; |
| 1996 | rf->arp_table_size = rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_ARP].cnt; |
| 1997 | rf->max_ah = rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].cnt; |
| 1998 | rf->max_mcg = rf->max_qp; |
| 1999 | |
| 2000 | rsrc_size = irdma_calc_mem_rsrc_size(rf); |
| 2001 | rf->mem_rsrc = kzalloc(rsrc_size, GFP_KERNEL); |
| 2002 | if (!rf->mem_rsrc) { |
| 2003 | ret = -ENOMEM; |
| 2004 | goto mem_rsrc_kzalloc_fail; |
| 2005 | } |
| 2006 | |
| 2007 | rf->arp_table = (struct irdma_arp_entry *)rf->mem_rsrc; |
| 2008 | |
Zhu Yanjun | 41f5fa9 | 2021-07-13 23:11:29 -0400 | [diff] [blame] | 2009 | irdma_set_hw_rsrc(rf); |
Mustafa Ismail | 44d9e52 | 2021-06-02 15:51:24 -0500 | [diff] [blame] | 2010 | |
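| | /* Bit 0 of every bitmap is reserved so index 0 is never handed out; |
| | * QPs 1/2 (ILQ/IEQ) and the matching low CQ and PD indexes are reserved |
| | * as well. |
| | */ |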
| 2011 | set_bit(0, rf->allocated_mrs); |
| 2012 | set_bit(0, rf->allocated_qps); |
| 2013 | set_bit(0, rf->allocated_cqs); |
| 2014 | set_bit(0, rf->allocated_pds); |
| 2015 | set_bit(0, rf->allocated_arps); |
| 2016 | set_bit(0, rf->allocated_ahs); |
| 2017 | set_bit(0, rf->allocated_mcgs); |
| 2018 | set_bit(2, rf->allocated_qps); /* qp 2 IEQ */ |
| 2019 | set_bit(1, rf->allocated_qps); /* qp 1 ILQ */ |
| 2020 | set_bit(1, rf->allocated_cqs); |
| 2021 | set_bit(1, rf->allocated_pds); |
| 2022 | set_bit(2, rf->allocated_cqs); |
| 2023 | set_bit(2, rf->allocated_pds); |
| 2024 | |
| 2025 | INIT_LIST_HEAD(&rf->mc_qht_list.list); |
| 2026 | /* stag index mask has a minimum of 14 bits */ |
| 2027 | mrdrvbits = 24 - max(get_count_order(rf->max_mr), 14); |
| 2028 | rf->mr_stagmask = ~(((1 << mrdrvbits) - 1) << (32 - mrdrvbits)); |
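| | /* Example: with max_mr = 64K, get_count_order() is 16, so mrdrvbits = 8 |
| | * and mr_stagmask = 0x00FFFFFF (the top 8 bits are masked off). |
| | */ |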
| 2029 | |
| 2030 | return 0; |
| 2031 | |
Mustafa Ismail | 44d9e52 | 2021-06-02 15:51:24 -0500 | [diff] [blame] | 2032 | mem_rsrc_kzalloc_fail: |
| 2033 | kfree(rf->allocated_ws_nodes); |
| 2034 | rf->allocated_ws_nodes = NULL; |
| 2035 | |
| 2036 | return ret; |
| 2037 | } |
| 2038 | |
| 2039 | /** |
| 2040 | * irdma_cqp_ce_handler - handle cqp completions |
| 2041 | * @rf: RDMA PCI function |
| 2042 | * @cq: cq for cqp completions |
| 2043 | */ |
| 2044 | void irdma_cqp_ce_handler(struct irdma_pci_f *rf, struct irdma_sc_cq *cq) |
| 2045 | { |
| 2046 | struct irdma_cqp_request *cqp_request; |
| 2047 | struct irdma_sc_dev *dev = &rf->sc_dev; |
| 2048 | u32 cqe_count = 0; |
| 2049 | struct irdma_ccq_cqe_info info; |
| 2050 | unsigned long flags; |
| 2051 | int ret; |
| 2052 | |
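| | /* Drain all pending CQP completions; the CCQ is re-armed below only if at |
| | * least one CQE was processed. |
| | */ |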
| 2053 | do { |
| 2054 | memset(&info, 0, sizeof(info)); |
| 2055 | spin_lock_irqsave(&rf->cqp.compl_lock, flags); |
| 2056 | ret = irdma_sc_ccq_get_cqe_info(cq, &info); |
| 2057 | spin_unlock_irqrestore(&rf->cqp.compl_lock, flags); |
| 2058 | if (ret) |
| 2059 | break; |
| 2060 | |
| 2061 | cqp_request = (struct irdma_cqp_request *) |
| 2062 | (unsigned long)info.scratch; |
| 2063 | if (info.error && irdma_cqp_crit_err(dev, cqp_request->info.cqp_cmd, |
| 2064 | info.maj_err_code, |
| 2065 | info.min_err_code)) |
| 2066 | ibdev_err(&rf->iwdev->ibdev, "cqp opcode = 0x%x maj_err_code = 0x%x min_err_code = 0x%x\n", |
| 2067 | info.op_code, info.maj_err_code, info.min_err_code); |
| 2068 | if (cqp_request) { |
| 2069 | cqp_request->compl_info.maj_err_code = info.maj_err_code; |
| 2070 | cqp_request->compl_info.min_err_code = info.min_err_code; |
| 2071 | cqp_request->compl_info.op_ret_val = info.op_ret_val; |
| 2072 | cqp_request->compl_info.error = info.error; |
| 2073 | |
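| | /* Waiting requests are completed by waking the submitter, fire-and-forget |
| | * requests run their callback; both paths drop a reference with |
| | * irdma_put_cqp_request(). |
| | */ |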
| 2074 | if (cqp_request->waiting) { |
| 2075 | cqp_request->request_done = true; |
| 2076 | wake_up(&cqp_request->waitq); |
| 2077 | irdma_put_cqp_request(&rf->cqp, cqp_request); |
| 2078 | } else { |
| 2079 | if (cqp_request->callback_fcn) |
| 2080 | cqp_request->callback_fcn(cqp_request); |
| 2081 | irdma_put_cqp_request(&rf->cqp, cqp_request); |
| 2082 | } |
| 2083 | } |
| 2084 | |
| 2085 | cqe_count++; |
| 2086 | } while (1); |
| 2087 | |
| 2088 | if (cqe_count) { |
| 2089 | irdma_process_bh(dev); |
| 2090 | irdma_sc_ccq_arm(cq); |
| 2091 | } |
| 2092 | } |
| 2093 | |
| 2094 | /** |
| 2095 | * cqp_compl_worker - Handle cqp completions |
| 2096 | * @work: Pointer to work structure |
| 2097 | */ |
| 2098 | void cqp_compl_worker(struct work_struct *work) |
| 2099 | { |
| 2100 | struct irdma_pci_f *rf = container_of(work, struct irdma_pci_f, |
| 2101 | cqp_cmpl_work); |
| 2102 | struct irdma_sc_cq *cq = &rf->ccq.sc_cq; |
| 2103 | |
| 2104 | irdma_cqp_ce_handler(rf, cq); |
| 2105 | } |
| 2106 | |
| 2107 | /** |
| 2108 | * irdma_lookup_apbvt_entry - lookup hash table for an existing apbvt entry corresponding to port |
| 2109 | * @cm_core: cm's core |
| 2110 | * @port: port to identify apbvt entry |
| 2111 | */ |
| 2112 | static struct irdma_apbvt_entry *irdma_lookup_apbvt_entry(struct irdma_cm_core *cm_core, |
| 2113 | u16 port) |
| 2114 | { |
| 2115 | struct irdma_apbvt_entry *entry; |
| 2116 | |
| 2117 | hash_for_each_possible(cm_core->apbvt_hash_tbl, entry, hlist, port) { |
| 2118 | if (entry->port == port) { |
| 2119 | entry->use_cnt++; |
| 2120 | return entry; |
| 2121 | } |
| 2122 | } |
| 2123 | |
| 2124 | return NULL; |
| 2125 | } |
| 2126 | |
| 2127 | /** |
| 2128 | * irdma_next_iw_state - modify qp state |
| 2129 | * @iwqp: iwarp qp to modify |
| 2130 | * @state: next state for qp |
| 2131 | * @del_hash: flag to remove the connection hash entry |
| 2132 | * @term: flags controlling whether a terminate message and/or FIN is sent |
| 2133 | * @termlen: length of the terminate message |
| 2134 | */ |
| 2135 | void irdma_next_iw_state(struct irdma_qp *iwqp, u8 state, u8 del_hash, u8 term, |
| 2136 | u8 termlen) |
| 2137 | { |
| 2138 | struct irdma_modify_qp_info info = {}; |
| 2139 | |
| 2140 | info.next_iwarp_state = state; |
| 2141 | info.remove_hash_idx = del_hash; |
| 2142 | info.cq_num_valid = true; |
| 2143 | info.arp_cache_idx_valid = true; |
| 2144 | info.dont_send_term = true; |
| 2145 | info.dont_send_fin = true; |
| 2146 | info.termlen = termlen; |
| 2147 | |
| 2148 | if (term & IRDMAQP_TERM_SEND_TERM_ONLY) |
| 2149 | info.dont_send_term = false; |
| 2150 | if (term & IRDMAQP_TERM_SEND_FIN_ONLY) |
| 2151 | info.dont_send_fin = false; |
| 2152 | if (iwqp->sc_qp.term_flags && state == IRDMA_QP_STATE_ERROR) |
| 2153 | info.reset_tcp_conn = true; |
| 2154 | iwqp->hw_iwarp_state = state; |
| 2155 | irdma_hw_modify_qp(iwqp->iwdev, iwqp, &info, 0); |
| 2156 | iwqp->iwarp_state = info.next_iwarp_state; |
| 2157 | } |
| 2158 | |
| 2159 | /** |
| 2160 | * irdma_del_local_mac_entry - remove a mac entry from the hw |
| 2161 | * table |
| 2162 | * @rf: RDMA PCI function |
| 2163 | * @idx: the index of the mac ip address to delete |
| 2164 | */ |
| 2165 | void irdma_del_local_mac_entry(struct irdma_pci_f *rf, u16 idx) |
| 2166 | { |
| 2167 | struct irdma_cqp *iwcqp = &rf->cqp; |
| 2168 | struct irdma_cqp_request *cqp_request; |
| 2169 | struct cqp_cmds_info *cqp_info; |
| 2170 | |
| 2171 | cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, true); |
| 2172 | if (!cqp_request) |
| 2173 | return; |
| 2174 | |
| 2175 | cqp_info = &cqp_request->info; |
| 2176 | cqp_info->cqp_cmd = IRDMA_OP_DELETE_LOCAL_MAC_ENTRY; |
| 2177 | cqp_info->post_sq = 1; |
| 2178 | cqp_info->in.u.del_local_mac_entry.cqp = &iwcqp->sc_cqp; |
| 2179 | cqp_info->in.u.del_local_mac_entry.scratch = (uintptr_t)cqp_request; |
| 2180 | cqp_info->in.u.del_local_mac_entry.entry_idx = idx; |
| 2181 | cqp_info->in.u.del_local_mac_entry.ignore_ref_count = 0; |
| 2182 | |
| 2183 | irdma_handle_cqp_op(rf, cqp_request); |
| 2184 | irdma_put_cqp_request(iwcqp, cqp_request); |
| 2185 | } |
| 2186 | |
| 2187 | /** |
| 2188 | * irdma_add_local_mac_entry - add a mac ip address entry to the |
| 2189 | * hw table |
| 2190 | * @rf: RDMA PCI function |
| 2191 | * @mac_addr: pointer to mac address |
| 2192 | * @idx: the index of the mac ip address to add |
| 2193 | */ |
Jakub Kicinski | fd92213 | 2021-10-19 11:26:04 -0700 | [diff] [blame] | 2194 | int irdma_add_local_mac_entry(struct irdma_pci_f *rf, const u8 *mac_addr, u16 idx) |
Mustafa Ismail | 44d9e52 | 2021-06-02 15:51:24 -0500 | [diff] [blame] | 2195 | { |
| 2196 | struct irdma_local_mac_entry_info *info; |
| 2197 | struct irdma_cqp *iwcqp = &rf->cqp; |
| 2198 | struct irdma_cqp_request *cqp_request; |
| 2199 | struct cqp_cmds_info *cqp_info; |
| 2200 | enum irdma_status_code status; |
| 2201 | |
| 2202 | cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, true); |
| 2203 | if (!cqp_request) |
| 2204 | return IRDMA_ERR_NO_MEMORY; |
| 2205 | |
| 2206 | cqp_info = &cqp_request->info; |
| 2207 | cqp_info->post_sq = 1; |
| 2208 | info = &cqp_info->in.u.add_local_mac_entry.info; |
| 2209 | ether_addr_copy(info->mac_addr, mac_addr); |
| 2210 | info->entry_idx = idx; |
| 2211 | cqp_info->in.u.add_local_mac_entry.scratch = (uintptr_t)cqp_request; |
| 2212 | cqp_info->cqp_cmd = IRDMA_OP_ADD_LOCAL_MAC_ENTRY; |
| 2213 | cqp_info->in.u.add_local_mac_entry.cqp = &iwcqp->sc_cqp; |
| 2214 | cqp_info->in.u.add_local_mac_entry.scratch = (uintptr_t)cqp_request; |
| 2215 | |
| 2216 | status = irdma_handle_cqp_op(rf, cqp_request); |
| 2217 | irdma_put_cqp_request(iwcqp, cqp_request); |
| 2218 | |
| 2219 | return status; |
| 2220 | } |
| 2221 | |
| 2222 | /** |
| 2223 | * irdma_alloc_local_mac_entry - allocate a mac entry |
| 2224 | * @rf: RDMA PCI function |
| 2225 | * @mac_tbl_idx: the index of the new mac address |
| 2226 | * |
| 2227 | * Allocate a mac address entry and update the mac_tbl_idx |
| 2228 | * to hold the index of the newly created mac address |
| 2229 | * Return 0 if successful, otherwise return error |
| 2230 | */ |
| 2231 | int irdma_alloc_local_mac_entry(struct irdma_pci_f *rf, u16 *mac_tbl_idx) |
| 2232 | { |
| 2233 | struct irdma_cqp *iwcqp = &rf->cqp; |
| 2234 | struct irdma_cqp_request *cqp_request; |
| 2235 | struct cqp_cmds_info *cqp_info; |
| 2236 | enum irdma_status_code status = 0; |
| 2237 | |
| 2238 | cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, true); |
| 2239 | if (!cqp_request) |
| 2240 | return IRDMA_ERR_NO_MEMORY; |
| 2241 | |
| 2242 | cqp_info = &cqp_request->info; |
| 2243 | cqp_info->cqp_cmd = IRDMA_OP_ALLOC_LOCAL_MAC_ENTRY; |
| 2244 | cqp_info->post_sq = 1; |
| 2245 | cqp_info->in.u.alloc_local_mac_entry.cqp = &iwcqp->sc_cqp; |
| 2246 | cqp_info->in.u.alloc_local_mac_entry.scratch = (uintptr_t)cqp_request; |
| 2247 | status = irdma_handle_cqp_op(rf, cqp_request); |
| 2248 | if (!status) |
| 2249 | *mac_tbl_idx = (u16)cqp_request->compl_info.op_ret_val; |
| 2250 | |
| 2251 | irdma_put_cqp_request(iwcqp, cqp_request); |
| 2252 | |
| 2253 | return status; |
| 2254 | } |
| 2255 | |
| 2256 | /** |
| 2257 | * irdma_cqp_manage_apbvt_cmd - send cqp command manage apbvt |
| 2258 | * @iwdev: irdma device |
| 2259 | * @accel_local_port: port for apbvt |
| 2260 | * @add_port: true to add the port, false to delete it |
| 2261 | */ |
| 2262 | static enum irdma_status_code |
| 2263 | irdma_cqp_manage_apbvt_cmd(struct irdma_device *iwdev, u16 accel_local_port, |
| 2264 | bool add_port) |
| 2265 | { |
| 2266 | struct irdma_apbvt_info *info; |
| 2267 | struct irdma_cqp_request *cqp_request; |
| 2268 | struct cqp_cmds_info *cqp_info; |
| 2269 | enum irdma_status_code status; |
| 2270 | |
| 2271 | cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, add_port); |
| 2272 | if (!cqp_request) |
| 2273 | return IRDMA_ERR_NO_MEMORY; |
| 2274 | |
| 2275 | cqp_info = &cqp_request->info; |
| 2276 | info = &cqp_info->in.u.manage_apbvt_entry.info; |
| 2277 | memset(info, 0, sizeof(*info)); |
| 2278 | info->add = add_port; |
| 2279 | info->port = accel_local_port; |
| 2280 | cqp_info->cqp_cmd = IRDMA_OP_MANAGE_APBVT_ENTRY; |
| 2281 | cqp_info->post_sq = 1; |
| 2282 | cqp_info->in.u.manage_apbvt_entry.cqp = &iwdev->rf->cqp.sc_cqp; |
| 2283 | cqp_info->in.u.manage_apbvt_entry.scratch = (uintptr_t)cqp_request; |
| 2284 | ibdev_dbg(&iwdev->ibdev, "DEV: %s: port=0x%04x\n", |
| 2285 | (!add_port) ? "DELETE" : "ADD", accel_local_port); |
| 2286 | |
| 2287 | status = irdma_handle_cqp_op(iwdev->rf, cqp_request); |
| 2288 | irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request); |
| 2289 | |
| 2290 | return status; |
| 2291 | } |
| 2292 | |
| 2293 | /** |
| 2294 | * irdma_add_apbvt - add tcp port to HW apbvt table |
| 2295 | * @iwdev: irdma device |
| 2296 | * @port: port for apbvt |
| 2297 | */ |
| 2298 | struct irdma_apbvt_entry *irdma_add_apbvt(struct irdma_device *iwdev, u16 port) |
| 2299 | { |
| 2300 | struct irdma_cm_core *cm_core = &iwdev->cm_core; |
| 2301 | struct irdma_apbvt_entry *entry; |
| 2302 | unsigned long flags; |
| 2303 | |
| 2304 | spin_lock_irqsave(&cm_core->apbvt_lock, flags); |
| 2305 | entry = irdma_lookup_apbvt_entry(cm_core, port); |
| 2306 | if (entry) { |
| 2307 | spin_unlock_irqrestore(&cm_core->apbvt_lock, flags); |
| 2308 | return entry; |
| 2309 | } |
| 2310 | |
| 2311 | entry = kzalloc(sizeof(*entry), GFP_ATOMIC); |
| 2312 | if (!entry) { |
| 2313 | spin_unlock_irqrestore(&cm_core->apbvt_lock, flags); |
| 2314 | return NULL; |
| 2315 | } |
| 2316 | |
| 2317 | entry->port = port; |
| 2318 | entry->use_cnt = 1; |
| 2319 | hash_add(cm_core->apbvt_hash_tbl, &entry->hlist, entry->port); |
| 2320 | spin_unlock_irqrestore(&cm_core->apbvt_lock, flags); |
| 2321 | |
| 2322 | if (irdma_cqp_manage_apbvt_cmd(iwdev, port, true)) { |
| 2323 | kfree(entry); |
| 2324 | return NULL; |
| 2325 | } |
| 2326 | |
| 2327 | return entry; |
| 2328 | } |
| 2329 | |
| 2330 | /** |
| 2331 | * irdma_del_apbvt - delete tcp port from HW apbvt table |
| 2332 | * @iwdev: irdma device |
| 2333 | * @entry: apbvt entry object |
| 2334 | */ |
| 2335 | void irdma_del_apbvt(struct irdma_device *iwdev, |
| 2336 | struct irdma_apbvt_entry *entry) |
| 2337 | { |
| 2338 | struct irdma_cm_core *cm_core = &iwdev->cm_core; |
| 2339 | unsigned long flags; |
| 2340 | |
| 2341 | spin_lock_irqsave(&cm_core->apbvt_lock, flags); |
| 2342 | if (--entry->use_cnt) { |
| 2343 | spin_unlock_irqrestore(&cm_core->apbvt_lock, flags); |
| 2344 | return; |
| 2345 | } |
| 2346 | |
| 2347 | hash_del(&entry->hlist); |
| 2348 | /* apbvt_lock is held across CQP delete APBVT OP (non-waiting) to |
| 2349 | * protect against race where add APBVT CQP can race ahead of the delete |
| 2350 | * APBVT for same port. |
| 2351 | */ |
| 2352 | irdma_cqp_manage_apbvt_cmd(iwdev, entry->port, false); |
| 2353 | kfree(entry); |
| 2354 | spin_unlock_irqrestore(&cm_core->apbvt_lock, flags); |
| 2355 | } |
| 2356 | |
| 2357 | /** |
| 2358 | * irdma_manage_arp_cache - manage hw arp cache |
| 2359 | * @rf: RDMA PCI function |
| 2360 | * @mac_addr: mac address ptr |
| 2361 | * @ip_addr: ip addr for arp cache |
| 2362 | * @ipv4: flag indicating IPv4 |
| 2363 | * @action: add, delete or modify |
| 2364 | */ |
Jakub Kicinski | fd92213 | 2021-10-19 11:26:04 -0700 | [diff] [blame] | 2365 | void irdma_manage_arp_cache(struct irdma_pci_f *rf, |
| 2366 | const unsigned char *mac_addr, |
Mustafa Ismail | 44d9e52 | 2021-06-02 15:51:24 -0500 | [diff] [blame] | 2367 | u32 *ip_addr, bool ipv4, u32 action) |
| 2368 | { |
| 2369 | struct irdma_add_arp_cache_entry_info *info; |
| 2370 | struct irdma_cqp_request *cqp_request; |
| 2371 | struct cqp_cmds_info *cqp_info; |
| 2372 | int arp_index; |
| 2373 | |
| 2374 | arp_index = irdma_arp_table(rf, ip_addr, ipv4, mac_addr, action); |
| 2375 | if (arp_index == -1) |
| 2376 | return; |
| 2377 | |
| 2378 | cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, false); |
| 2379 | if (!cqp_request) |
| 2380 | return; |
| 2381 | |
| 2382 | cqp_info = &cqp_request->info; |
| 2383 | if (action == IRDMA_ARP_ADD) { |
| 2384 | cqp_info->cqp_cmd = IRDMA_OP_ADD_ARP_CACHE_ENTRY; |
| 2385 | info = &cqp_info->in.u.add_arp_cache_entry.info; |
| 2386 | memset(info, 0, sizeof(*info)); |
| 2387 | info->arp_index = (u16)arp_index; |
| 2388 | info->permanent = true; |
| 2389 | ether_addr_copy(info->mac_addr, mac_addr); |
| 2390 | cqp_info->in.u.add_arp_cache_entry.scratch = |
| 2391 | (uintptr_t)cqp_request; |
| 2392 | cqp_info->in.u.add_arp_cache_entry.cqp = &rf->cqp.sc_cqp; |
| 2393 | } else { |
| 2394 | cqp_info->cqp_cmd = IRDMA_OP_DELETE_ARP_CACHE_ENTRY; |
| 2395 | cqp_info->in.u.del_arp_cache_entry.scratch = |
| 2396 | (uintptr_t)cqp_request; |
| 2397 | cqp_info->in.u.del_arp_cache_entry.cqp = &rf->cqp.sc_cqp; |
| 2398 | cqp_info->in.u.del_arp_cache_entry.arp_index = arp_index; |
| 2399 | } |
| 2400 | |
| 2401 | cqp_info->post_sq = 1; |
| 2402 | irdma_handle_cqp_op(rf, cqp_request); |
| 2403 | irdma_put_cqp_request(&rf->cqp, cqp_request); |
| 2404 | } |
| 2405 | |
| 2406 | /** |
| 2407 | * irdma_send_syn_cqp_callback - do syn/ack after qhash |
| 2408 | * @cqp_request: qhash cqp completion |
| 2409 | */ |
| 2410 | static void irdma_send_syn_cqp_callback(struct irdma_cqp_request *cqp_request) |
| 2411 | { |
| 2412 | struct irdma_cm_node *cm_node = cqp_request->param; |
| 2413 | |
| 2414 | irdma_send_syn(cm_node, 1); |
| 2415 | irdma_rem_ref_cm_node(cm_node); |
| 2416 | } |
| 2417 | |
| 2418 | /** |
| 2419 | * irdma_manage_qhash - add or modify qhash |
| 2420 | * @iwdev: irdma device |
| 2421 | * @cminfo: cm info for qhash |
| 2422 | * @etype: type (syn or quad) |
| 2423 | * @mtype: type of qhash |
| 2424 | * @cmnode: cmnode associated with connection |
| 2425 | * @wait: wait for completion |
| 2426 | */ |
| 2427 | enum irdma_status_code |
| 2428 | irdma_manage_qhash(struct irdma_device *iwdev, struct irdma_cm_info *cminfo, |
| 2429 | enum irdma_quad_entry_type etype, |
| 2430 | enum irdma_quad_hash_manage_type mtype, void *cmnode, |
| 2431 | bool wait) |
| 2432 | { |
| 2433 | struct irdma_qhash_table_info *info; |
| 2434 | enum irdma_status_code status; |
| 2435 | struct irdma_cqp *iwcqp = &iwdev->rf->cqp; |
| 2436 | struct irdma_cqp_request *cqp_request; |
| 2437 | struct cqp_cmds_info *cqp_info; |
| 2438 | struct irdma_cm_node *cm_node = cmnode; |
| 2439 | |
| 2440 | cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, wait); |
| 2441 | if (!cqp_request) |
| 2442 | return IRDMA_ERR_NO_MEMORY; |
| 2443 | |
| 2444 | cqp_info = &cqp_request->info; |
| 2445 | info = &cqp_info->in.u.manage_qhash_table_entry.info; |
| 2446 | memset(info, 0, sizeof(*info)); |
| 2447 | info->vsi = &iwdev->vsi; |
| 2448 | info->manage = mtype; |
| 2449 | info->entry_type = etype; |
| 2450 | if (cminfo->vlan_id < VLAN_N_VID) { |
| 2451 | info->vlan_valid = true; |
| 2452 | info->vlan_id = cminfo->vlan_id; |
| 2453 | } else { |
| 2454 | info->vlan_valid = false; |
| 2455 | } |
| 2456 | info->ipv4_valid = cminfo->ipv4; |
| 2457 | info->user_pri = cminfo->user_pri; |
| 2458 | ether_addr_copy(info->mac_addr, iwdev->netdev->dev_addr); |
| 2459 | info->qp_num = cminfo->qh_qpid; |
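| | /* Note the qhash 'dest' fields are filled from the local port/address and |
| | * 'src' from the remote ones, i.e. the entry appears to be keyed the way |
| | * the HW sees inbound packets. |
| | */ |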
| 2460 | info->dest_port = cminfo->loc_port; |
| 2461 | info->dest_ip[0] = cminfo->loc_addr[0]; |
| 2462 | info->dest_ip[1] = cminfo->loc_addr[1]; |
| 2463 | info->dest_ip[2] = cminfo->loc_addr[2]; |
| 2464 | info->dest_ip[3] = cminfo->loc_addr[3]; |
| 2465 | if (etype == IRDMA_QHASH_TYPE_TCP_ESTABLISHED || |
| 2466 | etype == IRDMA_QHASH_TYPE_UDP_UNICAST || |
| 2467 | etype == IRDMA_QHASH_TYPE_UDP_MCAST || |
| 2468 | etype == IRDMA_QHASH_TYPE_ROCE_MCAST || |
| 2469 | etype == IRDMA_QHASH_TYPE_ROCEV2_HW) { |
| 2470 | info->src_port = cminfo->rem_port; |
| 2471 | info->src_ip[0] = cminfo->rem_addr[0]; |
| 2472 | info->src_ip[1] = cminfo->rem_addr[1]; |
| 2473 | info->src_ip[2] = cminfo->rem_addr[2]; |
| 2474 | info->src_ip[3] = cminfo->rem_addr[3]; |
| 2475 | } |
| 2476 | if (cmnode) { |
| 2477 | cqp_request->callback_fcn = irdma_send_syn_cqp_callback; |
| 2478 | cqp_request->param = cmnode; |
| 2479 | if (!wait) |
| 2480 | refcount_inc(&cm_node->refcnt); |
| 2481 | } |
| 2482 | if (info->ipv4_valid) |
| 2483 | ibdev_dbg(&iwdev->ibdev, |
| 2484 | "CM: %s caller: %pS loc_port=0x%04x rem_port=0x%04x loc_addr=%pI4 rem_addr=%pI4 mac=%pM, vlan_id=%d cm_node=%p\n", |
| 2485 | (!mtype) ? "DELETE" : "ADD", |
| 2486 | __builtin_return_address(0), info->dest_port, |
| 2487 | info->src_port, info->dest_ip, info->src_ip, |
| 2488 | info->mac_addr, cminfo->vlan_id, |
| 2489 | cmnode ? cmnode : NULL); |
| 2490 | else |
| 2491 | ibdev_dbg(&iwdev->ibdev, |
| 2492 | "CM: %s caller: %pS loc_port=0x%04x rem_port=0x%04x loc_addr=%pI6 rem_addr=%pI6 mac=%pM, vlan_id=%d cm_node=%p\n", |
| 2493 | (!mtype) ? "DELETE" : "ADD", |
| 2494 | __builtin_return_address(0), info->dest_port, |
| 2495 | info->src_port, info->dest_ip, info->src_ip, |
| 2496 | info->mac_addr, cminfo->vlan_id, |
| 2497 | cmnode ? cmnode : NULL); |
| 2498 | |
| 2499 | cqp_info->in.u.manage_qhash_table_entry.cqp = &iwdev->rf->cqp.sc_cqp; |
| 2500 | cqp_info->in.u.manage_qhash_table_entry.scratch = (uintptr_t)cqp_request; |
| 2501 | cqp_info->cqp_cmd = IRDMA_OP_MANAGE_QHASH_TABLE_ENTRY; |
| 2502 | cqp_info->post_sq = 1; |
| 2503 | status = irdma_handle_cqp_op(iwdev->rf, cqp_request); |
| 2504 | if (status && cm_node && !wait) |
| 2505 | irdma_rem_ref_cm_node(cm_node); |
| 2506 | |
| 2507 | irdma_put_cqp_request(iwcqp, cqp_request); |
| 2508 | |
| 2509 | return status; |
| 2510 | } |
| 2511 | |
| 2512 | /** |
| 2513 | * irdma_hw_flush_wqes_callback - Check return code after flush |
| 2514 | * @cqp_request: flush wqes cqp completion |
| 2515 | */ |
| 2516 | static void irdma_hw_flush_wqes_callback(struct irdma_cqp_request *cqp_request) |
| 2517 | { |
| 2518 | struct irdma_qp_flush_info *hw_info; |
| 2519 | struct irdma_sc_qp *qp; |
| 2520 | struct irdma_qp *iwqp; |
| 2521 | struct cqp_cmds_info *cqp_info; |
| 2522 | |
| 2523 | cqp_info = &cqp_request->info; |
| 2524 | hw_info = &cqp_info->in.u.qp_flush_wqes.info; |
| 2525 | qp = cqp_info->in.u.qp_flush_wqes.qp; |
| 2526 | iwqp = qp->qp_uk.back_qp; |
| 2527 | |
| 2528 | if (cqp_request->compl_info.maj_err_code) |
| 2529 | return; |
| 2530 | |
| 2531 | if (hw_info->rq && |
| 2532 | (cqp_request->compl_info.min_err_code == IRDMA_CQP_COMPL_SQ_WQE_FLUSHED || |
| 2533 | cqp_request->compl_info.min_err_code == 0)) { |
| 2534 | /* RQ WQE flush was requested but did not happen */ |
| 2535 | qp->qp_uk.rq_flush_complete = true; |
| 2536 | } |
| 2537 | if (hw_info->sq && |
| 2538 | (cqp_request->compl_info.min_err_code == IRDMA_CQP_COMPL_RQ_WQE_FLUSHED || |
| 2539 | cqp_request->compl_info.min_err_code == 0)) { |
| 2540 | if (IRDMA_RING_MORE_WORK(qp->qp_uk.sq_ring)) { |
| 2541 | ibdev_err(&iwqp->iwdev->ibdev, "Flush QP[%d] failed, SQ has more work", |
| 2542 | qp->qp_uk.qp_id); |
| 2543 | irdma_ib_qp_event(iwqp, IRDMA_QP_EVENT_CATASTROPHIC); |
| 2544 | } |
| 2545 | qp->qp_uk.sq_flush_complete = true; |
| 2546 | } |
| 2547 | } |
| 2548 | |
| 2549 | /** |
| 2550 | * irdma_hw_flush_wqes - flush qp's wqe |
| 2551 | * @rf: RDMA PCI function |
| 2552 | * @qp: hardware control qp |
| 2553 | * @info: info for flush |
| 2554 | * @wait: flag wait for completion |
| 2555 | */ |
| 2556 | enum irdma_status_code irdma_hw_flush_wqes(struct irdma_pci_f *rf, |
| 2557 | struct irdma_sc_qp *qp, |
| 2558 | struct irdma_qp_flush_info *info, |
| 2559 | bool wait) |
| 2560 | { |
| 2561 | enum irdma_status_code status; |
| 2562 | struct irdma_qp_flush_info *hw_info; |
| 2563 | struct irdma_cqp_request *cqp_request; |
| 2564 | struct cqp_cmds_info *cqp_info; |
| 2565 | struct irdma_qp *iwqp = qp->qp_uk.back_qp; |
| 2566 | |
| 2567 | cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, wait); |
| 2568 | if (!cqp_request) |
| 2569 | return IRDMA_ERR_NO_MEMORY; |
| 2570 | |
| 2571 | cqp_info = &cqp_request->info; |
| 2572 | if (!wait) |
| 2573 | cqp_request->callback_fcn = irdma_hw_flush_wqes_callback; |
| 2574 | hw_info = &cqp_request->info.in.u.qp_flush_wqes.info; |
| 2575 | memcpy(hw_info, info, sizeof(*hw_info)); |
| 2576 | cqp_info->cqp_cmd = IRDMA_OP_QP_FLUSH_WQES; |
| 2577 | cqp_info->post_sq = 1; |
| 2578 | cqp_info->in.u.qp_flush_wqes.qp = qp; |
| 2579 | cqp_info->in.u.qp_flush_wqes.scratch = (uintptr_t)cqp_request; |
| 2580 | status = irdma_handle_cqp_op(rf, cqp_request); |
| 2581 | if (status) { |
| 2582 | qp->qp_uk.sq_flush_complete = true; |
| 2583 | qp->qp_uk.rq_flush_complete = true; |
| 2584 | irdma_put_cqp_request(&rf->cqp, cqp_request); |
| 2585 | return status; |
| 2586 | } |
| 2587 | |
| 2588 | if (!wait || cqp_request->compl_info.maj_err_code) |
| 2589 | goto put_cqp; |
| 2590 | |
| 2591 | if (info->rq) { |
| 2592 | if (cqp_request->compl_info.min_err_code == IRDMA_CQP_COMPL_SQ_WQE_FLUSHED || |
| 2593 | cqp_request->compl_info.min_err_code == 0) { |
| 2594 | /* RQ WQE flush was requested but did not happen */ |
| 2595 | qp->qp_uk.rq_flush_complete = true; |
| 2596 | } |
| 2597 | } |
| 2598 | if (info->sq) { |
| 2599 | if (cqp_request->compl_info.min_err_code == IRDMA_CQP_COMPL_RQ_WQE_FLUSHED || |
| 2600 | cqp_request->compl_info.min_err_code == 0) { |
| 2601 | /* |
| 2602 | * Handling case where WQE is posted to empty SQ when |
| 2603 | * flush has not completed |
| 2604 | */ |
| 2605 | if (IRDMA_RING_MORE_WORK(qp->qp_uk.sq_ring)) { |
| 2606 | struct irdma_cqp_request *new_req; |
| 2607 | |
| 2608 | if (!qp->qp_uk.sq_flush_complete) |
| 2609 | goto put_cqp; |
| 2610 | qp->qp_uk.sq_flush_complete = false; |
| 2611 | qp->flush_sq = false; |
| 2612 | |
| 2613 | info->rq = false; |
| 2614 | info->sq = true; |
| 2615 | new_req = irdma_alloc_and_get_cqp_request(&rf->cqp, true); |
| 2616 | if (!new_req) { |
| 2617 | status = IRDMA_ERR_NO_MEMORY; |
| 2618 | goto put_cqp; |
| 2619 | } |
| 2620 | cqp_info = &new_req->info; |
| 2621 | hw_info = &new_req->info.in.u.qp_flush_wqes.info; |
| 2622 | memcpy(hw_info, info, sizeof(*hw_info)); |
| 2623 | cqp_info->cqp_cmd = IRDMA_OP_QP_FLUSH_WQES; |
| 2624 | cqp_info->post_sq = 1; |
| 2625 | cqp_info->in.u.qp_flush_wqes.qp = qp; |
| 2626 | cqp_info->in.u.qp_flush_wqes.scratch = (uintptr_t)new_req; |
| 2627 | |
| 2628 | status = irdma_handle_cqp_op(rf, new_req); |
| 2629 | if (new_req->compl_info.maj_err_code || |
| 2630 | new_req->compl_info.min_err_code != IRDMA_CQP_COMPL_SQ_WQE_FLUSHED || |
| 2631 | status) { |
| 2632 | ibdev_err(&iwqp->iwdev->ibdev, "fatal QP event: SQ in error but not flushed, qp: %d", |
| 2633 | iwqp->ibqp.qp_num); |
| 2634 | qp->qp_uk.sq_flush_complete = false; |
| 2635 | irdma_ib_qp_event(iwqp, IRDMA_QP_EVENT_CATASTROPHIC); |
| 2636 | } |
| 2637 | irdma_put_cqp_request(&rf->cqp, new_req); |
| 2638 | } else { |
| 2639 | /* SQ WQE flush was requested but did not happen */ |
| 2640 | qp->qp_uk.sq_flush_complete = true; |
| 2641 | } |
| 2642 | } else { |
| 2643 | if (!IRDMA_RING_MORE_WORK(qp->qp_uk.sq_ring)) |
| 2644 | qp->qp_uk.sq_flush_complete = true; |
| 2645 | } |
| 2646 | } |
| 2647 | |
| 2648 | ibdev_dbg(&rf->iwdev->ibdev, |
| 2649 | "VERBS: qp_id=%d qp_type=%d qpstate=%d ibqpstate=%d last_aeq=%d hw_iw_state=%d maj_err_code=%d min_err_code=%d\n", |
| 2650 | iwqp->ibqp.qp_num, rf->protocol_used, iwqp->iwarp_state, |
| 2651 | iwqp->ibqp_state, iwqp->last_aeq, iwqp->hw_iwarp_state, |
| 2652 | cqp_request->compl_info.maj_err_code, |
| 2653 | cqp_request->compl_info.min_err_code); |
| 2654 | put_cqp: |
| 2655 | irdma_put_cqp_request(&rf->cqp, cqp_request); |
| 2656 | |
| 2657 | return status; |
| 2658 | } |
| 2659 | |
| 2660 | /** |
| 2661 | * irdma_gen_ae - generate AE |
| 2662 | * @rf: RDMA PCI function |
| 2663 | * @qp: qp associated with AE |
| 2664 | * @info: info for ae |
| 2665 | * @wait: wait for completion |
| 2666 | */ |
| 2667 | void irdma_gen_ae(struct irdma_pci_f *rf, struct irdma_sc_qp *qp, |
| 2668 | struct irdma_gen_ae_info *info, bool wait) |
| 2669 | { |
| 2670 | struct irdma_gen_ae_info *ae_info; |
| 2671 | struct irdma_cqp_request *cqp_request; |
| 2672 | struct cqp_cmds_info *cqp_info; |
| 2673 | |
| 2674 | cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, wait); |
| 2675 | if (!cqp_request) |
| 2676 | return; |
| 2677 | |
| 2678 | cqp_info = &cqp_request->info; |
| 2679 | ae_info = &cqp_request->info.in.u.gen_ae.info; |
| 2680 | memcpy(ae_info, info, sizeof(*ae_info)); |
| 2681 | cqp_info->cqp_cmd = IRDMA_OP_GEN_AE; |
| 2682 | cqp_info->post_sq = 1; |
| 2683 | cqp_info->in.u.gen_ae.qp = qp; |
| 2684 | cqp_info->in.u.gen_ae.scratch = (uintptr_t)cqp_request; |
| 2685 | |
| 2686 | irdma_handle_cqp_op(rf, cqp_request); |
| 2687 | irdma_put_cqp_request(&rf->cqp, cqp_request); |
| 2688 | } |
| 2689 | |
| 2690 | void irdma_flush_wqes(struct irdma_qp *iwqp, u32 flush_mask) |
| 2691 | { |
| 2692 | struct irdma_qp_flush_info info = {}; |
| 2693 | struct irdma_pci_f *rf = iwqp->iwdev->rf; |
| 2694 | u8 flush_code = iwqp->sc_qp.flush_code; |
| 2695 | |
| 2696 | if (!(flush_mask & IRDMA_FLUSH_SQ) && !(flush_mask & IRDMA_FLUSH_RQ)) |
| 2697 | return; |
| 2698 | |
| 2699 | /* Set flush info fields */ |
| 2700 | info.sq = flush_mask & IRDMA_FLUSH_SQ; |
| 2701 | info.rq = flush_mask & IRDMA_FLUSH_RQ; |
| 2702 | |
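| | /* A re-flush clears the per-queue flush flags so the flush CQP op can be |
| | * issued again for that queue. |
| | */ |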
| 2703 | if (flush_mask & IRDMA_REFLUSH) { |
| 2704 | if (info.sq) |
| 2705 | iwqp->sc_qp.flush_sq = false; |
| 2706 | if (info.rq) |
| 2707 | iwqp->sc_qp.flush_rq = false; |
| 2708 | } |
| 2709 | |
| 2710 | /* Generate userflush errors in CQE */ |
| 2711 | info.sq_major_code = IRDMA_FLUSH_MAJOR_ERR; |
| 2712 | info.sq_minor_code = FLUSH_GENERAL_ERR; |
| 2713 | info.rq_major_code = IRDMA_FLUSH_MAJOR_ERR; |
| 2714 | info.rq_minor_code = FLUSH_GENERAL_ERR; |
| 2715 | info.userflushcode = true; |
| 2716 | if (flush_code) { |
| 2717 | if (info.sq && iwqp->sc_qp.sq_flush_code) |
| 2718 | info.sq_minor_code = flush_code; |
| 2719 | if (info.rq && iwqp->sc_qp.rq_flush_code) |
| 2720 | info.rq_minor_code = flush_code; |
| 2721 | } |
| 2722 | |
| 2723 | /* Issue flush */ |
| 2724 | (void)irdma_hw_flush_wqes(rf, &iwqp->sc_qp, &info, |
| 2725 | flush_mask & IRDMA_FLUSH_WAIT); |
| 2726 | iwqp->flush_issued = true; |
| 2727 | } |