Parav Pandit | fe2caef | 2012-03-21 04:09:06 +0530 | [diff] [blame] | 1 | /******************************************************************* |
| 2 | * This file is part of the Emulex RoCE Device Driver for * |
| 3 | * RoCE (RDMA over Converged Ethernet) adapters. * |
| 4 | * Copyright (C) 2008-2012 Emulex. All rights reserved. * |
| 5 | * EMULEX and SLI are trademarks of Emulex. * |
| 6 | * www.emulex.com * |
| 7 | * * |
| 8 | * This program is free software; you can redistribute it and/or * |
| 9 | * modify it under the terms of version 2 of the GNU General * |
| 10 | * Public License as published by the Free Software Foundation. * |
| 11 | * This program is distributed in the hope that it will be useful. * |
| 12 | * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * |
| 13 | * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * |
| 14 | * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * |
| 15 | * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * |
| 16 | * TO BE LEGALLY INVALID. See the GNU General Public License for * |
| 17 | * more details, a copy of which can be found in the file COPYING * |
| 18 | * included with this package. * |
| 19 | * |
| 20 | * Contact Information: |
| 21 | * linux-drivers@emulex.com |
| 22 | * |
| 23 | * Emulex |
| 24 | * 3333 Susan Street |
| 25 | * Costa Mesa, CA 92626 |
| 26 | *******************************************************************/ |
| 27 | |
| 28 | #ifndef __OCRDMA_H__ |
| 29 | #define __OCRDMA_H__ |
| 30 | |
| 31 | #include <linux/mutex.h> |
| 32 | #include <linux/list.h> |
| 33 | #include <linux/spinlock.h> |
| 34 | #include <linux/pci.h> |
| 35 | |
| 36 | #include <rdma/ib_verbs.h> |
| 37 | #include <rdma/ib_user_verbs.h> |
| 38 | |
| 39 | #include <be_roce.h> |
| 40 | #include "ocrdma_sli.h" |
| 41 | |
/* Driver version string reported to user space / the RDMA core. */
#define OCRDMA_ROCE_DEV_VERSION "1.0.0"
/* Human-readable node description for this HCA. */
#define OCRDMA_NODE_DESC "Emulex OneConnect RoCE HCA"

/* Driver-wide error logging helper; thin wrapper around printk(KERN_ERR). */
#define ocrdma_err(format, arg...) printk(KERN_ERR format, ##arg)

/* Upper bound on address handles per device (sizes av_tbl in ocrdma_dev). */
#define OCRDMA_MAX_AH 512

/* Build the uverbs-command capability bit for a given IB_USER_VERBS_CMD_* name. */
#define OCRDMA_UVERBS(CMD_NAME) (1ull << IB_USER_VERBS_CMD_##CMD_NAME)
| 50 | |
/*
 * Device capability limits. Presumably filled in from a firmware/mailbox
 * query at probe time and treated as read-only afterwards — TODO confirm
 * against the hw init path.
 */
struct ocrdma_dev_attr {
	u8 fw_ver[32];		/* firmware version string */
	u32 vendor_id;
	u32 device_id;
	u16 max_pd;		/* max protection domains */
	u16 max_cq;		/* max completion queues */
	u16 max_cqe;		/* max CQ entries per CQ */
	u16 max_qp;		/* max queue pairs */
	u16 max_wqe;		/* max send work queue entries per QP */
	u16 max_rqe;		/* max receive queue entries per QP */
	u32 max_inline_data;	/* max inline payload per send WQE, bytes */
	int max_send_sge;	/* max scatter/gather elems per send WQE */
	int max_recv_sge;	/* max scatter/gather elems per recv WQE */
	int max_mr;		/* max memory regions */
	u64 max_mr_size;	/* max bytes coverable by a single MR */
	u32 max_num_mr_pbl;	/* max physical-buffer-list pages per MR */
	int max_fmr;
	int max_map_per_fmr;
	int max_pages_per_frmr;
	u16 max_ord_per_qp;	/* outbound RDMA-read depth per QP */
	u16 max_ird_per_qp;	/* inbound RDMA-read depth per QP */

	int device_cap_flags;	/* capability bits reported to the IB core */
	u8 cq_overflow_detect;	/* nonzero if hw can detect CQ overflow */
	u8 srq_supported;	/* nonzero if adapter supports SRQs */

	u32 wqe_size;		/* size of one send WQE, bytes */
	u32 rqe_size;		/* size of one receive WQE, bytes */
	u32 ird_page_size;	/* page size used for IRD queue memory */
	u8 local_ca_ack_delay;
	u8 ird;
	u8 num_ird_pages;	/* number of IRD pages per QP */
};
| 84 | |
/* One page of a physical buffer list: CPU mapping plus its DMA address. */
struct ocrdma_pbl {
	void *va;	/* kernel virtual address of the PBL page */
	dma_addr_t pa;	/* DMA (bus) address of the same page */
};
| 89 | |
/* Generic descriptor for a DMA-visible hardware queue (used for EQ/MQ/CQ). */
struct ocrdma_queue_info {
	void *va;		/* CPU virtual address of the queue memory */
	dma_addr_t dma;		/* DMA address programmed into the adapter */
	u32 size;		/* total queue memory size, bytes */
	u16 len;		/* number of entries in the queue */
	u16 entry_size;		/* Size of an element in the queue */
	u16 id;			/* qid, where to ring the doorbell. */
	u16 head, tail;		/* software producer/consumer indices */
	bool created;		/* true once the queue exists on the adapter */
	atomic_t used;		/* Number of valid elements in the queue */
};
| 101 | |
/* Event queue: a hardware queue plus interrupt bookkeeping. */
struct ocrdma_eq {
	struct ocrdma_queue_info q;	/* underlying hardware queue */
	u32 vector;			/* interrupt vector serving this EQ */
	int cq_cnt;			/* count of CQs attached to this EQ */
	struct ocrdma_dev *dev;		/* back-pointer to owning device */
	char irq_name[32];		/* buffer holding this EQ's IRQ name */
};
| 109 | |
/* Mailbox queue pair: command submission queue and its completion queue. */
struct ocrdma_mq {
	struct ocrdma_queue_info sq;	/* mailbox submission queue */
	struct ocrdma_queue_info cq;	/* CQ carrying mailbox completions */
	bool rearm_cq;			/* whether the MQ CQ should be re-armed
					 * (presumably on the next doorbell —
					 * TODO confirm in the mbx path)
					 */
};
| 115 | |
/* Context for the single in-flight mailbox command. */
struct mqe_ctx {
	struct mutex lock; /* for serializing mailbox commands on MQ */
	wait_queue_head_t cmd_wait;	/* issuer waits here for cmd_done */
	u32 tag;			/* tag matching a response to its request */
	u16 cqe_status;			/* completion status from the MQ CQE */
	u16 ext_status;			/* extended status from the MQ CQE */
	bool cmd_done;			/* set when the response has arrived */
};
| 124 | |
/*
 * Per-adapter state. Embeds the ib_device registered with the RDMA core;
 * freed via RCU (see the rcu member), so lockless readers must use RCU
 * accessors when walking device lists.
 */
struct ocrdma_dev {
	struct ib_device ibdev;		/* IB core device (must be first for
					 * container_of in get_ocrdma_dev) */
	struct ocrdma_dev_attr attr;	/* cached device capability limits */

	struct mutex dev_lock; /* provides syncronise access to device data */
	spinlock_t flush_q_lock ____cacheline_aligned;	/* guards the CQ flush
							 * lists (sq_head/rq_head)
							 */

	struct ocrdma_cq **cq_tbl;	/* CQ lookup table indexed by cq id */
	struct ocrdma_qp **qp_tbl;	/* QP lookup table indexed by qp id */

	struct ocrdma_eq meq;		/* management event queue */
	struct ocrdma_eq *qp_eq_tbl;	/* EQs servicing QP completions */
	int eq_cnt;			/* number of entries in qp_eq_tbl */
	u16 base_eqid;			/* first hw EQ id assigned to us */
	u16 max_eq;			/* max EQs this device may create */

	union ib_gid *sgid_tbl;		/* source GID table for this port */
	/* provided synchronization to sgid table for
	 * updating gid entries triggered by notifier.
	 */
	spinlock_t sgid_lock;

	int gsi_qp_created;		/* nonzero once the GSI (QP1) exists */
	struct ocrdma_cq *gsi_sqcq;	/* send CQ backing the GSI QP */
	struct ocrdma_cq *gsi_rqcq;	/* recv CQ backing the GSI QP */

	/* Address-handle table: one DMA region carved into AV entries. */
	struct {
		struct ocrdma_av *va;	/* CPU mapping of the AV table */
		dma_addr_t pa;		/* DMA address of the AV table */
		u32 size;		/* table size in bytes */
		u32 num_ah;		/* number of AV entries in the table */
		/* provide synchronization for av
		 * entry allocations.
		 */
		spinlock_t lock;
		u32 ahid;		/* next-allocation hint/cursor */
		struct ocrdma_pbl pbl;	/* PBL describing the table memory */
	} av_tbl;

	void *mbx_cmd;			/* scratch buffer for mailbox commands */
	struct ocrdma_mq mq;		/* mailbox queues */
	struct mqe_ctx mqe_ctx;		/* in-flight mailbox command context */

	struct be_dev_info nic_info;	/* info shared with the be2net NIC side */

	struct list_head entry;		/* presumably links into a driver-global
					 * device list — TODO confirm */
	struct rcu_head rcu;		/* for RCU-deferred freeing of this dev */
	int id;				/* driver-assigned device index */
};
| 174 | |
/* Completion queue: IB core CQ plus the hardware ring it polls. */
struct ocrdma_cq {
	struct ib_cq ibcq;		/* IB core CQ (container_of anchor) */
	struct ocrdma_dev *dev;		/* owning device */
	struct ocrdma_cqe *va;		/* CPU mapping of the CQE ring */
	u32 phase;			/* current valid-phase bit value */
	u32 getp;	/* pointer to pending wrs to
			 * return to stack, wrap arounds
			 * at max_hw_cqe
			 */
	u32 max_hw_cqe;			/* ring size in CQEs */
	bool phase_change;		/* hw toggles phase on wrap if set */
	bool armed, solicited;		/* requested notification state */
	bool arm_needed;		/* defer arming until it is safe */

	spinlock_t cq_lock ____cacheline_aligned; /* provide synchronization
						   * to cq polling
						   */
	/* syncronizes cq completion handler invoked from multiple context */
	spinlock_t comp_handler_lock ____cacheline_aligned;
	u16 id;				/* hardware CQ id */
	u16 eqn;			/* EQ this CQ reports into */

	struct ocrdma_ucontext *ucontext;	/* set for user-space CQs */
	dma_addr_t pa;			/* DMA address of the CQE ring */
	u32 len;			/* ring memory size, bytes */
	atomic_t use_cnt;		/* references held by QPs using this CQ */

	/* head of all qp's sq and rq for which cqes need to be flushed
	 * by the software.
	 */
	struct list_head sq_head, rq_head;
};
| 207 | |
/* Protection domain, with optional direct-packet-push (DPP) resources. */
struct ocrdma_pd {
	struct ib_pd ibpd;		/* IB core PD (container_of anchor) */
	struct ocrdma_dev *dev;		/* owning device */
	struct ocrdma_ucontext *uctx;	/* set for user-space PDs */
	atomic_t use_cnt;		/* objects (QPs/MRs/...) using this PD */
	u32 id;				/* hardware PD id */
	int num_dpp_qp;			/* DPP-capable QP slots left in this PD */
	u32 dpp_page;			/* doorbell page index for DPP, if any */
	bool dpp_enabled;		/* true if DPP was granted for this PD */
};
| 218 | |
/* Address handle backed by one AV entry from the device's av_tbl. */
struct ocrdma_ah {
	struct ib_ah ibah;		/* IB core AH (container_of anchor) */
	struct ocrdma_dev *dev;		/* owning device */
	struct ocrdma_av *av;		/* the AV table entry for this AH */
	u16 sgid_index;			/* index into the device sgid table */
	u32 id;				/* AV entry id/index */
};
| 226 | |
/* Hardware work-queue ring (SQ or RQ) bookkeeping for a QP/SRQ. */
struct ocrdma_qp_hwq_info {
	u8 *va;			/* virtual address */
	u32 max_sges;		/* max SGEs per WQE in this ring */
	u32 head, tail;		/* software producer/consumer indices */
	u32 entry_size;		/* size of one WQE, bytes */
	u32 max_cnt;		/* number of WQEs in the ring */
	u32 max_wqe_idx;	/* index mask/limit for wrap-around */
	u16 dbid;		/* qid, where to ring the doorbell. */
	u32 len;		/* ring memory size, bytes */
	dma_addr_t pa;		/* DMA address of the ring */
};
| 238 | |
/* Shared receive queue. */
struct ocrdma_srq {
	struct ib_srq ibsrq;		/* IB core SRQ (container_of anchor) */
	struct ocrdma_dev *dev;		/* owning device */
	u8 __iomem *db;			/* mapped doorbell register */
	/* provide synchronization to multiple context(s) posting rqe */
	spinlock_t q_lock ____cacheline_aligned;

	struct ocrdma_qp_hwq_info rq;	/* hardware receive ring */
	struct ocrdma_pd *pd;		/* PD this SRQ belongs to */
	atomic_t use_cnt;		/* QPs currently attached to this SRQ */
	u32 id;				/* hardware SRQ id */
	u64 *rqe_wr_id_tbl;		/* wr_id saved per posted RQE */
	u32 *idx_bit_fields;		/* bitmap of free/used RQE slots */
	u32 bit_fields_len;		/* number of u32 words in the bitmap */
};
| 254 | |
/* Queue pair: send/receive hardware rings plus software completion state. */
struct ocrdma_qp {
	struct ib_qp ibqp;		/* IB core QP (container_of anchor) */
	struct ocrdma_dev *dev;		/* owning device */

	u8 __iomem *sq_db;		/* mapped SQ doorbell register */
	/* provide synchronization to multiple context(s) posting wqe, rqe */
	spinlock_t q_lock ____cacheline_aligned;
	struct ocrdma_qp_hwq_info sq;	/* hardware send ring */
	/* Per-SQ-WQE bookkeeping, indexed like the send ring. */
	struct {
		uint64_t wrid;		/* caller's wr_id for this WQE */
		uint16_t dpp_wqe_idx;	/* index of the DPP copy, if any */
		uint16_t dpp_wqe;	/* nonzero if WQE was posted via DPP */
		uint8_t signaled;	/* WQE requested a completion */
		uint8_t rsvd[3];	/* pad to an 8-byte-aligned size */
	} *wqe_wr_id_tbl;
	u32 max_inline_data;		/* max inline payload for this QP */
	struct ocrdma_cq *sq_cq;	/* CQ receiving send completions */
	/* list maintained per CQ to flush SQ errors */
	struct list_head sq_entry;

	u8 __iomem *rq_db;		/* mapped RQ doorbell register */
	struct ocrdma_qp_hwq_info rq;	/* hardware receive ring */
	u64 *rqe_wr_id_tbl;		/* wr_id saved per posted RQE */
	struct ocrdma_cq *rq_cq;	/* CQ receiving recv completions */
	struct ocrdma_srq *srq;		/* non-NULL if RQ is a shared RQ */
	/* list maintained per CQ to flush RQ errors */
	struct list_head rq_entry;

	enum ocrdma_qp_state state;	/* QP state */
	int cap_flags;			/* hw capability flags for this QP */
	u32 max_ord, max_ird;		/* negotiated RDMA-read depths */

	u32 id;				/* hardware QP id */
	struct ocrdma_pd *pd;		/* PD this QP belongs to */

	enum ib_qp_type qp_type;	/* RC/UD/GSI etc. */

	int sgid_idx;			/* source GID index for this QP */
	u32 qkey;			/* Q_Key (UD QPs) */
	bool dpp_enabled;		/* true if DPP posting is active */
	u8 *ird_q_va;			/* CPU mapping of the IRD queue memory */
};
| 297 | |
/*
 * Doorbell shift for the "number posted" field: Gen2 adapters use a
 * 24-bit shift for low QP ids (< 64), all other cases use 16.
 * The macro argument is fully parenthesized so that non-trivial
 * expressions (e.g. "qp_array + i") expand correctly.
 */
#define OCRDMA_GET_NUM_POSTED_SHIFT_VAL(qp) \
	((((qp)->dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) && \
	  ((qp)->id < 64)) ? 24 : 16)
| 301 | |
/* Hardware view of a memory region: keys, access rights, and PBL layout. */
struct ocrdma_hw_mr {
	struct ocrdma_dev *dev;		/* owning device */
	u32 lkey;			/* local key assigned by hardware */
	u8 fr_mr;			/* nonzero for fast-registration MRs */
	u8 remote_atomic;		/* remote atomic access allowed */
	u8 remote_rd;			/* remote read access allowed */
	u8 remote_wr;			/* remote write access allowed */
	u8 local_rd;			/* local read access allowed */
	u8 local_wr;			/* local write access allowed */
	u8 mw_bind;			/* memory-window binding allowed */
	u8 rsvd;			/* padding */
	u64 len;			/* region length in bytes */
	struct ocrdma_pbl *pbl_table;	/* array of PBL pages describing the MR */
	u32 num_pbls;			/* entries in pbl_table */
	u32 num_pbes;			/* total page-buffer entries across PBLs */
	u32 pbl_size;			/* size of one PBL page, bytes */
	u32 pbe_size;			/* size of one page-buffer entry, bytes */
	u64 fbo;			/* first-byte offset into the first page */
	u64 va;				/* starting virtual address of the MR */
};
| 322 | |
/* Memory region: IB core MR plus pinned pages and the hardware mapping. */
struct ocrdma_mr {
	struct ib_mr ibmr;		/* IB core MR (container_of anchor) */
	struct ib_umem *umem;		/* pinned user memory (user MRs only) */
	struct ocrdma_hw_mr hwmr;	/* hardware registration state */
	struct ocrdma_pd *pd;		/* PD this MR belongs to */
};
| 329 | |
/* Per-process user context; tracks mmap regions handed to user space. */
struct ocrdma_ucontext {
	struct ib_ucontext ibucontext;	/* IB core context (container_of anchor) */
	struct ocrdma_dev *dev;		/* owning device */

	struct list_head mm_head;	/* list of ocrdma_mm mmap records */
	struct mutex mm_list_lock; /* protects list entries of mm type */
	/* AH-id table shared with user space for address-handle lookups. */
	struct {
		u32 *va;	/* CPU mapping of the table */
		dma_addr_t pa;	/* DMA address of the table */
		u32 len;	/* table size, bytes */
	} ah_tbl;
};
| 342 | |
/* Record of one region exported to user space via mmap, keyed by
 * physical address + length so the mmap handler can validate requests.
 */
struct ocrdma_mm {
	struct {
		u64 phy_addr;		/* physical address of the region */
		unsigned long len;	/* region length, bytes */
	} key;
	struct list_head entry;		/* links into ucontext->mm_head */
};
| 350 | |
| 351 | static inline struct ocrdma_dev *get_ocrdma_dev(struct ib_device *ibdev) |
| 352 | { |
| 353 | return container_of(ibdev, struct ocrdma_dev, ibdev); |
| 354 | } |
| 355 | |
| 356 | static inline struct ocrdma_ucontext *get_ocrdma_ucontext(struct ib_ucontext |
| 357 | *ibucontext) |
| 358 | { |
| 359 | return container_of(ibucontext, struct ocrdma_ucontext, ibucontext); |
| 360 | } |
| 361 | |
| 362 | static inline struct ocrdma_pd *get_ocrdma_pd(struct ib_pd *ibpd) |
| 363 | { |
| 364 | return container_of(ibpd, struct ocrdma_pd, ibpd); |
| 365 | } |
| 366 | |
| 367 | static inline struct ocrdma_cq *get_ocrdma_cq(struct ib_cq *ibcq) |
| 368 | { |
| 369 | return container_of(ibcq, struct ocrdma_cq, ibcq); |
| 370 | } |
| 371 | |
| 372 | static inline struct ocrdma_qp *get_ocrdma_qp(struct ib_qp *ibqp) |
| 373 | { |
| 374 | return container_of(ibqp, struct ocrdma_qp, ibqp); |
| 375 | } |
| 376 | |
| 377 | static inline struct ocrdma_mr *get_ocrdma_mr(struct ib_mr *ibmr) |
| 378 | { |
| 379 | return container_of(ibmr, struct ocrdma_mr, ibmr); |
| 380 | } |
| 381 | |
| 382 | static inline struct ocrdma_ah *get_ocrdma_ah(struct ib_ah *ibah) |
| 383 | { |
| 384 | return container_of(ibah, struct ocrdma_ah, ibah); |
| 385 | } |
| 386 | |
| 387 | static inline struct ocrdma_srq *get_ocrdma_srq(struct ib_srq *ibsrq) |
| 388 | { |
| 389 | return container_of(ibsrq, struct ocrdma_srq, ibsrq); |
| 390 | } |
| 391 | |
| 392 | #endif |