/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef MLX5_IB_H
#define MLX5_IB_H

#include <linux/kernel.h>
#include <linux/sched.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_smi.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cq.h>
#include <linux/mlx5/qp.h>
#include <linux/mlx5/srq.h>
#include <linux/types.h>
#include <linux/mlx5/transobj.h>
#include <rdma/ib_user_verbs.h>

#define mlx5_ib_dbg(dev, format, arg...)				\
pr_debug("%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name, __func__,	\
	 __LINE__, current->pid, ##arg)

#define mlx5_ib_err(dev, format, arg...)				\
pr_err("%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name, __func__,	\
       __LINE__, current->pid, ##arg)

#define mlx5_ib_warn(dev, format, arg...)				\
pr_warn("%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name, __func__,	\
	__LINE__, current->pid, ##arg)
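
/*
 * Editorial note, not part of the original header: a minimal usage sketch
 * of the logging macros above. The device name, function, line number and
 * pid are prepended automatically, so callers pass only the message and
 * its arguments, e.g. (hypothetical message):
 *
 *	mlx5_ib_warn(dev, "create CQ failed, err %d\n", err);
 */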

#define field_avail(type, fld, sz) (offsetof(type, fld) +		\
				    sizeof(((type *)0)->fld) <= (sz))
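
/*
 * Editorial sketch, assuming the usual extensible-ABI idiom: field_avail()
 * guards writes to a response field against a short user buffer; the
 * struct and field names below are illustrative, not taken from this
 * header:
 *
 *	if (field_avail(struct mlx5_ib_alloc_ucontext_resp, cqe_version,
 *			udata->outlen))
 *		resp.cqe_version = ...;
 */
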
#define MLX5_IB_DEFAULT_UIDX 0xffffff
#define MLX5_USER_ASSIGNED_UIDX_MASK __mlx5_mask(qpc, user_index)

enum {
	MLX5_IB_MMAP_CMD_SHIFT = 8,
	MLX5_IB_MMAP_CMD_MASK = 0xff,
};
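
/*
 * Editorial sketch consistent with the shift/mask above (the actual decode
 * helper lives outside this header): the mmap command is assumed to be
 * carried in vm_pgoff above MLX5_IB_MMAP_CMD_SHIFT, leaving the low bits
 * free for a per-command argument such as a UAR index:
 *
 *	static inline int get_command(unsigned long offset)
 *	{
 *		return (offset >> MLX5_IB_MMAP_CMD_SHIFT) &
 *		       MLX5_IB_MMAP_CMD_MASK;
 *	}
 */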

enum mlx5_ib_mmap_cmd {
	MLX5_IB_MMAP_REGULAR_PAGE = 0,
	MLX5_IB_MMAP_GET_CONTIGUOUS_PAGES = 1,
	MLX5_IB_MMAP_WC_PAGE = 2,
	MLX5_IB_MMAP_NC_PAGE = 3,
	/* 5 is chosen in order to be compatible with old versions of libmlx5 */
	MLX5_IB_MMAP_CORE_CLOCK = 5,
};

enum {
	MLX5_RES_SCAT_DATA32_CQE = 0x1,
	MLX5_RES_SCAT_DATA64_CQE = 0x2,
	MLX5_REQ_SCAT_DATA32_CQE = 0x11,
	MLX5_REQ_SCAT_DATA64_CQE = 0x22,
};

enum mlx5_ib_latency_class {
	MLX5_IB_LATENCY_CLASS_LOW,
	MLX5_IB_LATENCY_CLASS_MEDIUM,
	MLX5_IB_LATENCY_CLASS_HIGH,
	MLX5_IB_LATENCY_CLASS_FAST_PATH
};

enum mlx5_ib_mad_ifc_flags {
	MLX5_MAD_IFC_IGNORE_MKEY = 1,
	MLX5_MAD_IFC_IGNORE_BKEY = 2,
	MLX5_MAD_IFC_NET_VIEW = 4,
};

enum {
	MLX5_CROSS_CHANNEL_UUAR = 0,
};

enum {
	MLX5_CQE_VERSION_V0,
	MLX5_CQE_VERSION_V1,
};

struct mlx5_ib_vma_private_data {
	struct list_head list;
	struct vm_area_struct *vma;
};

struct mlx5_ib_ucontext {
	struct ib_ucontext ibucontext;
	struct list_head db_page_list;

	/* protect doorbell record alloc/free
	 */
	struct mutex db_page_mutex;
	struct mlx5_uuar_info uuari;
	u8 cqe_version;
	/* Transport Domain number */
	u32 tdn;
	struct list_head vma_private_list;
};

static inline struct mlx5_ib_ucontext *to_mucontext(struct ib_ucontext *ibucontext)
{
	return container_of(ibucontext, struct mlx5_ib_ucontext, ibucontext);
}
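
/*
 * Editorial note: to_mucontext() and the other to_m*() helpers below all
 * follow the same embedding pattern; the core passes a pointer to the
 * embedded ib_* member and container_of() recovers the driver object:
 *
 *	struct mlx5_ib_ucontext *ctx = to_mucontext(ibucontext);
 */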

struct mlx5_ib_pd {
	struct ib_pd ibpd;
	u32 pdn;
};

#define MLX5_IB_FLOW_MCAST_PRIO		(MLX5_BY_PASS_NUM_PRIOS - 1)
#define MLX5_IB_FLOW_LAST_PRIO		(MLX5_BY_PASS_NUM_REGULAR_PRIOS - 1)
#if (MLX5_IB_FLOW_LAST_PRIO <= 0)
#error "Invalid number of bypass priorities"
#endif
#define MLX5_IB_FLOW_LEFTOVERS_PRIO	(MLX5_IB_FLOW_MCAST_PRIO + 1)

#define MLX5_IB_NUM_FLOW_FT		(MLX5_IB_FLOW_LEFTOVERS_PRIO + 1)
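/*
 * Editorial note on the arithmetic above: the multicast priority takes the
 * last bypass slot (MLX5_BY_PASS_NUM_PRIOS - 1) and the leftovers priority
 * sits one past it, so the prios[] array below needs
 * MLX5_BY_PASS_NUM_PRIOS + 1 entries in total.
 */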
struct mlx5_ib_flow_prio {
	struct mlx5_flow_table *flow_table;
	unsigned int refcount;
};

struct mlx5_ib_flow_handler {
	struct list_head list;
	struct ib_flow ibflow;
	unsigned int prio;
	struct mlx5_flow_rule *rule;
};

struct mlx5_ib_flow_db {
	struct mlx5_ib_flow_prio prios[MLX5_IB_NUM_FLOW_FT];
	/* Protect flow steering bypass flow tables
	 * when adding/deleting flow rules. Only a single add/removal of
	 * a flow steering rule can be done simultaneously.
	 */
	struct mutex lock;
};

/* Use macros here so that we don't have to duplicate
 * enum ib_send_flags and enum ib_qp_type for low-level driver
 */

#define MLX5_IB_SEND_UMR_UNREG	IB_SEND_RESERVED_START
#define MLX5_IB_SEND_UMR_FAIL_IF_FREE (IB_SEND_RESERVED_START << 1)
#define MLX5_IB_SEND_UMR_UPDATE_MTT (IB_SEND_RESERVED_START << 2)

#define MLX5_IB_SEND_UMR_UPDATE_TRANSLATION	(IB_SEND_RESERVED_START << 3)
#define MLX5_IB_SEND_UMR_UPDATE_PD		(IB_SEND_RESERVED_START << 4)
#define MLX5_IB_SEND_UMR_UPDATE_ACCESS		IB_SEND_RESERVED_END

#define MLX5_IB_QPT_REG_UMR	IB_QPT_RESERVED1
/*
 * IB_QPT_GSI creates the software wrapper around GSI, and MLX5_IB_QPT_HW_GSI
 * creates the actual hardware QP.
 */
#define MLX5_IB_QPT_HW_GSI	IB_QPT_RESERVED2
#define MLX5_IB_WR_UMR		IB_WR_RESERVED1

/* Private QP creation flags to be passed in ib_qp_init_attr.create_flags.
 *
 * These flags are intended for internal use by the mlx5_ib driver, and they
 * rely on the range reserved for that use in the ib_qp_create_flags enum.
 */

/* Create a UD QP whose source QP number is 1 */
static inline enum ib_qp_create_flags mlx5_ib_create_qp_sqpn_qp1(void)
{
	return IB_QP_CREATE_RESERVED_START;
}
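
/*
 * Editorial usage sketch (hypothetical caller): a QP that should transmit
 * with source QP number 1 sets the reserved flag through the helper rather
 * than using the raw enum value:
 *
 *	init_attr->create_flags |= mlx5_ib_create_qp_sqpn_qp1();
 */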

struct wr_list {
	u16 opcode;
	u16 next;
};

struct mlx5_ib_wq {
	u64 *wrid;
	u32 *wr_data;
	struct wr_list *w_list;
	unsigned *wqe_head;
	u16 unsig_count;

	/* serialize post to the work queue
	 */
	spinlock_t lock;
	int wqe_cnt;
	int max_post;
	int max_gs;
	int offset;
	int wqe_shift;
	unsigned head;
	unsigned tail;
	u16 cur_post;
	u16 last_poll;
	void *qend;
};

struct mlx5_ib_rwq {
	struct ib_wq ibwq;
	u32 rqn;
	u32 rq_num_pas;
	u32 log_rq_stride;
	u32 log_rq_size;
	u32 rq_page_offset;
	u32 log_page_size;
	struct ib_umem *umem;
	size_t buf_size;
	unsigned int page_shift;
	int create_type;
	struct mlx5_db db;
	u32 user_index;
	u32 wqe_count;
	u32 wqe_shift;
	int wq_sig;
};

enum {
	MLX5_QP_USER,
	MLX5_QP_KERNEL,
	MLX5_QP_EMPTY
};

enum {
	MLX5_WQ_USER,
	MLX5_WQ_KERNEL
};

struct mlx5_ib_rwq_ind_table {
	struct ib_rwq_ind_table ib_rwq_ind_tbl;
	u32 rqtn;
};

/*
 * Connect-IB can trigger up to four concurrent pagefaults
 * per QP.
 */
enum mlx5_ib_pagefault_context {
	MLX5_IB_PAGEFAULT_RESPONDER_READ,
	MLX5_IB_PAGEFAULT_REQUESTOR_READ,
	MLX5_IB_PAGEFAULT_RESPONDER_WRITE,
	MLX5_IB_PAGEFAULT_REQUESTOR_WRITE,
	MLX5_IB_PAGEFAULT_CONTEXTS
};

static inline enum mlx5_ib_pagefault_context
	mlx5_ib_get_pagefault_context(struct mlx5_pagefault *pagefault)
{
	return pagefault->flags & (MLX5_PFAULT_REQUESTOR | MLX5_PFAULT_WRITE);
}
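
/*
 * Editorial note, assuming MLX5_PFAULT_REQUESTOR and MLX5_PFAULT_WRITE are
 * bits 0 and 1 respectively: the enum above is ordered so that the masked
 * flags index it directly, e.g. a requestor write fault yields
 * (MLX5_PFAULT_REQUESTOR | MLX5_PFAULT_WRITE) == 3 ==
 * MLX5_IB_PAGEFAULT_REQUESTOR_WRITE.
 */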

struct mlx5_ib_pfault {
	struct work_struct work;
	struct mlx5_pagefault mpfault;
};

struct mlx5_ib_ubuffer {
	struct ib_umem *umem;
	int buf_size;
	u64 buf_addr;
};

struct mlx5_ib_qp_base {
	struct mlx5_ib_qp *container_mibqp;
	struct mlx5_core_qp mqp;
	struct mlx5_ib_ubuffer ubuffer;
};

struct mlx5_ib_qp_trans {
	struct mlx5_ib_qp_base base;
	u16 xrcdn;
	u8 alt_port;
	u8 atomic_rd_en;
	u8 resp_depth;
};

struct mlx5_ib_rss_qp {
	u32 tirn;
};

struct mlx5_ib_rq {
	struct mlx5_ib_qp_base base;
	struct mlx5_ib_wq *rq;
	struct mlx5_ib_ubuffer ubuffer;
	struct mlx5_db *doorbell;
	u32 tirn;
	u8 state;
};

struct mlx5_ib_sq {
	struct mlx5_ib_qp_base base;
	struct mlx5_ib_wq *sq;
	struct mlx5_ib_ubuffer ubuffer;
	struct mlx5_db *doorbell;
	u32 tisn;
	u8 state;
};

struct mlx5_ib_raw_packet_qp {
	struct mlx5_ib_sq sq;
	struct mlx5_ib_rq rq;
};

struct mlx5_ib_qp {
	struct ib_qp ibqp;
	union {
		struct mlx5_ib_qp_trans trans_qp;
		struct mlx5_ib_raw_packet_qp raw_packet_qp;
		struct mlx5_ib_rss_qp rss_qp;
	};
	struct mlx5_buf buf;

	struct mlx5_db db;
	struct mlx5_ib_wq rq;

	u8 sq_signal_bits;
	u8 fm_cache;
	struct mlx5_ib_wq sq;

	/* serialize qp state modifications
	 */
	struct mutex mutex;
	u32 flags;
	u8 port;
	u8 state;
	int wq_sig;
	int scat_cqe;
	int max_inline_data;
	struct mlx5_bf *bf;
	int has_rq;

	/* only for user space QPs. For kernel
	 * we have it from the bf object
	 */
	int uuarn;

	int create_type;

	/* Store signature errors */
	bool signature_en;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	/*
	 * A flag that is true for QPs that are in a state that doesn't
	 * allow page faults, and shouldn't schedule any more faults.
	 */
	int disable_page_faults;
	/*
	 * The disable_page_faults_lock protects a QP's disable_page_faults
	 * field, allowing for a thread to atomically check whether the QP
	 * allows page faults, and if so schedule a page fault.
	 */
	spinlock_t disable_page_faults_lock;
	struct mlx5_ib_pfault pagefaults[MLX5_IB_PAGEFAULT_CONTEXTS];
#endif
	struct list_head qps_list;
	struct list_head cq_recv_list;
	struct list_head cq_send_list;
};

struct mlx5_ib_cq_buf {
	struct mlx5_buf buf;
	struct ib_umem *umem;
	int cqe_size;
	int nent;
};

enum mlx5_ib_qp_flags {
	MLX5_IB_QP_LSO				= IB_QP_CREATE_IPOIB_UD_LSO,
	MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK	= IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK,
	MLX5_IB_QP_CROSS_CHANNEL		= IB_QP_CREATE_CROSS_CHANNEL,
	MLX5_IB_QP_MANAGED_SEND			= IB_QP_CREATE_MANAGED_SEND,
	MLX5_IB_QP_MANAGED_RECV			= IB_QP_CREATE_MANAGED_RECV,
	MLX5_IB_QP_SIGNATURE_HANDLING		= 1 << 5,
	/* QP uses 1 as its source QP number */
	MLX5_IB_QP_SQPN_QP1			= 1 << 6,
	MLX5_IB_QP_CAP_SCATTER_FCS		= 1 << 7,
	MLX5_IB_QP_RSS				= 1 << 8,
};

struct mlx5_umr_wr {
	struct ib_send_wr wr;
	union {
		u64 virt_addr;
		u64 offset;
	} target;
	struct ib_pd *pd;
	unsigned int page_shift;
	unsigned int npages;
	u32 length;
	int access_flags;
	u32 mkey;
};

static inline struct mlx5_umr_wr *umr_wr(struct ib_send_wr *wr)
{
	return container_of(wr, struct mlx5_umr_wr, wr);
}
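
/*
 * Editorial sketch: like the to_m*() helpers, umr_wr() upcasts from the
 * embedded generic work request, so a send path handling MLX5_IB_WR_UMR
 * might do (hypothetical surrounding code):
 *
 *	struct mlx5_umr_wr *umrwr = umr_wr(wr);
 *
 *	seg->mkey = cpu_to_be32(umrwr->mkey);
 */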

struct mlx5_shared_mr_info {
	int mr_id;
	struct ib_umem *umem;
};

struct mlx5_ib_cq {
	struct ib_cq ibcq;
	struct mlx5_core_cq mcq;
	struct mlx5_ib_cq_buf buf;
	struct mlx5_db db;

	/* serialize access to the CQ
	 */
	spinlock_t lock;

	/* protect resize cq
	 */
	struct mutex resize_mutex;
	struct mlx5_ib_cq_buf *resize_buf;
	struct ib_umem *resize_umem;
	int cqe_size;
	struct list_head list_send_qp;
	struct list_head list_recv_qp;
	u32 create_flags;
	struct list_head wc_list;
	enum ib_cq_notify_flags notify_flags;
	struct work_struct notify_work;
};

struct mlx5_ib_wc {
	struct ib_wc wc;
	struct list_head list;
};

struct mlx5_ib_srq {
	struct ib_srq ibsrq;
	struct mlx5_core_srq msrq;
	struct mlx5_buf buf;
	struct mlx5_db db;
	u64 *wrid;
	/* protect SRQ handling
	 */
	spinlock_t lock;
	int head;
	int tail;
	u16 wqe_ctr;
	struct ib_umem *umem;
	/* serialize arming a SRQ
	 */
	struct mutex mutex;
	int wq_sig;
};

struct mlx5_ib_xrcd {
	struct ib_xrcd ibxrcd;
	u32 xrcdn;
};

enum mlx5_ib_mtt_access_flags {
	MLX5_IB_MTT_READ  = (1 << 0),
	MLX5_IB_MTT_WRITE = (1 << 1),
};

#define MLX5_IB_MTT_PRESENT (MLX5_IB_MTT_READ | MLX5_IB_MTT_WRITE)

struct mlx5_ib_mr {
	struct ib_mr ibmr;
	void *descs;
	dma_addr_t desc_map;
	int ndescs;
	int max_descs;
	int desc_size;
	int access_mode;
	struct mlx5_core_mkey mmkey;
	struct ib_umem *umem;
	struct mlx5_shared_mr_info *smr_info;
	struct list_head list;
	int order;
	int umred;
	int npages;
	struct mlx5_ib_dev *dev;
	struct mlx5_create_mkey_mbox_out out;
	struct mlx5_core_sig_ctx *sig;
	int live;
	void *descs_alloc;
	int access_flags; /* Needed for rereg MR */
};

struct mlx5_ib_mw {
	struct ib_mw ibmw;
	struct mlx5_core_mkey mmkey;
};

struct mlx5_ib_umr_context {
	struct ib_cqe cqe;
	enum ib_wc_status status;
	struct completion done;
};

struct umr_common {
	struct ib_pd *pd;
	struct ib_cq *cq;
	struct ib_qp *qp;
	/* control access to UMR QP
	 */
	struct semaphore sem;
};

enum {
	MLX5_FMR_INVALID,
	MLX5_FMR_VALID,
	MLX5_FMR_BUSY,
};

struct mlx5_cache_ent {
	struct list_head head;
	/* sync access to the cache entry
	 */
	spinlock_t lock;


	struct dentry *dir;
	char name[4];
	u32 order;
	u32 size;
	u32 cur;
	u32 miss;
	u32 limit;

	struct dentry *fsize;
	struct dentry *fcur;
	struct dentry *fmiss;
	struct dentry *flimit;

	struct mlx5_ib_dev *dev;
	struct work_struct work;
	struct delayed_work dwork;
	int pending;
};

struct mlx5_mr_cache {
	struct workqueue_struct *wq;
	struct mlx5_cache_ent ent[MAX_MR_CACHE_ENTRIES];
	int stopped;
	struct dentry *root;
	unsigned long last_add;
};

struct mlx5_ib_gsi_qp;

struct mlx5_ib_port_resources {
	struct mlx5_ib_resources *devr;
	struct mlx5_ib_gsi_qp *gsi;
	struct work_struct pkey_change_work;
};

struct mlx5_ib_resources {
	struct ib_cq *c0;
	struct ib_xrcd *x0;
	struct ib_xrcd *x1;
	struct ib_pd *p0;
	struct ib_srq *s0;
	struct ib_srq *s1;
	struct mlx5_ib_port_resources ports[2];
	/* Protects changes to the port resources */
	struct mutex mutex;
};

struct mlx5_ib_port {
	u16 q_cnt_id;
};

struct mlx5_roce {
	/* Protect mlx5_ib_get_netdev from invoking dev_hold() with a NULL
	 * netdev pointer
	 */
	rwlock_t netdev_lock;
	struct net_device *netdev;
	struct notifier_block nb;
};

struct mlx5_ib_dev {
	struct ib_device ib_dev;
	struct mlx5_core_dev *mdev;
	struct mlx5_roce roce;
	MLX5_DECLARE_DOORBELL_LOCK(uar_lock);
	int num_ports;
	/* serialize update of capability mask
	 */
	struct mutex cap_mask_mutex;
	bool ib_active;
	struct umr_common umrc;
	/* sync used page count stats
	 */
	struct mlx5_ib_resources devr;
	struct mlx5_mr_cache cache;
	struct timer_list delay_timer;
	int fill_delay;
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	struct ib_odp_caps odp_caps;
	/*
	 * Sleepable RCU that prevents destruction of MRs while they are still
	 * being used by a page fault handler.
	 */
	struct srcu_struct mr_srcu;
#endif
	struct mlx5_ib_flow_db flow_db;
	/* protect resources needed as part of reset flow */
	spinlock_t reset_flow_resource_lock;
	struct list_head qp_list;
	/* Array with num_ports elements */
	struct mlx5_ib_port *port;
};

static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq)
{
	return container_of(mcq, struct mlx5_ib_cq, mcq);
}

static inline struct mlx5_ib_xrcd *to_mxrcd(struct ib_xrcd *ibxrcd)
{
	return container_of(ibxrcd, struct mlx5_ib_xrcd, ibxrcd);
}

static inline struct mlx5_ib_dev *to_mdev(struct ib_device *ibdev)
{
	return container_of(ibdev, struct mlx5_ib_dev, ib_dev);
}

static inline struct mlx5_ib_cq *to_mcq(struct ib_cq *ibcq)
{
	return container_of(ibcq, struct mlx5_ib_cq, ibcq);
}

static inline struct mlx5_ib_qp *to_mibqp(struct mlx5_core_qp *mqp)
{
	return container_of(mqp, struct mlx5_ib_qp_base, mqp)->container_mibqp;
}

static inline struct mlx5_ib_mr *to_mibmr(struct mlx5_core_mkey *mmkey)
{
	return container_of(mmkey, struct mlx5_ib_mr, mmkey);
}

static inline struct mlx5_ib_pd *to_mpd(struct ib_pd *ibpd)
{
	return container_of(ibpd, struct mlx5_ib_pd, ibpd);
}

static inline struct mlx5_ib_srq *to_msrq(struct ib_srq *ibsrq)
{
	return container_of(ibsrq, struct mlx5_ib_srq, ibsrq);
}

static inline struct mlx5_ib_qp *to_mqp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct mlx5_ib_qp, ibqp);
}

static inline struct mlx5_ib_rwq *to_mrwq(struct ib_wq *ibwq)
{
	return container_of(ibwq, struct mlx5_ib_rwq, ibwq);
}

static inline struct mlx5_ib_rwq_ind_table *to_mrwq_ind_table(struct ib_rwq_ind_table *ib_rwq_ind_tbl)
{
	return container_of(ib_rwq_ind_tbl, struct mlx5_ib_rwq_ind_table, ib_rwq_ind_tbl);
}

static inline struct mlx5_ib_srq *to_mibsrq(struct mlx5_core_srq *msrq)
{
	return container_of(msrq, struct mlx5_ib_srq, msrq);
}

static inline struct mlx5_ib_mr *to_mmr(struct ib_mr *ibmr)
{
	return container_of(ibmr, struct mlx5_ib_mr, ibmr);
}

static inline struct mlx5_ib_mw *to_mmw(struct ib_mw *ibmw)
{
	return container_of(ibmw, struct mlx5_ib_mw, ibmw);
}

struct mlx5_ib_ah {
	struct ib_ah ibah;
	struct mlx5_av av;
};

static inline struct mlx5_ib_ah *to_mah(struct ib_ah *ibah)
{
	return container_of(ibah, struct mlx5_ib_ah, ibah);
}

int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context, unsigned long virt,
			struct mlx5_db *db);
void mlx5_ib_db_unmap_user(struct mlx5_ib_ucontext *context, struct mlx5_db *db);
void __mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq);
void mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq);
void mlx5_ib_free_srq_wqe(struct mlx5_ib_srq *srq, int wqe_index);
int mlx5_MAD_IFC(struct mlx5_ib_dev *dev, int ignore_mkey, int ignore_bkey,
		 u8 port, const struct ib_wc *in_wc, const struct ib_grh *in_grh,
		 const void *in_mad, void *response_mad);
struct ib_ah *mlx5_ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr);
int mlx5_ib_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr);
int mlx5_ib_destroy_ah(struct ib_ah *ah);
struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
				  struct ib_srq_init_attr *init_attr,
				  struct ib_udata *udata);
int mlx5_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
		       enum ib_srq_attr_mask attr_mask, struct ib_udata *udata);
int mlx5_ib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr);
int mlx5_ib_destroy_srq(struct ib_srq *srq);
int mlx5_ib_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
			  struct ib_recv_wr **bad_wr);
struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
				struct ib_qp_init_attr *init_attr,
				struct ib_udata *udata);
int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		      int attr_mask, struct ib_udata *udata);
int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
		     struct ib_qp_init_attr *qp_init_attr);
int mlx5_ib_destroy_qp(struct ib_qp *qp);
int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
		      struct ib_send_wr **bad_wr);
int mlx5_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
		      struct ib_recv_wr **bad_wr);
void *mlx5_get_send_wqe(struct mlx5_ib_qp *qp, int n);
int mlx5_ib_read_user_wqe(struct mlx5_ib_qp *qp, int send, int wqe_index,
			  void *buffer, u32 length,
			  struct mlx5_ib_qp_base *base);
struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev,
				const struct ib_cq_init_attr *attr,
				struct ib_ucontext *context,
				struct ib_udata *udata);
int mlx5_ib_destroy_cq(struct ib_cq *cq);
int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);
int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata);
struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc);
struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				  u64 virt_addr, int access_flags,
				  struct ib_udata *udata);
struct ib_mw *mlx5_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
			       struct ib_udata *udata);
int mlx5_ib_dealloc_mw(struct ib_mw *mw);
int mlx5_ib_update_mtt(struct mlx5_ib_mr *mr, u64 start_page_index,
		       int npages, int zap);
int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
			  u64 length, u64 virt_addr, int access_flags,
			  struct ib_pd *pd, struct ib_udata *udata);
int mlx5_ib_dereg_mr(struct ib_mr *ibmr);
struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd,
			       enum ib_mr_type mr_type,
			       u32 max_num_sg);
int mlx5_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
		      unsigned int *sg_offset);
int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
			const struct ib_wc *in_wc, const struct ib_grh *in_grh,
			const struct ib_mad_hdr *in, size_t in_mad_size,
			struct ib_mad_hdr *out, size_t *out_mad_size,
			u16 *out_mad_pkey_index);
struct ib_xrcd *mlx5_ib_alloc_xrcd(struct ib_device *ibdev,
				   struct ib_ucontext *context,
				   struct ib_udata *udata);
int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd);
int mlx5_ib_get_buf_offset(u64 addr, int page_shift, u32 *offset);
int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, u8 port);
int mlx5_query_mad_ifc_smp_attr_node_info(struct ib_device *ibdev,
					  struct ib_smp *out_mad);
int mlx5_query_mad_ifc_system_image_guid(struct ib_device *ibdev,
					 __be64 *sys_image_guid);
int mlx5_query_mad_ifc_max_pkeys(struct ib_device *ibdev,
				 u16 *max_pkeys);
int mlx5_query_mad_ifc_vendor_id(struct ib_device *ibdev,
				 u32 *vendor_id);
int mlx5_query_mad_ifc_node_desc(struct mlx5_ib_dev *dev, char *node_desc);
int mlx5_query_mad_ifc_node_guid(struct mlx5_ib_dev *dev, __be64 *node_guid);
int mlx5_query_mad_ifc_pkey(struct ib_device *ibdev, u8 port, u16 index,
			    u16 *pkey);
int mlx5_query_mad_ifc_gids(struct ib_device *ibdev, u8 port, int index,
			    union ib_gid *gid);
int mlx5_query_mad_ifc_port(struct ib_device *ibdev, u8 port,
			    struct ib_port_attr *props);
int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
		       struct ib_port_attr *props);
int mlx5_ib_init_fmr(struct mlx5_ib_dev *dev);
void mlx5_ib_cleanup_fmr(struct mlx5_ib_dev *dev);
void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift,
			int *ncont, int *order);
void __mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
			    int page_shift, size_t offset, size_t num_pages,
			    __be64 *pas, int access_flags);
void mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
			  int page_shift, __be64 *pas, int access_flags);
void mlx5_ib_copy_pas(u64 *old, u64 *new, int step, int num);
int mlx5_ib_get_cqe_size(struct mlx5_ib_dev *dev, struct ib_cq *ibcq);
int mlx5_mr_cache_init(struct mlx5_ib_dev *dev);
int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev);
int mlx5_mr_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift);
int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
			    struct ib_mr_status *mr_status);
struct ib_wq *mlx5_ib_create_wq(struct ib_pd *pd,
				struct ib_wq_init_attr *init_attr,
				struct ib_udata *udata);
int mlx5_ib_destroy_wq(struct ib_wq *wq);
int mlx5_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
		      u32 wq_attr_mask, struct ib_udata *udata);
struct ib_rwq_ind_table *mlx5_ib_create_rwq_ind_table(struct ib_device *device,
						      struct ib_rwq_ind_table_init_attr *init_attr,
						      struct ib_udata *udata);
int mlx5_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table);

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
extern struct workqueue_struct *mlx5_ib_page_fault_wq;

void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev);
void mlx5_ib_mr_pfault_handler(struct mlx5_ib_qp *qp,
			       struct mlx5_ib_pfault *pfault);
void mlx5_ib_odp_create_qp(struct mlx5_ib_qp *qp);
int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev);
void mlx5_ib_odp_remove_one(struct mlx5_ib_dev *ibdev);
int __init mlx5_ib_odp_init(void);
void mlx5_ib_odp_cleanup(void);
void mlx5_ib_qp_disable_pagefaults(struct mlx5_ib_qp *qp);
void mlx5_ib_qp_enable_pagefaults(struct mlx5_ib_qp *qp);
void mlx5_ib_invalidate_range(struct ib_umem *umem, unsigned long start,
			      unsigned long end);
#else /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
static inline void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev)
{
	return;
}

static inline void mlx5_ib_odp_create_qp(struct mlx5_ib_qp *qp) {}
static inline int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev) { return 0; }
static inline void mlx5_ib_odp_remove_one(struct mlx5_ib_dev *ibdev) {}
static inline int mlx5_ib_odp_init(void) { return 0; }
static inline void mlx5_ib_odp_cleanup(void) {}
static inline void mlx5_ib_qp_disable_pagefaults(struct mlx5_ib_qp *qp) {}
static inline void mlx5_ib_qp_enable_pagefaults(struct mlx5_ib_qp *qp) {}

#endif /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */

int mlx5_ib_get_vf_config(struct ib_device *device, int vf,
			  u8 port, struct ifla_vf_info *info);
int mlx5_ib_set_vf_link_state(struct ib_device *device, int vf,
			      u8 port, int state);
int mlx5_ib_get_vf_stats(struct ib_device *device, int vf,
			 u8 port, struct ifla_vf_stats *stats);
int mlx5_ib_set_vf_guid(struct ib_device *device, int vf, u8 port,
			u64 guid, int type);

__be16 mlx5_get_roce_udp_sport(struct mlx5_ib_dev *dev, u8 port_num,
			       int index);

/* GSI QP helper functions */
struct ib_qp *mlx5_ib_gsi_create_qp(struct ib_pd *pd,
				    struct ib_qp_init_attr *init_attr);
int mlx5_ib_gsi_destroy_qp(struct ib_qp *qp);
int mlx5_ib_gsi_modify_qp(struct ib_qp *qp, struct ib_qp_attr *attr,
			  int attr_mask);
int mlx5_ib_gsi_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
			 int qp_attr_mask,
			 struct ib_qp_init_attr *qp_init_attr);
int mlx5_ib_gsi_post_send(struct ib_qp *qp, struct ib_send_wr *wr,
			  struct ib_send_wr **bad_wr);
int mlx5_ib_gsi_post_recv(struct ib_qp *qp, struct ib_recv_wr *wr,
			  struct ib_recv_wr **bad_wr);
void mlx5_ib_gsi_pkey_change(struct mlx5_ib_gsi_qp *gsi);

int mlx5_ib_generate_wc(struct ib_cq *ibcq, struct ib_wc *wc);

static inline void init_query_mad(struct ib_smp *mad)
{
	mad->base_version = 1;
	mad->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED;
	mad->class_version = 1;
	mad->method = IB_MGMT_METHOD_GET;
}

static inline u8 convert_access(int acc)
{
	return (acc & IB_ACCESS_REMOTE_ATOMIC ? MLX5_PERM_ATOMIC : 0) |
	       (acc & IB_ACCESS_REMOTE_WRITE ? MLX5_PERM_REMOTE_WRITE : 0) |
	       (acc & IB_ACCESS_REMOTE_READ ? MLX5_PERM_REMOTE_READ : 0) |
	       (acc & IB_ACCESS_LOCAL_WRITE ? MLX5_PERM_LOCAL_WRITE : 0) |
	       MLX5_PERM_LOCAL_READ;
}
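
/*
 * Editorial worked example: for acc == (IB_ACCESS_LOCAL_WRITE |
 * IB_ACCESS_REMOTE_READ), convert_access() returns MLX5_PERM_LOCAL_WRITE |
 * MLX5_PERM_REMOTE_READ | MLX5_PERM_LOCAL_READ; local read permission is
 * always granted regardless of acc.
 */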

static inline int is_qp1(enum ib_qp_type qp_type)
{
	return qp_type == MLX5_IB_QPT_HW_GSI;
}

#define MLX5_MAX_UMR_SHIFT 16
#define MLX5_MAX_UMR_PAGES (1 << MLX5_MAX_UMR_SHIFT)
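
/*
 * Editorial note, assuming 4 KiB pages: MLX5_MAX_UMR_PAGES (1 << 16
 * entries) corresponds to a 256 MiB region covered by a single UMR.
 */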

static inline u32 check_cq_create_flags(u32 flags)
{
	/*
	 * It returns a non-zero value for unsupported CQ
	 * create flags, otherwise it returns zero.
	 */
	return (flags & ~(IB_CQ_FLAGS_IGNORE_OVERRUN |
			  IB_CQ_FLAGS_TIMESTAMP_COMPLETION));
}
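
/*
 * Editorial example: check_cq_create_flags(IB_CQ_FLAGS_TIMESTAMP_COMPLETION)
 * yields 0 (supported), while any flag outside the two bits above yields a
 * non-zero value that callers are expected to reject.
 */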

static inline int verify_assign_uidx(u8 cqe_version, u32 cmd_uidx,
				     u32 *user_index)
{
	if (cqe_version) {
		if ((cmd_uidx == MLX5_IB_DEFAULT_UIDX) ||
		    (cmd_uidx & ~MLX5_USER_ASSIGNED_UIDX_MASK))
			return -EINVAL;
		*user_index = cmd_uidx;
	} else {
		*user_index = MLX5_IB_DEFAULT_UIDX;
	}

	return 0;
}
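
/*
 * Editorial usage sketch (hypothetical caller): with CQE version 1 a
 * user-supplied index is validated against the 24-bit mask, while with
 * version 0 the default index is forced:
 *
 *	err = verify_assign_uidx(ucontext->cqe_version, ucmd.uidx, &uidx);
 *	if (err)
 *		return err;
 */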
#endif /* MLX5_IB_H */