/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef MLX5_IB_H
#define MLX5_IB_H

#include <linux/kernel.h>
#include <linux/sched.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_smi.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cq.h>
#include <linux/mlx5/qp.h>
#include <linux/mlx5/srq.h>
#include <linux/types.h>
#include <linux/mlx5/transobj.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/mlx5-abi.h>

#define mlx5_ib_dbg(dev, format, arg...)                                \
pr_debug("%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name, __func__,    \
         __LINE__, current->pid, ##arg)

#define mlx5_ib_err(dev, format, arg...)                                \
pr_err("%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name, __func__,      \
       __LINE__, current->pid, ##arg)

#define mlx5_ib_warn(dev, format, arg...)                               \
pr_warn("%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name, __func__,     \
        __LINE__, current->pid, ##arg)
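
/*
 * Usage note (illustrative, not part of the original header): the three
 * macros above prefix every message with the IB device name, the calling
 * function, the source line and the current pid, e.g.
 *
 *      mlx5_ib_warn(dev, "failed to create CQ, err %d\n", err);
 *
 * would emit something along the lines of
 * "mlx5_0:create_cq:812:(pid 1234): failed to create CQ, err -12".
 */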

#define field_avail(type, fld, sz) (offsetof(type, fld) +               \
                                    sizeof(((type *)0)->fld) <= (sz))
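
/*
 * Illustrative sketch (not part of the original header): field_avail()
 * is the usual guard before copying an optional field into a response
 * struct, where the caller's udata->outlen bounds what a (possibly
 * older) user-space library can accept. Hypothetical example:
 *
 *      if (field_avail(struct mlx5_ib_alloc_ucontext_resp, cqe_version,
 *                      udata->outlen))
 *              resp.cqe_version = ...;
 */
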
#define MLX5_IB_DEFAULT_UIDX 0xffffff
#define MLX5_USER_ASSIGNED_UIDX_MASK __mlx5_mask(qpc, user_index)

#define MLX5_MKEY_PAGE_SHIFT_MASK __mlx5_mask(mkc, log_page_size)

enum {
	MLX5_IB_MMAP_CMD_SHIFT	= 8,
	MLX5_IB_MMAP_CMD_MASK	= 0xff,
};
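
/*
 * Illustrative sketch only, assuming the conventional mmap-offset
 * encoding (the driver's actual decode may differ): the command is
 * carried in the bits of vma->vm_pgoff above MLX5_IB_MMAP_CMD_SHIFT,
 * so a dispatcher could recover it roughly as:
 *
 *      cmd = (vma->vm_pgoff >> MLX5_IB_MMAP_CMD_SHIFT) &
 *            MLX5_IB_MMAP_CMD_MASK;
 */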

enum {
	MLX5_RES_SCAT_DATA32_CQE = 0x1,
	MLX5_RES_SCAT_DATA64_CQE = 0x2,
	MLX5_REQ_SCAT_DATA32_CQE = 0x11,
	MLX5_REQ_SCAT_DATA64_CQE = 0x22,
};

enum mlx5_ib_latency_class {
	MLX5_IB_LATENCY_CLASS_LOW,
	MLX5_IB_LATENCY_CLASS_MEDIUM,
	MLX5_IB_LATENCY_CLASS_HIGH,
};

enum mlx5_ib_mad_ifc_flags {
	MLX5_MAD_IFC_IGNORE_MKEY = 1,
	MLX5_MAD_IFC_IGNORE_BKEY = 2,
	MLX5_MAD_IFC_NET_VIEW	 = 4,
};

enum {
	MLX5_CROSS_CHANNEL_BFREG = 0,
};

enum {
	MLX5_CQE_VERSION_V0,
	MLX5_CQE_VERSION_V1,
};

enum {
	MLX5_TM_MAX_RNDV_MSG_SIZE = 64,
	MLX5_TM_MAX_SGE		  = 1,
};

enum {
	MLX5_IB_INVALID_UAR_INDEX = BIT(31),
	MLX5_IB_INVALID_BFREG	  = BIT(31),
};

struct mlx5_ib_vma_private_data {
	struct list_head list;
	struct vm_area_struct *vma;
	/* protect vma_private_list add/del */
	struct mutex *vma_private_list_mutex;
};

struct mlx5_ib_ucontext {
	struct ib_ucontext ibucontext;
	struct list_head db_page_list;

	/* protect doorbell record alloc/free
	 */
	struct mutex db_page_mutex;
	struct mlx5_bfreg_info bfregi;
	u8 cqe_version;
	/* Transport Domain number */
	u32 tdn;
	struct list_head vma_private_list;
	/* protect vma_private_list add/del */
	struct mutex vma_private_list_mutex;

	u64 lib_caps;
};

static inline struct mlx5_ib_ucontext *to_mucontext(struct ib_ucontext *ibucontext)
{
	return container_of(ibucontext, struct mlx5_ib_ucontext, ibucontext);
}
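
/*
 * Illustrative note: to_mucontext() and the similar helpers further down
 * are the standard container_of() downcasts from the embedded ib_*
 * object handed to a verbs callback back to the driver-private wrapper,
 * e.g. (hypothetical callback body):
 *
 *      struct mlx5_ib_ucontext *ctx = to_mucontext(ibcontext);
 *      mutex_lock(&ctx->db_page_mutex);
 */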

struct mlx5_ib_pd {
	struct ib_pd ibpd;
	u32 pdn;
};

#define MLX5_IB_FLOW_MCAST_PRIO		(MLX5_BY_PASS_NUM_PRIOS - 1)
#define MLX5_IB_FLOW_LAST_PRIO		(MLX5_BY_PASS_NUM_REGULAR_PRIOS - 1)
#if (MLX5_IB_FLOW_LAST_PRIO <= 0)
#error "Invalid number of bypass priorities"
#endif
#define MLX5_IB_FLOW_LEFTOVERS_PRIO	(MLX5_IB_FLOW_MCAST_PRIO + 1)

#define MLX5_IB_NUM_FLOW_FT		(MLX5_IB_FLOW_LEFTOVERS_PRIO + 1)
#define MLX5_IB_NUM_SNIFFER_FTS		2
struct mlx5_ib_flow_prio {
	struct mlx5_flow_table *flow_table;
	unsigned int refcount;
};

struct mlx5_ib_flow_handler {
	struct list_head list;
	struct ib_flow ibflow;
	struct mlx5_ib_flow_prio *prio;
	struct mlx5_flow_handle *rule;
};

struct mlx5_ib_flow_db {
	struct mlx5_ib_flow_prio prios[MLX5_IB_NUM_FLOW_FT];
	struct mlx5_ib_flow_prio sniffer[MLX5_IB_NUM_SNIFFER_FTS];
	struct mlx5_flow_table *lag_demux_ft;
	/* Protect the flow steering bypass flow tables
	 * when adding/removing flow rules; only a single
	 * add/removal of a flow steering rule can be in
	 * progress at a time.
	 */
	struct mutex lock;
};

/* Use macros here so that we don't have to duplicate
 * enum ib_send_flags and enum ib_qp_type for the low-level driver
 */

#define MLX5_IB_SEND_UMR_ENABLE_MR		(IB_SEND_RESERVED_START << 0)
#define MLX5_IB_SEND_UMR_DISABLE_MR		(IB_SEND_RESERVED_START << 1)
#define MLX5_IB_SEND_UMR_FAIL_IF_FREE		(IB_SEND_RESERVED_START << 2)
#define MLX5_IB_SEND_UMR_UPDATE_XLT		(IB_SEND_RESERVED_START << 3)
#define MLX5_IB_SEND_UMR_UPDATE_TRANSLATION	(IB_SEND_RESERVED_START << 4)
#define MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS	 IB_SEND_RESERVED_END

#define MLX5_IB_QPT_REG_UMR	IB_QPT_RESERVED1
/*
 * IB_QPT_GSI creates the software wrapper around GSI, and MLX5_IB_QPT_HW_GSI
 * creates the actual hardware QP.
 */
#define MLX5_IB_QPT_HW_GSI	IB_QPT_RESERVED2
#define MLX5_IB_QPT_DCI		IB_QPT_RESERVED3
#define MLX5_IB_QPT_DCT		IB_QPT_RESERVED4
#define MLX5_IB_WR_UMR		IB_WR_RESERVED1

#define MLX5_IB_UMR_OCTOWORD		16
#define MLX5_IB_UMR_XLT_ALIGNMENT	64

#define MLX5_IB_UPD_XLT_ZAP		BIT(0)
#define MLX5_IB_UPD_XLT_ENABLE		BIT(1)
#define MLX5_IB_UPD_XLT_ATOMIC		BIT(2)
#define MLX5_IB_UPD_XLT_ADDR		BIT(3)
#define MLX5_IB_UPD_XLT_PD		BIT(4)
#define MLX5_IB_UPD_XLT_ACCESS		BIT(5)
#define MLX5_IB_UPD_XLT_INDIRECT	BIT(6)
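
/*
 * Illustrative sketch: the MLX5_IB_UPD_XLT_* bits are OR-ed into the
 * 'flags' argument of mlx5_ib_update_xlt() (declared later in this
 * header). For instance, an ODP-style invalidation of a page range
 * might pass (hypothetical call):
 *
 *      mlx5_ib_update_xlt(mr, idx, npages, PAGE_SHIFT,
 *                         MLX5_IB_UPD_XLT_ZAP | MLX5_IB_UPD_XLT_ATOMIC);
 */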

/* Private QP creation flags to be passed in ib_qp_init_attr.create_flags.
 *
 * These flags are intended for internal use by the mlx5_ib driver, and they
 * rely on the range reserved for that use in the ib_qp_create_flags enum.
 */

/* Create a UD QP whose source QP number is 1 */
static inline enum ib_qp_create_flags mlx5_ib_create_qp_sqpn_qp1(void)
{
	return IB_QP_CREATE_RESERVED_START;
}
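
/*
 * Illustrative only: a QP creation path could honor this private flag
 * roughly as follows (hypothetical snippet, not the driver's actual
 * code):
 *
 *      if (init_attr->create_flags & mlx5_ib_create_qp_sqpn_qp1())
 *              qp->flags |= MLX5_IB_QP_SQPN_QP1;
 */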

struct wr_list {
	u16 opcode;
	u16 next;
};

enum mlx5_ib_rq_flags {
	MLX5_IB_RQ_CVLAN_STRIPPING	  = 1 << 0,
	MLX5_IB_RQ_PCI_WRITE_END_PADDING  = 1 << 1,
};

struct mlx5_ib_wq {
	u64 *wrid;
	u32 *wr_data;
	struct wr_list *w_list;
	unsigned *wqe_head;
	u16 unsig_count;

	/* serialize post to the work queue
	 */
	spinlock_t lock;
	int wqe_cnt;
	int max_post;
	int max_gs;
	int offset;
	int wqe_shift;
	unsigned head;
	unsigned tail;
	u16 cur_post;
	u16 last_poll;
	void *qend;
};

enum mlx5_ib_wq_flags {
	MLX5_IB_WQ_FLAGS_DELAY_DROP  = 0x1,
	MLX5_IB_WQ_FLAGS_STRIDING_RQ = 0x2,
};

#define MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES 9
#define MLX5_MAX_SINGLE_WQE_LOG_NUM_STRIDES 16
#define MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES 6
#define MLX5_MAX_SINGLE_STRIDE_LOG_NUM_BYTES 13

struct mlx5_ib_rwq {
	struct ib_wq ibwq;
	struct mlx5_core_qp core_qp;
	u32 rq_num_pas;
	u32 log_rq_stride;
	u32 log_rq_size;
	u32 rq_page_offset;
	u32 log_page_size;
	u32 log_num_strides;
	u32 two_byte_shift_en;
	u32 single_stride_log_num_of_bytes;
	struct ib_umem *umem;
	size_t buf_size;
	unsigned int page_shift;
	int create_type;
	struct mlx5_db db;
	u32 user_index;
	u32 wqe_count;
	u32 wqe_shift;
	int wq_sig;
	u32 create_flags; /* Use enum mlx5_ib_wq_flags */
};

enum {
	MLX5_QP_USER,
	MLX5_QP_KERNEL,
	MLX5_QP_EMPTY
};

enum {
	MLX5_WQ_USER,
	MLX5_WQ_KERNEL
};

struct mlx5_ib_rwq_ind_table {
	struct ib_rwq_ind_table ib_rwq_ind_tbl;
	u32 rqtn;
};

struct mlx5_ib_ubuffer {
	struct ib_umem *umem;
	int buf_size;
	u64 buf_addr;
};

struct mlx5_ib_qp_base {
	struct mlx5_ib_qp *container_mibqp;
	struct mlx5_core_qp mqp;
	struct mlx5_ib_ubuffer ubuffer;
};

struct mlx5_ib_qp_trans {
	struct mlx5_ib_qp_base base;
	u16 xrcdn;
	u8 alt_port;
	u8 atomic_rd_en;
	u8 resp_depth;
};

struct mlx5_ib_rss_qp {
	u32 tirn;
};

struct mlx5_ib_rq {
	struct mlx5_ib_qp_base base;
	struct mlx5_ib_wq *rq;
	struct mlx5_ib_ubuffer ubuffer;
	struct mlx5_db *doorbell;
	u32 tirn;
	u8 state;
	u32 flags;
};

struct mlx5_ib_sq {
	struct mlx5_ib_qp_base base;
	struct mlx5_ib_wq *sq;
	struct mlx5_ib_ubuffer ubuffer;
	struct mlx5_db *doorbell;
	struct mlx5_flow_handle *flow_rule;
	u32 tisn;
	u8 state;
};

struct mlx5_ib_raw_packet_qp {
	struct mlx5_ib_sq sq;
	struct mlx5_ib_rq rq;
};

struct mlx5_bf {
	int buf_size;
	unsigned long offset;
	struct mlx5_sq_bfreg *bfreg;
};

struct mlx5_ib_dct {
	struct mlx5_core_dct mdct;
	u32 *in;
};

struct mlx5_ib_qp {
	struct ib_qp ibqp;
	union {
		struct mlx5_ib_qp_trans trans_qp;
		struct mlx5_ib_raw_packet_qp raw_packet_qp;
		struct mlx5_ib_rss_qp rss_qp;
		struct mlx5_ib_dct dct;
	};
	struct mlx5_frag_buf buf;

	struct mlx5_db db;
	struct mlx5_ib_wq rq;

	u8 sq_signal_bits;
	u8 next_fence;
	struct mlx5_ib_wq sq;

	/* serialize qp state modifications
	 */
	struct mutex mutex;
	u32 flags;
	u8 port;
	u8 state;
	int wq_sig;
	int scat_cqe;
	int max_inline_data;
	struct mlx5_bf bf;
	int has_rq;

	/* only for user space QPs. For kernel
	 * we have it from the bf object
	 */
	int bfregn;

	int create_type;

	/* Store signature errors */
	bool signature_en;

	struct list_head qps_list;
	struct list_head cq_recv_list;
	struct list_head cq_send_list;
	struct mlx5_rate_limit rl;
	u32 underlay_qpn;
	bool tunnel_offload_en;
	/* storage for qp sub type when core qp type is IB_QPT_DRIVER */
	enum ib_qp_type qp_sub_type;
};

struct mlx5_ib_cq_buf {
	struct mlx5_frag_buf_ctrl fbc;
	struct ib_umem *umem;
	int cqe_size;
	int nent;
};

enum mlx5_ib_qp_flags {
	MLX5_IB_QP_LSO				= IB_QP_CREATE_IPOIB_UD_LSO,
	MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK	= IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK,
	MLX5_IB_QP_CROSS_CHANNEL		= IB_QP_CREATE_CROSS_CHANNEL,
	MLX5_IB_QP_MANAGED_SEND			= IB_QP_CREATE_MANAGED_SEND,
	MLX5_IB_QP_MANAGED_RECV			= IB_QP_CREATE_MANAGED_RECV,
	MLX5_IB_QP_SIGNATURE_HANDLING		= 1 << 5,
	/* QP uses 1 as its source QP number */
	MLX5_IB_QP_SQPN_QP1			= 1 << 6,
	MLX5_IB_QP_CAP_SCATTER_FCS		= 1 << 7,
	MLX5_IB_QP_RSS				= 1 << 8,
	MLX5_IB_QP_CVLAN_STRIPPING		= 1 << 9,
	MLX5_IB_QP_UNDERLAY			= 1 << 10,
	MLX5_IB_QP_PCI_WRITE_END_PADDING	= 1 << 11,
	MLX5_IB_QP_TUNNEL_OFFLOAD		= 1 << 12,
};

struct mlx5_umr_wr {
	struct ib_send_wr wr;
	u64 virt_addr;
	u64 offset;
	struct ib_pd *pd;
	unsigned int page_shift;
	unsigned int xlt_size;
	u64 length;
	int access_flags;
	u32 mkey;
};

static inline struct mlx5_umr_wr *umr_wr(struct ib_send_wr *wr)
{
	return container_of(wr, struct mlx5_umr_wr, wr);
}
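
/*
 * Illustrative only: a post-send path that sees opcode MLX5_IB_WR_UMR
 * can recover the driver-private request with the wrapper above, e.g.
 * (hypothetical):
 *
 *      const struct mlx5_umr_wr *umrwr = umr_wr(wr);
 *      ... use umrwr->mkey, umrwr->xlt_size, umrwr->access_flags ...
 */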

struct mlx5_shared_mr_info {
	int mr_id;
	struct ib_umem *umem;
};

enum mlx5_ib_cq_pr_flags {
	MLX5_IB_CQ_PR_FLAGS_CQE_128_PAD	= 1 << 0,
};

struct mlx5_ib_cq {
	struct ib_cq ibcq;
	struct mlx5_core_cq mcq;
	struct mlx5_ib_cq_buf buf;
	struct mlx5_db db;

	/* serialize access to the CQ
	 */
	spinlock_t lock;

	/* protect resize cq
	 */
	struct mutex resize_mutex;
	struct mlx5_ib_cq_buf *resize_buf;
	struct ib_umem *resize_umem;
	int cqe_size;
	struct list_head list_send_qp;
	struct list_head list_recv_qp;
	u32 create_flags;
	struct list_head wc_list;
	enum ib_cq_notify_flags notify_flags;
	struct work_struct notify_work;
	u16 private_flags; /* Use mlx5_ib_cq_pr_flags */
};

struct mlx5_ib_wc {
	struct ib_wc wc;
	struct list_head list;
};

struct mlx5_ib_srq {
	struct ib_srq ibsrq;
	struct mlx5_core_srq msrq;
	struct mlx5_frag_buf buf;
	struct mlx5_db db;
	u64 *wrid;
	/* protect SRQ handling
	 */
	spinlock_t lock;
	int head;
	int tail;
	u16 wqe_ctr;
	struct ib_umem *umem;
	/* serialize arming a SRQ
	 */
	struct mutex mutex;
	int wq_sig;
};

struct mlx5_ib_xrcd {
	struct ib_xrcd ibxrcd;
	u32 xrcdn;
};

enum mlx5_ib_mtt_access_flags {
	MLX5_IB_MTT_READ  = (1 << 0),
	MLX5_IB_MTT_WRITE = (1 << 1),
};

#define MLX5_IB_MTT_PRESENT (MLX5_IB_MTT_READ | MLX5_IB_MTT_WRITE)
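
/*
 * Illustrative sketch, assuming the usual MTT entry layout: when a
 * page-address (PAS) array is populated for an ODP-capable MR, present
 * pages are tagged with these access bits in the low bits of the 64-bit
 * entry, e.g. (hypothetical):
 *
 *      pas[i] = cpu_to_be64(page_dma_addr | MLX5_IB_MTT_PRESENT);
 */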

struct mlx5_ib_mr {
	struct ib_mr ibmr;
	void *descs;
	dma_addr_t desc_map;
	int ndescs;
	int max_descs;
	int desc_size;
	int access_mode;
	struct mlx5_core_mkey mmkey;
	struct ib_umem *umem;
	struct mlx5_shared_mr_info *smr_info;
	struct list_head list;
	int order;
	bool allocated_from_cache;
	int npages;
	struct mlx5_ib_dev *dev;
	u32 out[MLX5_ST_SZ_DW(create_mkey_out)];
	struct mlx5_core_sig_ctx *sig;
	int live;
	void *descs_alloc;
	int access_flags; /* Needed for rereg MR */

	struct mlx5_ib_mr *parent;
	atomic_t num_leaf_free;
	wait_queue_head_t q_leaf_free;
};

struct mlx5_ib_mw {
	struct ib_mw ibmw;
	struct mlx5_core_mkey mmkey;
	int ndescs;
};

struct mlx5_ib_umr_context {
	struct ib_cqe cqe;
	enum ib_wc_status status;
	struct completion done;
};

struct umr_common {
	struct ib_pd *pd;
	struct ib_cq *cq;
	struct ib_qp *qp;
	/* control access to UMR QP
	 */
	struct semaphore sem;
};

enum {
	MLX5_FMR_INVALID,
	MLX5_FMR_VALID,
	MLX5_FMR_BUSY,
};

struct mlx5_cache_ent {
	struct list_head head;
	/* sync access to the cache entry
	 */
	spinlock_t lock;

	struct dentry *dir;
	char name[4];
	u32 order;
	u32 xlt;
	u32 access_mode;
	u32 page;

	u32 size;
	u32 cur;
	u32 miss;
	u32 limit;

	struct dentry *fsize;
	struct dentry *fcur;
	struct dentry *fmiss;
	struct dentry *flimit;

	struct mlx5_ib_dev *dev;
	struct work_struct work;
	struct delayed_work dwork;
	int pending;
	struct completion compl;
};

struct mlx5_mr_cache {
	struct workqueue_struct *wq;
	struct mlx5_cache_ent ent[MAX_MR_CACHE_ENTRIES];
	int stopped;
	struct dentry *root;
	unsigned long last_add;
};

struct mlx5_ib_gsi_qp;

struct mlx5_ib_port_resources {
	struct mlx5_ib_resources *devr;
	struct mlx5_ib_gsi_qp *gsi;
	struct work_struct pkey_change_work;
};

struct mlx5_ib_resources {
	struct ib_cq *c0;
	struct ib_xrcd *x0;
	struct ib_xrcd *x1;
	struct ib_pd *p0;
	struct ib_srq *s0;
	struct ib_srq *s1;
	struct mlx5_ib_port_resources ports[2];
	/* Protects changes to the port resources */
	struct mutex mutex;
};

struct mlx5_ib_counters {
	const char **names;
	size_t *offsets;
	u32 num_q_counters;
	u32 num_cong_counters;
	u16 set_id;
	bool set_id_valid;
};

struct mlx5_ib_multiport_info;

struct mlx5_ib_multiport {
	struct mlx5_ib_multiport_info *mpi;
	/* To be held when accessing the multiport info */
	spinlock_t mpi_lock;
};

struct mlx5_ib_port {
	struct mlx5_ib_counters cnts;
	struct mlx5_ib_multiport mp;
	struct mlx5_ib_dbg_cc_params *dbg_cc_params;
};

struct mlx5_roce {
	/* Protect mlx5_ib_get_netdev from invoking dev_hold() with a NULL
	 * netdev pointer
	 */
	rwlock_t netdev_lock;
	struct net_device *netdev;
	struct notifier_block nb;
	atomic_t next_port;
	enum ib_port_state last_port_state;
	struct mlx5_ib_dev *dev;
	u8 native_port_num;
};

struct mlx5_ib_dbg_param {
	int offset;
	struct mlx5_ib_dev *dev;
	struct dentry *dentry;
	u8 port_num;
};

enum mlx5_ib_dbg_cc_types {
	MLX5_IB_DBG_CC_RP_CLAMP_TGT_RATE,
	MLX5_IB_DBG_CC_RP_CLAMP_TGT_RATE_ATI,
	MLX5_IB_DBG_CC_RP_TIME_RESET,
	MLX5_IB_DBG_CC_RP_BYTE_RESET,
	MLX5_IB_DBG_CC_RP_THRESHOLD,
	MLX5_IB_DBG_CC_RP_AI_RATE,
	MLX5_IB_DBG_CC_RP_HAI_RATE,
	MLX5_IB_DBG_CC_RP_MIN_DEC_FAC,
	MLX5_IB_DBG_CC_RP_MIN_RATE,
	MLX5_IB_DBG_CC_RP_RATE_TO_SET_ON_FIRST_CNP,
	MLX5_IB_DBG_CC_RP_DCE_TCP_G,
	MLX5_IB_DBG_CC_RP_DCE_TCP_RTT,
	MLX5_IB_DBG_CC_RP_RATE_REDUCE_MONITOR_PERIOD,
	MLX5_IB_DBG_CC_RP_INITIAL_ALPHA_VALUE,
	MLX5_IB_DBG_CC_RP_GD,
	MLX5_IB_DBG_CC_NP_CNP_DSCP,
	MLX5_IB_DBG_CC_NP_CNP_PRIO_MODE,
	MLX5_IB_DBG_CC_NP_CNP_PRIO,
	MLX5_IB_DBG_CC_MAX,
};

struct mlx5_ib_dbg_cc_params {
	struct dentry *root;
	struct mlx5_ib_dbg_param params[MLX5_IB_DBG_CC_MAX];
};

enum {
	MLX5_MAX_DELAY_DROP_TIMEOUT_MS = 100,
};

struct mlx5_ib_dbg_delay_drop {
	struct dentry *dir_debugfs;
	struct dentry *rqs_cnt_debugfs;
	struct dentry *events_cnt_debugfs;
	struct dentry *timeout_debugfs;
};

struct mlx5_ib_delay_drop {
	struct mlx5_ib_dev *dev;
	struct work_struct delay_drop_work;
	/* serialize setting of delay drop */
	struct mutex lock;
	u32 timeout;
	bool activate;
	atomic_t events_cnt;
	atomic_t rqs_cnt;
	struct mlx5_ib_dbg_delay_drop *dbg;
};

enum mlx5_ib_stages {
	MLX5_IB_STAGE_INIT,
	MLX5_IB_STAGE_FLOW_DB,
	MLX5_IB_STAGE_CAPS,
	MLX5_IB_STAGE_NON_DEFAULT_CB,
	MLX5_IB_STAGE_ROCE,
	MLX5_IB_STAGE_DEVICE_RESOURCES,
	MLX5_IB_STAGE_ODP,
	MLX5_IB_STAGE_COUNTERS,
	MLX5_IB_STAGE_CONG_DEBUGFS,
	MLX5_IB_STAGE_UAR,
	MLX5_IB_STAGE_BFREG,
	MLX5_IB_STAGE_PRE_IB_REG_UMR,
	MLX5_IB_STAGE_SPECS,
	MLX5_IB_STAGE_IB_REG,
	MLX5_IB_STAGE_POST_IB_REG_UMR,
	MLX5_IB_STAGE_DELAY_DROP,
	MLX5_IB_STAGE_CLASS_ATTR,
	MLX5_IB_STAGE_REP_REG,
	MLX5_IB_STAGE_MAX,
};

struct mlx5_ib_stage {
	int (*init)(struct mlx5_ib_dev *dev);
	void (*cleanup)(struct mlx5_ib_dev *dev);
};

#define STAGE_CREATE(_stage, _init, _cleanup) \
	.stage[_stage] = {.init = _init, .cleanup = _cleanup}

struct mlx5_ib_profile {
	struct mlx5_ib_stage stage[MLX5_IB_STAGE_MAX];
};
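
/*
 * Illustrative only: a device profile is an array of init/cleanup pairs
 * indexed by stage and built with STAGE_CREATE(). A hypothetical sketch
 * (the stage function names are placeholders):
 *
 *      static const struct mlx5_ib_profile pf_profile = {
 *              STAGE_CREATE(MLX5_IB_STAGE_INIT,
 *                           mlx5_ib_stage_init_init,
 *                           mlx5_ib_stage_init_cleanup),
 *              ...
 *      };
 */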
| 763 | |
Daniel Jurgens | 32f69e4 | 2018-01-04 17:25:36 +0200 | [diff] [blame] | 764 | struct mlx5_ib_multiport_info { |
| 765 | struct list_head list; |
| 766 | struct mlx5_ib_dev *ibdev; |
| 767 | struct mlx5_core_dev *mdev; |
| 768 | struct completion unref_comp; |
| 769 | u64 sys_image_guid; |
| 770 | u32 mdev_refcnt; |
| 771 | bool is_master; |
| 772 | bool unaffiliate; |
| 773 | }; |
| 774 | |
Aviad Yehezkel | c6475a0 | 2018-03-28 09:27:50 +0300 | [diff] [blame^] | 775 | struct mlx5_ib_flow_action { |
| 776 | struct ib_flow_action ib_action; |
| 777 | union { |
| 778 | struct { |
| 779 | u64 ib_flags; |
| 780 | struct mlx5_accel_esp_xfrm *ctx; |
| 781 | } esp_aes_gcm; |
| 782 | }; |
| 783 | }; |
| 784 | |
Eli Cohen | e126ba9 | 2013-07-07 17:25:49 +0300 | [diff] [blame] | 785 | struct mlx5_ib_dev { |
| 786 | struct ib_device ib_dev; |
Jack Morgenstein | 9603b61 | 2014-07-28 23:30:22 +0300 | [diff] [blame] | 787 | struct mlx5_core_dev *mdev; |
Daniel Jurgens | 7fd8aef | 2018-01-04 17:25:35 +0200 | [diff] [blame] | 788 | struct mlx5_roce roce[MLX5_MAX_PORTS]; |
Eli Cohen | e126ba9 | 2013-07-07 17:25:49 +0300 | [diff] [blame] | 789 | int num_ports; |
Eli Cohen | e126ba9 | 2013-07-07 17:25:49 +0300 | [diff] [blame] | 790 | /* serialize update of capability mask |
| 791 | */ |
| 792 | struct mutex cap_mask_mutex; |
| 793 | bool ib_active; |
| 794 | struct umr_common umrc; |
| 795 | /* sync used page count stats |
| 796 | */ |
Eli Cohen | e126ba9 | 2013-07-07 17:25:49 +0300 | [diff] [blame] | 797 | struct mlx5_ib_resources devr; |
| 798 | struct mlx5_mr_cache cache; |
Eli Cohen | 746b558 | 2013-10-23 09:53:14 +0300 | [diff] [blame] | 799 | struct timer_list delay_timer; |
Moshe Lazer | 6bc1a65 | 2016-10-27 16:36:42 +0300 | [diff] [blame] | 800 | /* Prevents soft lock on massive reg MRs */ |
| 801 | struct mutex slow_path_mutex; |
Eli Cohen | 746b558 | 2013-10-23 09:53:14 +0300 | [diff] [blame] | 802 | int fill_delay; |
Haggai Eran | 8cdd312 | 2014-12-11 17:04:20 +0200 | [diff] [blame] | 803 | #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING |
| 804 | struct ib_odp_caps odp_caps; |
Artemy Kovalyov | c438fde | 2017-01-02 11:37:43 +0200 | [diff] [blame] | 805 | u64 odp_max_size; |
Haggai Eran | 6aec21f | 2014-12-11 17:04:23 +0200 | [diff] [blame] | 806 | /* |
| 807 | * Sleepable RCU that prevents destruction of MRs while they are still |
| 808 | * being used by a page fault handler. |
| 809 | */ |
| 810 | struct srcu_struct mr_srcu; |
Artemy Kovalyov | 81713d3 | 2017-01-18 16:58:11 +0200 | [diff] [blame] | 811 | u32 null_mkey; |
Haggai Eran | 8cdd312 | 2014-12-11 17:04:20 +0200 | [diff] [blame] | 812 | #endif |
Mark Bloch | 9a4ca38 | 2018-01-16 14:42:35 +0000 | [diff] [blame] | 813 | struct mlx5_ib_flow_db *flow_db; |
Maor Gottlieb | 89ea94a7 | 2016-06-17 15:01:38 +0300 | [diff] [blame] | 814 | /* protect resources needed as part of reset flow */ |
| 815 | spinlock_t reset_flow_resource_lock; |
| 816 | struct list_head qp_list; |
Mark Bloch | 0837e86 | 2016-06-17 15:10:55 +0300 | [diff] [blame] | 817 | /* Array with num_ports elements */ |
| 818 | struct mlx5_ib_port *port; |
Huy Nguyen | c85023e | 2017-05-30 09:42:54 +0300 | [diff] [blame] | 819 | struct mlx5_sq_bfreg bfreg; |
| 820 | struct mlx5_sq_bfreg fp_bfreg; |
Maor Gottlieb | 03404e8 | 2017-05-30 10:29:13 +0300 | [diff] [blame] | 821 | struct mlx5_ib_delay_drop delay_drop; |
Mark Bloch | 16c1975 | 2018-01-01 13:06:58 +0200 | [diff] [blame] | 822 | const struct mlx5_ib_profile *profile; |
Mark Bloch | fc385b7 | 2018-01-16 14:34:48 +0000 | [diff] [blame] | 823 | struct mlx5_eswitch_rep *rep; |
Huy Nguyen | c85023e | 2017-05-30 09:42:54 +0300 | [diff] [blame] | 824 | |
| 825 | /* protect the user_td */ |
| 826 | struct mutex lb_mutex; |
| 827 | u32 user_td; |
| 828 | u8 umr_fence; |
Daniel Jurgens | 32f69e4 | 2018-01-04 17:25:36 +0200 | [diff] [blame] | 829 | struct list_head ib_dev_list; |
| 830 | u64 sys_image_guid; |
Eli Cohen | e126ba9 | 2013-07-07 17:25:49 +0300 | [diff] [blame] | 831 | }; |
| 832 | |
| 833 | static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq) |
| 834 | { |
| 835 | return container_of(mcq, struct mlx5_ib_cq, mcq); |
| 836 | } |
| 837 | |
| 838 | static inline struct mlx5_ib_xrcd *to_mxrcd(struct ib_xrcd *ibxrcd) |
| 839 | { |
| 840 | return container_of(ibxrcd, struct mlx5_ib_xrcd, ibxrcd); |
| 841 | } |
| 842 | |
| 843 | static inline struct mlx5_ib_dev *to_mdev(struct ib_device *ibdev) |
| 844 | { |
| 845 | return container_of(ibdev, struct mlx5_ib_dev, ib_dev); |
| 846 | } |
| 847 | |
Eli Cohen | e126ba9 | 2013-07-07 17:25:49 +0300 | [diff] [blame] | 848 | static inline struct mlx5_ib_cq *to_mcq(struct ib_cq *ibcq) |
| 849 | { |
| 850 | return container_of(ibcq, struct mlx5_ib_cq, ibcq); |
| 851 | } |
| 852 | |
| 853 | static inline struct mlx5_ib_qp *to_mibqp(struct mlx5_core_qp *mqp) |
| 854 | { |
majd@mellanox.com | 19098df | 2016-01-14 19:13:03 +0200 | [diff] [blame] | 855 | return container_of(mqp, struct mlx5_ib_qp_base, mqp)->container_mibqp; |
Eli Cohen | e126ba9 | 2013-07-07 17:25:49 +0300 | [diff] [blame] | 856 | } |
| 857 | |
Yishai Hadas | 350d0e4 | 2016-08-28 14:58:18 +0300 | [diff] [blame] | 858 | static inline struct mlx5_ib_rwq *to_mibrwq(struct mlx5_core_qp *core_qp) |
| 859 | { |
| 860 | return container_of(core_qp, struct mlx5_ib_rwq, core_qp); |
| 861 | } |
| 862 | |
Matan Barak | a606b0f | 2016-02-29 18:05:28 +0200 | [diff] [blame] | 863 | static inline struct mlx5_ib_mr *to_mibmr(struct mlx5_core_mkey *mmkey) |
Sagi Grimberg | d5436ba | 2014-02-23 14:19:12 +0200 | [diff] [blame] | 864 | { |
Matan Barak | a606b0f | 2016-02-29 18:05:28 +0200 | [diff] [blame] | 865 | return container_of(mmkey, struct mlx5_ib_mr, mmkey); |
Sagi Grimberg | d5436ba | 2014-02-23 14:19:12 +0200 | [diff] [blame] | 866 | } |
| 867 | |
Eli Cohen | e126ba9 | 2013-07-07 17:25:49 +0300 | [diff] [blame] | 868 | static inline struct mlx5_ib_pd *to_mpd(struct ib_pd *ibpd) |
| 869 | { |
| 870 | return container_of(ibpd, struct mlx5_ib_pd, ibpd); |
| 871 | } |
| 872 | |
| 873 | static inline struct mlx5_ib_srq *to_msrq(struct ib_srq *ibsrq) |
| 874 | { |
| 875 | return container_of(ibsrq, struct mlx5_ib_srq, ibsrq); |
| 876 | } |
| 877 | |
| 878 | static inline struct mlx5_ib_qp *to_mqp(struct ib_qp *ibqp) |
| 879 | { |
| 880 | return container_of(ibqp, struct mlx5_ib_qp, ibqp); |
| 881 | } |
| 882 | |
Yishai Hadas | 79b20a6 | 2016-05-23 15:20:50 +0300 | [diff] [blame] | 883 | static inline struct mlx5_ib_rwq *to_mrwq(struct ib_wq *ibwq) |
| 884 | { |
| 885 | return container_of(ibwq, struct mlx5_ib_rwq, ibwq); |
| 886 | } |
| 887 | |
Yishai Hadas | c5f9092 | 2016-05-23 15:20:53 +0300 | [diff] [blame] | 888 | static inline struct mlx5_ib_rwq_ind_table *to_mrwq_ind_table(struct ib_rwq_ind_table *ib_rwq_ind_tbl) |
| 889 | { |
| 890 | return container_of(ib_rwq_ind_tbl, struct mlx5_ib_rwq_ind_table, ib_rwq_ind_tbl); |
| 891 | } |
| 892 | |
Eli Cohen | e126ba9 | 2013-07-07 17:25:49 +0300 | [diff] [blame] | 893 | static inline struct mlx5_ib_srq *to_mibsrq(struct mlx5_core_srq *msrq) |
| 894 | { |
| 895 | return container_of(msrq, struct mlx5_ib_srq, msrq); |
| 896 | } |
| 897 | |
| 898 | static inline struct mlx5_ib_mr *to_mmr(struct ib_mr *ibmr) |
| 899 | { |
| 900 | return container_of(ibmr, struct mlx5_ib_mr, ibmr); |
| 901 | } |
| 902 | |
Matan Barak | d2370e0 | 2016-02-29 18:05:30 +0200 | [diff] [blame] | 903 | static inline struct mlx5_ib_mw *to_mmw(struct ib_mw *ibmw) |
| 904 | { |
| 905 | return container_of(ibmw, struct mlx5_ib_mw, ibmw); |
| 906 | } |
| 907 | |
Aviad Yehezkel | c6475a0 | 2018-03-28 09:27:50 +0300 | [diff] [blame^] | 908 | static inline struct mlx5_ib_flow_action * |
| 909 | to_mflow_act(struct ib_flow_action *ibact) |
| 910 | { |
| 911 | return container_of(ibact, struct mlx5_ib_flow_action, ib_action); |
| 912 | } |
| 913 | |
Eli Cohen | e126ba9 | 2013-07-07 17:25:49 +0300 | [diff] [blame] | 914 | int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context, unsigned long virt, |
| 915 | struct mlx5_db *db); |
| 916 | void mlx5_ib_db_unmap_user(struct mlx5_ib_ucontext *context, struct mlx5_db *db); |
| 917 | void __mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq); |
| 918 | void mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq); |
| 919 | void mlx5_ib_free_srq_wqe(struct mlx5_ib_srq *srq, int wqe_index); |
| 920 | int mlx5_MAD_IFC(struct mlx5_ib_dev *dev, int ignore_mkey, int ignore_bkey, |
Ira Weiny | a97e2d8 | 2015-05-31 17:15:30 -0400 | [diff] [blame] | 921 | u8 port, const struct ib_wc *in_wc, const struct ib_grh *in_grh, |
| 922 | const void *in_mad, void *response_mad); |
Dasaratharaman Chandramouli | 9089885 | 2017-04-29 14:41:18 -0400 | [diff] [blame] | 923 | struct ib_ah *mlx5_ib_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr, |
Moni Shoua | 477864c | 2016-11-23 08:23:24 +0200 | [diff] [blame] | 924 | struct ib_udata *udata); |
Dasaratharaman Chandramouli | 9089885 | 2017-04-29 14:41:18 -0400 | [diff] [blame] | 925 | int mlx5_ib_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr); |
Eli Cohen | e126ba9 | 2013-07-07 17:25:49 +0300 | [diff] [blame] | 926 | int mlx5_ib_destroy_ah(struct ib_ah *ah); |
| 927 | struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd, |
| 928 | struct ib_srq_init_attr *init_attr, |
| 929 | struct ib_udata *udata); |
| 930 | int mlx5_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr, |
| 931 | enum ib_srq_attr_mask attr_mask, struct ib_udata *udata); |
| 932 | int mlx5_ib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr); |
| 933 | int mlx5_ib_destroy_srq(struct ib_srq *srq); |
| 934 | int mlx5_ib_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr, |
| 935 | struct ib_recv_wr **bad_wr); |
| 936 | struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd, |
| 937 | struct ib_qp_init_attr *init_attr, |
| 938 | struct ib_udata *udata); |
| 939 | int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, |
| 940 | int attr_mask, struct ib_udata *udata); |
| 941 | int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask, |
| 942 | struct ib_qp_init_attr *qp_init_attr); |
| 943 | int mlx5_ib_destroy_qp(struct ib_qp *qp); |
| 944 | int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, |
| 945 | struct ib_send_wr **bad_wr); |
| 946 | int mlx5_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr, |
| 947 | struct ib_recv_wr **bad_wr); |
| 948 | void *mlx5_get_send_wqe(struct mlx5_ib_qp *qp, int n); |
Haggai Eran | c1395a2 | 2014-12-11 17:04:14 +0200 | [diff] [blame] | 949 | int mlx5_ib_read_user_wqe(struct mlx5_ib_qp *qp, int send, int wqe_index, |
majd@mellanox.com | 19098df | 2016-01-14 19:13:03 +0200 | [diff] [blame] | 950 | void *buffer, u32 length, |
| 951 | struct mlx5_ib_qp_base *base); |
Matan Barak | bcf4c1e | 2015-06-11 16:35:20 +0300 | [diff] [blame] | 952 | struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev, |
| 953 | const struct ib_cq_init_attr *attr, |
| 954 | struct ib_ucontext *context, |
Eli Cohen | e126ba9 | 2013-07-07 17:25:49 +0300 | [diff] [blame] | 955 | struct ib_udata *udata); |
| 956 | int mlx5_ib_destroy_cq(struct ib_cq *cq); |
| 957 | int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc); |
| 958 | int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags); |
| 959 | int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period); |
| 960 | int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata); |
| 961 | struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc); |
| 962 | struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, |
| 963 | u64 virt_addr, int access_flags, |
| 964 | struct ib_udata *udata); |
Matan Barak | d2370e0 | 2016-02-29 18:05:30 +0200 | [diff] [blame] | 965 | struct ib_mw *mlx5_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type, |
| 966 | struct ib_udata *udata); |
| 967 | int mlx5_ib_dealloc_mw(struct ib_mw *mw); |
Artemy Kovalyov | 7d0cc6e | 2017-01-02 11:37:44 +0200 | [diff] [blame] | 968 | int mlx5_ib_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages, |
| 969 | int page_shift, int flags); |
Artemy Kovalyov | 81713d3 | 2017-01-18 16:58:11 +0200 | [diff] [blame] | 970 | struct mlx5_ib_mr *mlx5_ib_alloc_implicit_mr(struct mlx5_ib_pd *pd, |
| 971 | int access_flags); |
| 972 | void mlx5_ib_free_implicit_mr(struct mlx5_ib_mr *mr); |
Noa Osherovich | 56e11d6 | 2016-02-29 16:46:51 +0200 | [diff] [blame] | 973 | int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start, |
| 974 | u64 length, u64 virt_addr, int access_flags, |
| 975 | struct ib_pd *pd, struct ib_udata *udata); |
Eli Cohen | e126ba9 | 2013-07-07 17:25:49 +0300 | [diff] [blame] | 976 | int mlx5_ib_dereg_mr(struct ib_mr *ibmr); |
Sagi Grimberg | 9bee178 | 2015-07-30 10:32:35 +0300 | [diff] [blame] | 977 | struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd, |
| 978 | enum ib_mr_type mr_type, |
| 979 | u32 max_num_sg); |
Christoph Hellwig | ff2ba99 | 2016-05-03 18:01:04 +0200 | [diff] [blame] | 980 | int mlx5_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents, |
Bart Van Assche | 9aa8b32 | 2016-05-12 10:49:15 -0700 | [diff] [blame] | 981 | unsigned int *sg_offset); |
Eli Cohen | e126ba9 | 2013-07-07 17:25:49 +0300 | [diff] [blame] | 982 | int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, |
Ira Weiny | a97e2d8 | 2015-05-31 17:15:30 -0400 | [diff] [blame] | 983 | const struct ib_wc *in_wc, const struct ib_grh *in_grh, |
Ira Weiny | 4cd7c94 | 2015-06-06 14:38:31 -0400 | [diff] [blame] | 984 | const struct ib_mad_hdr *in, size_t in_mad_size, |
| 985 | struct ib_mad_hdr *out, size_t *out_mad_size, |
| 986 | u16 *out_mad_pkey_index); |
Eli Cohen | e126ba9 | 2013-07-07 17:25:49 +0300 | [diff] [blame] | 987 | struct ib_xrcd *mlx5_ib_alloc_xrcd(struct ib_device *ibdev, |
| 988 | struct ib_ucontext *context, |
| 989 | struct ib_udata *udata); |
| 990 | int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd); |
Eli Cohen | e126ba9 | 2013-07-07 17:25:49 +0300 | [diff] [blame] | 991 | int mlx5_ib_get_buf_offset(u64 addr, int page_shift, u32 *offset); |
| 992 | int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, u8 port); |
Majd Dibbiny | 1b5daf1 | 2015-06-04 19:30:46 +0300 | [diff] [blame] | 993 | int mlx5_query_mad_ifc_smp_attr_node_info(struct ib_device *ibdev, |
| 994 | struct ib_smp *out_mad); |
| 995 | int mlx5_query_mad_ifc_system_image_guid(struct ib_device *ibdev, |
| 996 | __be64 *sys_image_guid); |
| 997 | int mlx5_query_mad_ifc_max_pkeys(struct ib_device *ibdev, |
| 998 | u16 *max_pkeys); |
| 999 | int mlx5_query_mad_ifc_vendor_id(struct ib_device *ibdev, |
| 1000 | u32 *vendor_id); |
| 1001 | int mlx5_query_mad_ifc_node_desc(struct mlx5_ib_dev *dev, char *node_desc); |
| 1002 | int mlx5_query_mad_ifc_node_guid(struct mlx5_ib_dev *dev, __be64 *node_guid); |
| 1003 | int mlx5_query_mad_ifc_pkey(struct ib_device *ibdev, u8 port, u16 index, |
| 1004 | u16 *pkey); |
| 1005 | int mlx5_query_mad_ifc_gids(struct ib_device *ibdev, u8 port, int index, |
| 1006 | union ib_gid *gid); |
| 1007 | int mlx5_query_mad_ifc_port(struct ib_device *ibdev, u8 port, |
| 1008 | struct ib_port_attr *props); |
Eli Cohen | e126ba9 | 2013-07-07 17:25:49 +0300 | [diff] [blame] | 1009 | int mlx5_ib_query_port(struct ib_device *ibdev, u8 port, |
| 1010 | struct ib_port_attr *props); |
| 1011 | int mlx5_ib_init_fmr(struct mlx5_ib_dev *dev); |
| 1012 | void mlx5_ib_cleanup_fmr(struct mlx5_ib_dev *dev); |
Majd Dibbiny | 762f899 | 2016-10-27 16:36:47 +0300 | [diff] [blame] | 1013 | void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, |
| 1014 | unsigned long max_page_shift, |
| 1015 | int *count, int *shift, |
Eli Cohen | e126ba9 | 2013-07-07 17:25:49 +0300 | [diff] [blame] | 1016 | int *ncont, int *order); |
Haggai Eran | 832a6b0 | 2014-12-11 17:04:22 +0200 | [diff] [blame] | 1017 | void __mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem, |
| 1018 | int page_shift, size_t offset, size_t num_pages, |
| 1019 | __be64 *pas, int access_flags); |
Eli Cohen | e126ba9 | 2013-07-07 17:25:49 +0300 | [diff] [blame] | 1020 | void mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem, |
Haggai Eran | cc149f75 | 2014-12-11 17:04:21 +0200 | [diff] [blame] | 1021 | int page_shift, __be64 *pas, int access_flags); |
void mlx5_ib_copy_pas(u64 *old, u64 *new, int step, int num);
int mlx5_ib_get_cqe_size(struct mlx5_ib_dev *dev, struct ib_cq *ibcq);
int mlx5_mr_cache_init(struct mlx5_ib_dev *dev);
int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev);

struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev, int entry);
void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
			    struct ib_mr_status *mr_status);
struct ib_wq *mlx5_ib_create_wq(struct ib_pd *pd,
				struct ib_wq_init_attr *init_attr,
				struct ib_udata *udata);
int mlx5_ib_destroy_wq(struct ib_wq *wq);
int mlx5_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
		      u32 wq_attr_mask, struct ib_udata *udata);
struct ib_rwq_ind_table *mlx5_ib_create_rwq_ind_table(struct ib_device *device,
						      struct ib_rwq_ind_table_init_attr *init_attr,
						      struct ib_udata *udata);
int mlx5_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table);
bool mlx5_ib_dc_atomic_is_supported(struct mlx5_ib_dev *dev);

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev);
void mlx5_ib_pfault(struct mlx5_core_dev *mdev, void *context,
		    struct mlx5_pagefault *pfault);
int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev);
int __init mlx5_ib_odp_init(void);
void mlx5_ib_odp_cleanup(void);
void mlx5_ib_invalidate_range(struct ib_umem *umem, unsigned long start,
			      unsigned long end);
void mlx5_odp_init_mr_cache_entry(struct mlx5_cache_ent *ent);
void mlx5_odp_populate_klm(struct mlx5_klm *pklm, size_t offset,
			   size_t nentries, struct mlx5_ib_mr *mr, int flags);
#else /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
static inline void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev)
{
}

static inline int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev) { return 0; }
static inline int mlx5_ib_odp_init(void) { return 0; }
static inline void mlx5_ib_odp_cleanup(void) {}
static inline void mlx5_odp_init_mr_cache_entry(struct mlx5_cache_ent *ent) {}
static inline void mlx5_odp_populate_klm(struct mlx5_klm *pklm, size_t offset,
					 size_t nentries, struct mlx5_ib_mr *mr,
					 int flags) {}

#endif /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */

/* Needed for rep profile */
int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev);
void mlx5_ib_stage_init_cleanup(struct mlx5_ib_dev *dev);
int mlx5_ib_stage_rep_flow_db_init(struct mlx5_ib_dev *dev);
int mlx5_ib_stage_caps_init(struct mlx5_ib_dev *dev);
int mlx5_ib_stage_rep_non_default_cb(struct mlx5_ib_dev *dev);
int mlx5_ib_stage_rep_roce_init(struct mlx5_ib_dev *dev);
void mlx5_ib_stage_rep_roce_cleanup(struct mlx5_ib_dev *dev);
int mlx5_ib_stage_dev_res_init(struct mlx5_ib_dev *dev);
void mlx5_ib_stage_dev_res_cleanup(struct mlx5_ib_dev *dev);
int mlx5_ib_stage_counters_init(struct mlx5_ib_dev *dev);
void mlx5_ib_stage_counters_cleanup(struct mlx5_ib_dev *dev);
int mlx5_ib_stage_bfrag_init(struct mlx5_ib_dev *dev);
void mlx5_ib_stage_bfrag_cleanup(struct mlx5_ib_dev *dev);
void mlx5_ib_stage_pre_ib_reg_umr_cleanup(struct mlx5_ib_dev *dev);
int mlx5_ib_stage_ib_reg_init(struct mlx5_ib_dev *dev);
void mlx5_ib_stage_ib_reg_cleanup(struct mlx5_ib_dev *dev);
int mlx5_ib_stage_post_ib_reg_umr_init(struct mlx5_ib_dev *dev);
int mlx5_ib_stage_class_attr_init(struct mlx5_ib_dev *dev);
void __mlx5_ib_remove(struct mlx5_ib_dev *dev,
		      const struct mlx5_ib_profile *profile,
		      int stage);
void *__mlx5_ib_add(struct mlx5_ib_dev *dev,
		    const struct mlx5_ib_profile *profile);

int mlx5_ib_get_vf_config(struct ib_device *device, int vf,
			  u8 port, struct ifla_vf_info *info);
int mlx5_ib_set_vf_link_state(struct ib_device *device, int vf,
			      u8 port, int state);
int mlx5_ib_get_vf_stats(struct ib_device *device, int vf,
			 u8 port, struct ifla_vf_stats *stats);
int mlx5_ib_set_vf_guid(struct ib_device *device, int vf, u8 port,
			u64 guid, int type);

__be16 mlx5_get_roce_udp_sport(struct mlx5_ib_dev *dev, u8 port_num,
			       int index);
int mlx5_get_roce_gid_type(struct mlx5_ib_dev *dev, u8 port_num,
			   int index, enum ib_gid_type *gid_type);

void mlx5_ib_cleanup_cong_debugfs(struct mlx5_ib_dev *dev, u8 port_num);
int mlx5_ib_init_cong_debugfs(struct mlx5_ib_dev *dev, u8 port_num);

/* GSI QP helper functions */
struct ib_qp *mlx5_ib_gsi_create_qp(struct ib_pd *pd,
				    struct ib_qp_init_attr *init_attr);
int mlx5_ib_gsi_destroy_qp(struct ib_qp *qp);
int mlx5_ib_gsi_modify_qp(struct ib_qp *qp, struct ib_qp_attr *attr,
			  int attr_mask);
int mlx5_ib_gsi_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
			 int qp_attr_mask,
			 struct ib_qp_init_attr *qp_init_attr);
int mlx5_ib_gsi_post_send(struct ib_qp *qp, struct ib_send_wr *wr,
			  struct ib_send_wr **bad_wr);
int mlx5_ib_gsi_post_recv(struct ib_qp *qp, struct ib_recv_wr *wr,
			  struct ib_recv_wr **bad_wr);
void mlx5_ib_gsi_pkey_change(struct mlx5_ib_gsi_qp *gsi);

int mlx5_ib_generate_wc(struct ib_cq *ibcq, struct ib_wc *wc);

void mlx5_ib_free_bfreg(struct mlx5_ib_dev *dev, struct mlx5_bfreg_info *bfregi,
			int bfregn);
struct mlx5_ib_dev *mlx5_ib_get_ibdev_from_mpi(struct mlx5_ib_multiport_info *mpi);
struct mlx5_core_dev *mlx5_ib_get_native_port_mdev(struct mlx5_ib_dev *dev,
						   u8 ib_port_num,
						   u8 *native_port_num);
void mlx5_ib_put_native_port_mdev(struct mlx5_ib_dev *dev,
				  u8 port_num);

static inline void init_query_mad(struct ib_smp *mad)
{
	mad->base_version = 1;
	mad->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED;
	mad->class_version = 1;
	mad->method = IB_MGMT_METHOD_GET;
}
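
/*
 * Usage sketch (hypothetical caller): the mad_ifc query helpers above
 * typically prepare an ib_smp this way before executing it against the
 * device, e.g.:
 *
 *	init_query_mad(in_mad);
 *	in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;
 *	err = mlx5_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad);
 */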

static inline u8 convert_access(int acc)
{
	return (acc & IB_ACCESS_REMOTE_ATOMIC ? MLX5_PERM_ATOMIC : 0) |
	       (acc & IB_ACCESS_REMOTE_WRITE ? MLX5_PERM_REMOTE_WRITE : 0) |
	       (acc & IB_ACCESS_REMOTE_READ ? MLX5_PERM_REMOTE_READ : 0) |
	       (acc & IB_ACCESS_LOCAL_WRITE ? MLX5_PERM_LOCAL_WRITE : 0) |
	       MLX5_PERM_LOCAL_READ;
}
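
/*
 * Worked example: convert_access(IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ)
 * yields MLX5_PERM_LOCAL_WRITE | MLX5_PERM_REMOTE_READ | MLX5_PERM_LOCAL_READ;
 * note that local read permission is always granted.
 */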

static inline int is_qp1(enum ib_qp_type qp_type)
{
	return qp_type == MLX5_IB_QPT_HW_GSI;
}

#define MLX5_MAX_UMR_SHIFT 16
#define MLX5_MAX_UMR_PAGES (1 << MLX5_MAX_UMR_SHIFT)
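/*
 * With MLX5_MAX_UMR_SHIFT = 16, one UMR operation can address at most
 * 1 << 16 = 65536 pages, i.e. 256 MiB at the common 4 KiB page size.
 */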

static inline u32 check_cq_create_flags(u32 flags)
{
	/*
	 * Return a non-zero value for any unsupported CQ create flags,
	 * zero otherwise.
	 */
	return (flags & ~(IB_UVERBS_CQ_FLAGS_IGNORE_OVERRUN |
			  IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION));
}
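
/*
 * Example: check_cq_create_flags(IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION)
 * returns 0 (supported), while any bit outside the two flags above survives
 * the mask and yields a non-zero result that the caller rejects.
 */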

static inline int verify_assign_uidx(u8 cqe_version, u32 cmd_uidx,
				     u32 *user_index)
{
	if (cqe_version) {
		if ((cmd_uidx == MLX5_IB_DEFAULT_UIDX) ||
		    (cmd_uidx & ~MLX5_USER_ASSIGNED_UIDX_MASK))
			return -EINVAL;
		*user_index = cmd_uidx;
	} else {
		*user_index = MLX5_IB_DEFAULT_UIDX;
	}

	return 0;
}
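
/*
 * Behavior sketch: with CQE versioning enabled, the user-supplied index must
 * fit inside MLX5_USER_ASSIGNED_UIDX_MASK and must not equal the reserved
 * MLX5_IB_DEFAULT_UIDX (0xffffff), so e.g. verify_assign_uidx(1, 0xffffff,
 * &idx) returns -EINVAL; without versioning the index is forced to the
 * default.
 */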

static inline int get_qp_user_index(struct mlx5_ib_ucontext *ucontext,
				    struct mlx5_ib_create_qp *ucmd,
				    int inlen,
				    u32 *user_index)
{
	u8 cqe_version = ucontext->cqe_version;

	if (field_avail(struct mlx5_ib_create_qp, uidx, inlen) &&
	    !cqe_version && (ucmd->uidx == MLX5_IB_DEFAULT_UIDX))
		return 0;

	/* The command must carry a uidx iff CQE versioning is enabled */
	if (field_avail(struct mlx5_ib_create_qp, uidx, inlen) !=
	    !!cqe_version)
		return -EINVAL;

	return verify_assign_uidx(cqe_version, ucmd->uidx, user_index);
}
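
/*
 * get_srq_user_index() below applies the same rule to mlx5_ib_create_srq:
 * the command buffer must be long enough to contain uidx exactly when the
 * context negotiated CQE versioning, otherwise the request is rejected.
 */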

static inline int get_srq_user_index(struct mlx5_ib_ucontext *ucontext,
				     struct mlx5_ib_create_srq *ucmd,
				     int inlen,
				     u32 *user_index)
{
	u8 cqe_version = ucontext->cqe_version;

	if (field_avail(struct mlx5_ib_create_srq, uidx, inlen) &&
	    !cqe_version && (ucmd->uidx == MLX5_IB_DEFAULT_UIDX))
		return 0;

	if (field_avail(struct mlx5_ib_create_srq, uidx, inlen) !=
	    !!cqe_version)
		return -EINVAL;

	return verify_assign_uidx(cqe_version, ucmd->uidx, user_index);
}

static inline int get_uars_per_sys_page(struct mlx5_ib_dev *dev, bool lib_support)
{
	return lib_support && MLX5_CAP_GEN(dev->mdev, uar_4k) ?
		MLX5_UARS_IN_PAGE : 1;
}
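
/*
 * Rough arithmetic, assuming MLX5_UARS_IN_PAGE is the system page size
 * divided by the 4 KiB hardware UAR size: on 4 KiB-page systems packing is
 * a no-op (one UAR per page), while on 64 KiB-page systems up to 16 UARs
 * fit in one system page when both the device (uar_4k capability) and the
 * library request it.
 */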

static inline int get_num_static_uars(struct mlx5_ib_dev *dev,
				      struct mlx5_bfreg_info *bfregi)
{
	return get_uars_per_sys_page(dev, bfregi->lib_uar_4k) *
	       bfregi->num_static_sys_pages;
}

unsigned long mlx5_ib_get_xlt_emergency_page(void);
void mlx5_ib_put_xlt_emergency_page(void);

#endif /* MLX5_IB_H */