/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef MLX5_IB_H
#define MLX5_IB_H

#include <linux/kernel.h>
#include <linux/sched.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_smi.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cq.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/qp.h>
#include <linux/types.h>
#include <linux/mlx5/transobj.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/mlx5-abi.h>
#include <rdma/uverbs_ioctl.h>
#include <rdma/mlx5_user_ioctl_cmds.h>
#include <rdma/mlx5_user_ioctl_verbs.h>

#include "srq.h"

#define mlx5_ib_dbg(_dev, format, arg...)                                      \
	dev_dbg(&(_dev)->ib_dev.dev, "%s:%d:(pid %d): " format, __func__,      \
		__LINE__, current->pid, ##arg)

#define mlx5_ib_err(_dev, format, arg...)                                      \
	dev_err(&(_dev)->ib_dev.dev, "%s:%d:(pid %d): " format, __func__,      \
		__LINE__, current->pid, ##arg)

#define mlx5_ib_warn(_dev, format, arg...)                                     \
	dev_warn(&(_dev)->ib_dev.dev, "%s:%d:(pid %d): " format, __func__,     \
		 __LINE__, current->pid, ##arg)
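
/*
 * Usage sketch for the logging helpers above (hypothetical call site):
 *
 *	mlx5_ib_warn(dev, "create mkey failed (%d)\n", err);
 *
 * Each macro expands to the matching dev_*() call on the underlying
 * ib_device and prefixes the message with the calling function, its line
 * number and the current pid.
 */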

#define MLX5_IB_DEFAULT_UIDX 0xffffff
#define MLX5_USER_ASSIGNED_UIDX_MASK __mlx5_mask(qpc, user_index)

#define MLX5_MKEY_PAGE_SHIFT_MASK __mlx5_mask(mkc, log_page_size)

enum {
	MLX5_IB_MMAP_OFFSET_START = 9,
	MLX5_IB_MMAP_OFFSET_END = 255,
};

enum {
	MLX5_IB_MMAP_CMD_SHIFT	= 8,
	MLX5_IB_MMAP_CMD_MASK	= 0xff,
};
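
/*
 * A minimal sketch of the encoding these two constants imply (the real
 * decode helper lives in the driver's .c files): the mmap command byte is
 * carried in bits 8..15 of the mmap page offset.
 *
 *	static inline u8 mmap_get_command(unsigned long pgoff)
 *	{
 *		return (pgoff >> MLX5_IB_MMAP_CMD_SHIFT) &
 *		       MLX5_IB_MMAP_CMD_MASK;
 *	}
 */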
| 81 | |
Eli Cohen | e126ba9 | 2013-07-07 17:25:49 +0300 | [diff] [blame] | 82 | enum { |
| 83 | MLX5_RES_SCAT_DATA32_CQE = 0x1, |
| 84 | MLX5_RES_SCAT_DATA64_CQE = 0x2, |
| 85 | MLX5_REQ_SCAT_DATA32_CQE = 0x11, |
| 86 | MLX5_REQ_SCAT_DATA64_CQE = 0x22, |
| 87 | }; |
| 88 | |
Eli Cohen | e126ba9 | 2013-07-07 17:25:49 +0300 | [diff] [blame] | 89 | enum mlx5_ib_mad_ifc_flags { |
| 90 | MLX5_MAD_IFC_IGNORE_MKEY = 1, |
| 91 | MLX5_MAD_IFC_IGNORE_BKEY = 2, |
| 92 | MLX5_MAD_IFC_NET_VIEW = 4, |
| 93 | }; |
| 94 | |
Leon Romanovsky | 051f263 | 2015-12-20 12:16:11 +0200 | [diff] [blame] | 95 | enum { |
Eli Cohen | 2f5ff26 | 2017-01-03 23:55:21 +0200 | [diff] [blame] | 96 | MLX5_CROSS_CHANNEL_BFREG = 0, |
Leon Romanovsky | 051f263 | 2015-12-20 12:16:11 +0200 | [diff] [blame] | 97 | }; |
| 98 | |
Haggai Abramovsky | cfb5e08 | 2016-01-14 19:12:57 +0200 | [diff] [blame] | 99 | enum { |
| 100 | MLX5_CQE_VERSION_V0, |
| 101 | MLX5_CQE_VERSION_V1, |
| 102 | }; |
| 103 | |
Artemy Kovalyov | eb76189 | 2017-08-17 15:52:09 +0300 | [diff] [blame] | 104 | enum { |
| 105 | MLX5_TM_MAX_RNDV_MSG_SIZE = 64, |
| 106 | MLX5_TM_MAX_SGE = 1, |
| 107 | }; |
| 108 | |
Yishai Hadas | 4ed131d | 2017-12-24 16:31:35 +0200 | [diff] [blame] | 109 | enum { |
| 110 | MLX5_IB_INVALID_UAR_INDEX = BIT(31), |
Yishai Hadas | 1ee47ab | 2017-12-24 16:31:36 +0200 | [diff] [blame] | 111 | MLX5_IB_INVALID_BFREG = BIT(31), |
Yishai Hadas | 4ed131d | 2017-12-24 16:31:35 +0200 | [diff] [blame] | 112 | }; |
| 113 | |
Ariel Levkovich | 24da001 | 2018-04-05 18:53:27 +0300 | [diff] [blame] | 114 | enum { |
| 115 | MLX5_MAX_MEMIC_PAGES = 0x100, |
| 116 | MLX5_MEMIC_ALLOC_SIZE_MASK = 0x3f, |
| 117 | }; |
| 118 | |
| 119 | enum { |
| 120 | MLX5_MEMIC_BASE_ALIGN = 6, |
| 121 | MLX5_MEMIC_BASE_SIZE = 1 << MLX5_MEMIC_BASE_ALIGN, |
| 122 | }; |
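
/*
 * In plain numbers (derived from the constants above): MEMIC allocations
 * are handled at a base granularity of 1 << 6 == 64 bytes, and
 * MLX5_MEMIC_ALLOC_SIZE_MASK (0x3f) covers exactly the six low-order bits
 * below that granularity.
 */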

enum mlx5_ib_mmap_type {
	MLX5_IB_MMAP_TYPE_MEMIC = 1,
	MLX5_IB_MMAP_TYPE_VAR = 2,
	MLX5_IB_MMAP_TYPE_UAR_WC = 3,
	MLX5_IB_MMAP_TYPE_UAR_NC = 4,
};

struct mlx5_bfreg_info {
	u32 *sys_pages;
	int num_low_latency_bfregs;
	unsigned int *count;

	/*
	 * protect bfreg allocation data structs
	 */
	struct mutex lock;
	u32 ver;
	u8 lib_uar_4k : 1;
	u8 lib_uar_dyn : 1;
	u32 num_sys_pages;
	u32 num_static_sys_pages;
	u32 total_num_bfregs;
	u32 num_dyn_bfregs;
};

struct mlx5_ib_ucontext {
	struct ib_ucontext	ibucontext;
	struct list_head	db_page_list;

	/* protect doorbell record alloc/free
	 */
	struct mutex		db_page_mutex;
	struct mlx5_bfreg_info	bfregi;
	u8			cqe_version;
	/* Transport Domain number */
	u32			tdn;

	u64			lib_caps;
	u16			devx_uid;
	/* For RoCE LAG TX affinity */
	atomic_t		tx_port_affinity;
};

static inline struct mlx5_ib_ucontext *to_mucontext(struct ib_ucontext *ibucontext)
{
	return container_of(ibucontext, struct mlx5_ib_ucontext, ibucontext);
}

struct mlx5_ib_pd {
	struct ib_pd		ibpd;
	u32			pdn;
	u16			uid;
};

enum {
	MLX5_IB_FLOW_ACTION_MODIFY_HEADER,
	MLX5_IB_FLOW_ACTION_PACKET_REFORMAT,
	MLX5_IB_FLOW_ACTION_DECAP,
};

#define MLX5_IB_FLOW_MCAST_PRIO		(MLX5_BY_PASS_NUM_PRIOS - 1)
#define MLX5_IB_FLOW_LAST_PRIO		(MLX5_BY_PASS_NUM_REGULAR_PRIOS - 1)
#if (MLX5_IB_FLOW_LAST_PRIO <= 0)
#error "Invalid number of bypass priorities"
#endif
#define MLX5_IB_FLOW_LEFTOVERS_PRIO	(MLX5_IB_FLOW_MCAST_PRIO + 1)

#define MLX5_IB_NUM_FLOW_FT		(MLX5_IB_FLOW_LEFTOVERS_PRIO + 1)
#define MLX5_IB_NUM_SNIFFER_FTS		2
#define MLX5_IB_NUM_EGRESS_FTS		1
struct mlx5_ib_flow_prio {
	struct mlx5_flow_table		*flow_table;
	unsigned int			refcount;
};

struct mlx5_ib_flow_handler {
	struct list_head		list;
	struct ib_flow			ibflow;
	struct mlx5_ib_flow_prio	*prio;
	struct mlx5_flow_handle		*rule;
	struct ib_counters		*ibcounters;
	struct mlx5_ib_dev		*dev;
	struct mlx5_ib_flow_matcher	*flow_matcher;
};

struct mlx5_ib_flow_matcher {
	struct mlx5_ib_match_params matcher_mask;
	int			mask_len;
	enum mlx5_ib_flow_type	flow_type;
	enum mlx5_flow_namespace_type ns_type;
	u16			priority;
	struct mlx5_core_dev	*mdev;
	atomic_t		usecnt;
	u8			match_criteria_enable;
};

struct mlx5_ib_pp {
	u16 index;
	struct mlx5_core_dev *mdev;
};

struct mlx5_ib_flow_db {
	struct mlx5_ib_flow_prio	prios[MLX5_IB_NUM_FLOW_FT];
	struct mlx5_ib_flow_prio	egress_prios[MLX5_IB_NUM_FLOW_FT];
	struct mlx5_ib_flow_prio	sniffer[MLX5_IB_NUM_SNIFFER_FTS];
	struct mlx5_ib_flow_prio	egress[MLX5_IB_NUM_EGRESS_FTS];
	struct mlx5_ib_flow_prio	fdb;
	struct mlx5_ib_flow_prio	rdma_rx[MLX5_IB_NUM_FLOW_FT];
	struct mlx5_ib_flow_prio	rdma_tx[MLX5_IB_NUM_FLOW_FT];
	struct mlx5_flow_table		*lag_demux_ft;
	/* Protect flow steering bypass flow tables
	 * when adding/deleting flow rules.
	 * Only a single addition/removal of a flow steering rule can be
	 * in progress at a time.
	 */
	struct mutex			lock;
};

/* Use macros here so that we don't have to duplicate
 * enum ib_send_flags and enum ib_qp_type for the low-level driver
 */

#define MLX5_IB_SEND_UMR_ENABLE_MR	       (IB_SEND_RESERVED_START << 0)
#define MLX5_IB_SEND_UMR_DISABLE_MR	       (IB_SEND_RESERVED_START << 1)
#define MLX5_IB_SEND_UMR_FAIL_IF_FREE	       (IB_SEND_RESERVED_START << 2)
#define MLX5_IB_SEND_UMR_UPDATE_XLT	       (IB_SEND_RESERVED_START << 3)
#define MLX5_IB_SEND_UMR_UPDATE_TRANSLATION    (IB_SEND_RESERVED_START << 4)
#define MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS	IB_SEND_RESERVED_END

#define MLX5_IB_QPT_REG_UMR	IB_QPT_RESERVED1
/*
 * IB_QPT_GSI creates the software wrapper around GSI, and MLX5_IB_QPT_HW_GSI
 * creates the actual hardware QP.
 */
#define MLX5_IB_QPT_HW_GSI	IB_QPT_RESERVED2
#define MLX5_IB_QPT_DCI		IB_QPT_RESERVED3
#define MLX5_IB_QPT_DCT		IB_QPT_RESERVED4
#define MLX5_IB_WR_UMR		IB_WR_RESERVED1

#define MLX5_IB_UMR_OCTOWORD	       16
#define MLX5_IB_UMR_XLT_ALIGNMENT      64

#define MLX5_IB_UPD_XLT_ZAP	      BIT(0)
#define MLX5_IB_UPD_XLT_ENABLE	      BIT(1)
#define MLX5_IB_UPD_XLT_ATOMIC	      BIT(2)
#define MLX5_IB_UPD_XLT_ADDR	      BIT(3)
#define MLX5_IB_UPD_XLT_PD	      BIT(4)
#define MLX5_IB_UPD_XLT_ACCESS	      BIT(5)
#define MLX5_IB_UPD_XLT_INDIRECT      BIT(6)

/* Private QP creation flags to be passed in ib_qp_init_attr.create_flags.
 *
 * These flags are intended for internal use by the mlx5_ib driver, and they
 * rely on the range reserved for that use in the ib_qp_create_flags enum.
 */
#define MLX5_IB_QP_CREATE_SQPN_QP1	IB_QP_CREATE_RESERVED_START
#define MLX5_IB_QP_CREATE_WC_TEST	(IB_QP_CREATE_RESERVED_START << 1)

struct wr_list {
	u16	opcode;
	u16	next;
};

enum mlx5_ib_rq_flags {
	MLX5_IB_RQ_CVLAN_STRIPPING	= 1 << 0,
	MLX5_IB_RQ_PCI_WRITE_END_PADDING	= 1 << 1,
};

struct mlx5_ib_wq {
	struct mlx5_frag_buf_ctrl fbc;
	u64		       *wrid;
	u32		       *wr_data;
	struct wr_list	       *w_list;
	unsigned	       *wqe_head;
	u16			unsig_count;

	/* serialize post to the work queue
	 */
	spinlock_t		lock;
	int			wqe_cnt;
	int			max_post;
	int			max_gs;
	int			offset;
	int			wqe_shift;
	unsigned		head;
	unsigned		tail;
	u16			cur_post;
	u16			last_poll;
	void			*cur_edge;
};

enum mlx5_ib_wq_flags {
	MLX5_IB_WQ_FLAGS_DELAY_DROP = 0x1,
	MLX5_IB_WQ_FLAGS_STRIDING_RQ = 0x2,
};

#define MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES 9
#define MLX5_MAX_SINGLE_WQE_LOG_NUM_STRIDES 16
#define MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES 6
#define MLX5_MAX_SINGLE_STRIDE_LOG_NUM_BYTES 13
#define MLX5_EXT_MIN_SINGLE_WQE_LOG_NUM_STRIDES 3
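
/*
 * In plain numbers (derived from the log values above): a striding RQ WQE
 * spans between 2^9 = 512 and 2^16 = 65536 strides of 2^6 = 64 up to
 * 2^13 = 8192 bytes each; the EXT minimum of 2^3 = 8 strides presumably
 * applies only on devices advertising the matching extended capability.
 */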

struct mlx5_ib_rwq {
	struct ib_wq		ibwq;
	struct mlx5_core_qp	core_qp;
	u32			rq_num_pas;
	u32			log_rq_stride;
	u32			log_rq_size;
	u32			rq_page_offset;
	u32			log_page_size;
	u32			log_num_strides;
	u32			two_byte_shift_en;
	u32			single_stride_log_num_of_bytes;
	struct ib_umem		*umem;
	size_t			buf_size;
	unsigned int		page_shift;
	struct mlx5_db		db;
	u32			user_index;
	u32			wqe_count;
	u32			wqe_shift;
	int			wq_sig;
	u32			create_flags; /* Use enum mlx5_ib_wq_flags */
};

struct mlx5_ib_rwq_ind_table {
	struct ib_rwq_ind_table ib_rwq_ind_tbl;
	u32			rqtn;
	u16			uid;
};

struct mlx5_ib_ubuffer {
	struct ib_umem	       *umem;
	int			buf_size;
	u64			buf_addr;
};

struct mlx5_ib_qp_base {
	struct mlx5_ib_qp	*container_mibqp;
	struct mlx5_core_qp	mqp;
	struct mlx5_ib_ubuffer	ubuffer;
};

struct mlx5_ib_qp_trans {
	struct mlx5_ib_qp_base	base;
	u16			xrcdn;
	u8			alt_port;
	u8			atomic_rd_en;
	u8			resp_depth;
};

struct mlx5_ib_rss_qp {
	u32	tirn;
};

struct mlx5_ib_rq {
	struct mlx5_ib_qp_base base;
	struct mlx5_ib_wq	*rq;
	struct mlx5_ib_ubuffer	ubuffer;
	struct mlx5_db		*doorbell;
	u32			tirn;
	u8			state;
	u32			flags;
};

struct mlx5_ib_sq {
	struct mlx5_ib_qp_base base;
	struct mlx5_ib_wq	*sq;
	struct mlx5_ib_ubuffer  ubuffer;
	struct mlx5_db		*doorbell;
	struct mlx5_flow_handle	*flow_rule;
	u32			tisn;
	u8			state;
};

struct mlx5_ib_raw_packet_qp {
	struct mlx5_ib_sq sq;
	struct mlx5_ib_rq rq;
};

struct mlx5_bf {
	int			buf_size;
	unsigned long		offset;
	struct mlx5_sq_bfreg   *bfreg;
};

struct mlx5_ib_dct {
	struct mlx5_core_dct	mdct;
	u32			*in;
};

struct mlx5_ib_qp {
	struct ib_qp		ibqp;
	union {
		struct mlx5_ib_qp_trans trans_qp;
		struct mlx5_ib_raw_packet_qp raw_packet_qp;
		struct mlx5_ib_rss_qp rss_qp;
		struct mlx5_ib_dct dct;
	};
	struct mlx5_frag_buf	buf;

	struct mlx5_db		db;
	struct mlx5_ib_wq	rq;

	u8			sq_signal_bits;
	u8			next_fence;
	struct mlx5_ib_wq	sq;

	/* serialize qp state modifications
	 */
	struct mutex		mutex;
	/* cached variant of create_flags from struct ib_qp_init_attr */
	u32			flags;
	u8			port;
	u8			state;
	int			max_inline_data;
	struct mlx5_bf		bf;
	u8			has_rq:1;
	u8			is_rss:1;

	/* Only for user-space QPs. For kernel QPs
	 * we have it from the bf object.
	 */
	int			bfregn;

	struct list_head	qps_list;
	struct list_head	cq_recv_list;
	struct list_head	cq_send_list;
	struct mlx5_rate_limit	rl;
	u32			underlay_qpn;
	u32			flags_en;
	/*
	 * IB/core doesn't store low-level QP types, so
	 * store both MLX and IBTA types in the field below.
	 * IB_QPT_DRIVER will be broken down into the DCI/DCT subtypes.
	 */
	enum ib_qp_type		type;
	/* A flag to indicate that a new counter has been configured
	 * but has not yet taken effect
	 */
	u32			counter_pending;
};

struct mlx5_ib_cq_buf {
	struct mlx5_frag_buf_ctrl fbc;
	struct mlx5_frag_buf    frag_buf;
	struct ib_umem		*umem;
	int			cqe_size;
	int			nent;
};

struct mlx5_umr_wr {
	struct ib_send_wr		wr;
	u64				virt_addr;
	u64				offset;
	struct ib_pd		       *pd;
	unsigned int			page_shift;
	unsigned int			xlt_size;
	u64				length;
	int				access_flags;
	u32				mkey;
	u8				ignore_free_state:1;
};

static inline const struct mlx5_umr_wr *umr_wr(const struct ib_send_wr *wr)
{
	return container_of(wr, struct mlx5_umr_wr, wr);
}
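
/*
 * Usage sketch (hypothetical send-path handler): umr_wr() is only valid for
 * ib_send_wr pointers that are embedded in a struct mlx5_umr_wr, e.g.
 *
 *	const struct mlx5_umr_wr *umrwr = umr_wr(wr);
 */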

struct mlx5_shared_mr_info {
	int mr_id;
	struct ib_umem		*umem;
};

enum mlx5_ib_cq_pr_flags {
	MLX5_IB_CQ_PR_FLAGS_CQE_128_PAD	= 1 << 0,
};

struct mlx5_ib_cq {
	struct ib_cq		ibcq;
	struct mlx5_core_cq	mcq;
	struct mlx5_ib_cq_buf	buf;
	struct mlx5_db		db;

	/* serialize access to the CQ
	 */
	spinlock_t		lock;

	/* protect resize cq
	 */
	struct mutex		resize_mutex;
	struct mlx5_ib_cq_buf  *resize_buf;
	struct ib_umem	       *resize_umem;
	int			cqe_size;
	struct list_head	list_send_qp;
	struct list_head	list_recv_qp;
	u32			create_flags;
	struct list_head	wc_list;
	enum ib_cq_notify_flags notify_flags;
	struct work_struct	notify_work;
	u16			private_flags; /* Use mlx5_ib_cq_pr_flags */
};

struct mlx5_ib_wc {
	struct ib_wc wc;
	struct list_head list;
};

struct mlx5_ib_srq {
	struct ib_srq		ibsrq;
	struct mlx5_core_srq	msrq;
	struct mlx5_frag_buf	buf;
	struct mlx5_db		db;
	struct mlx5_frag_buf_ctrl fbc;
	u64		       *wrid;
	/* protect SRQ handling
	 */
	spinlock_t		lock;
	int			head;
	int			tail;
	u16			wqe_ctr;
	struct ib_umem	       *umem;
	/* serialize arming a SRQ
	 */
	struct mutex		mutex;
	int			wq_sig;
};

struct mlx5_ib_xrcd {
	struct ib_xrcd		ibxrcd;
	u32			xrcdn;
};

enum mlx5_ib_mtt_access_flags {
	MLX5_IB_MTT_READ  = (1 << 0),
	MLX5_IB_MTT_WRITE = (1 << 1),
};

struct mlx5_user_mmap_entry {
	struct rdma_user_mmap_entry rdma_entry;
	u8 mmap_flag;
	u64 address;
	u32 page_idx;
};

struct mlx5_ib_dm {
	struct ib_dm		ibdm;
	phys_addr_t		dev_addr;
	u32			type;
	size_t			size;
	union {
		struct {
			u32	obj_id;
		} icm_dm;
		/* other dm types specific params should be added here */
	};
	struct mlx5_user_mmap_entry mentry;
};

#define MLX5_IB_MTT_PRESENT (MLX5_IB_MTT_READ | MLX5_IB_MTT_WRITE)

#define MLX5_IB_DM_MEMIC_ALLOWED_ACCESS (IB_ACCESS_LOCAL_WRITE   |\
					 IB_ACCESS_REMOTE_WRITE  |\
					 IB_ACCESS_REMOTE_READ   |\
					 IB_ACCESS_REMOTE_ATOMIC |\
					 IB_ZERO_BASED)

#define MLX5_IB_DM_SW_ICM_ALLOWED_ACCESS (IB_ACCESS_LOCAL_WRITE   |\
					  IB_ACCESS_REMOTE_WRITE  |\
					  IB_ACCESS_REMOTE_READ   |\
					  IB_ZERO_BASED)

#define mlx5_update_odp_stats(mr, counter_name, value)		\
	atomic64_add(value, &((mr)->odp_stats.counter_name))
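
/*
 * Usage sketch (hypothetical call site): after resolving a page fault for
 * "mr", bump the matching ib_odp_counters field atomically, e.g.
 *
 *	mlx5_update_odp_stats(mr, faults, npages);
 *
 * which expands to atomic64_add(npages, &mr->odp_stats.faults).
 */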

struct mlx5_ib_mr {
	struct ib_mr		ibmr;
	void			*descs;
	dma_addr_t		desc_map;
	int			ndescs;
	int			data_length;
	int			meta_ndescs;
	int			meta_length;
	int			max_descs;
	int			desc_size;
	int			access_mode;
	struct mlx5_core_mkey	mmkey;
	struct ib_umem	       *umem;
	struct mlx5_shared_mr_info	*smr_info;
	struct list_head	list;
	unsigned int		order;
	struct mlx5_cache_ent  *cache_ent;
	int			npages;
	struct mlx5_ib_dev     *dev;
	u32 out[MLX5_ST_SZ_DW(create_mkey_out)];
	struct mlx5_core_sig_ctx    *sig;
	void			*descs_alloc;
	int			access_flags; /* Needed for rereg MR */

	struct mlx5_ib_mr      *parent;
	/* Needed for IB_MR_TYPE_INTEGRITY */
	struct mlx5_ib_mr      *pi_mr;
	struct mlx5_ib_mr      *klm_mr;
	struct mlx5_ib_mr      *mtt_mr;
	u64			data_iova;
	u64			pi_iova;

	/* For ODP and implicit */
	atomic_t		num_deferred_work;
	wait_queue_head_t	q_deferred_work;
	struct xarray		implicit_children;
	union {
		struct rcu_head rcu;
		struct list_head elm;
		struct work_struct work;
	} odp_destroy;
	struct ib_odp_counters	odp_stats;
	bool			is_odp_implicit;

	struct mlx5_async_work  cb_work;
};

static inline bool is_odp_mr(struct mlx5_ib_mr *mr)
{
	return IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING) && mr->umem &&
	       mr->umem->is_odp;
}

struct mlx5_ib_mw {
	struct ib_mw		ibmw;
	struct mlx5_core_mkey	mmkey;
	int			ndescs;
};

struct mlx5_ib_devx_mr {
	struct mlx5_core_mkey	mmkey;
	int			ndescs;
};

struct mlx5_ib_umr_context {
	struct ib_cqe		cqe;
	enum ib_wc_status	status;
	struct completion	done;
};

struct umr_common {
	struct ib_pd	*pd;
	struct ib_cq	*cq;
	struct ib_qp	*qp;
	/* control access to UMR QP
	 */
	struct semaphore	sem;
};

enum {
	MLX5_FMR_INVALID,
	MLX5_FMR_VALID,
	MLX5_FMR_BUSY,
};

struct mlx5_cache_ent {
	struct list_head	head;
	/* sync access to the cache entry
	 */
	spinlock_t		lock;

	char			name[4];
	u32			order;
	u32			xlt;
	u32			access_mode;
	u32			page;

	u8 disabled:1;
	u8 fill_to_high_water:1;

	/*
	 * - available_mrs is the length of the head list, i.e. the number of
	 *   MRs available for immediate allocation.
	 * - total_mrs is available_mrs plus all in-use MRs that could be
	 *   returned to the cache.
	 * - limit is the low water mark for available_mrs, 2 * limit is the
	 *   upper water mark.
	 * - pending is the number of MRs currently being created.
	 */
	u32 total_mrs;
	u32 available_mrs;
	u32 limit;
	u32 pending;

	/* Statistics */
	u32			miss;

	struct mlx5_ib_dev     *dev;
	struct work_struct	work;
	struct delayed_work	dwork;
};
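
/*
 * Water-mark arithmetic implied by the comment above (the refill policy
 * itself lives in the MR cache code): with, say, limit == 500, the entry is
 * considered low once available_mrs drops below 500, and a refill with
 * fill_to_high_water set aims at 2 * 500 == 1000 available MRs.
 */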

struct mlx5_mr_cache {
	struct workqueue_struct *wq;
	struct mlx5_cache_ent	ent[MAX_MR_CACHE_ENTRIES];
	struct dentry		*root;
	unsigned long		last_add;
};

struct mlx5_ib_gsi_qp;

struct mlx5_ib_port_resources {
	struct mlx5_ib_resources *devr;
	struct mlx5_ib_gsi_qp *gsi;
	struct work_struct pkey_change_work;
};

struct mlx5_ib_resources {
	struct ib_cq	*c0;
	struct ib_xrcd	*x0;
	struct ib_xrcd	*x1;
	struct ib_pd	*p0;
	struct ib_srq	*s0;
	struct ib_srq	*s1;
	struct mlx5_ib_port_resources ports[2];
	/* Protects changes to the port resources */
	struct mutex	mutex;
};

struct mlx5_ib_counters {
	const char **names;
	size_t *offsets;
	u32 num_q_counters;
	u32 num_cong_counters;
	u32 num_ext_ppcnt_counters;
	u16 set_id;
};

struct mlx5_ib_multiport_info;

struct mlx5_ib_multiport {
	struct mlx5_ib_multiport_info *mpi;
	/* To be held when accessing the multiport info */
	spinlock_t mpi_lock;
};

struct mlx5_roce {
	/* Protect mlx5_ib_get_netdev from invoking dev_hold() with a NULL
	 * netdev pointer
	 */
	rwlock_t		netdev_lock;
	struct net_device	*netdev;
	struct notifier_block	nb;
	atomic_t		tx_port_affinity;
	enum ib_port_state last_port_state;
	struct mlx5_ib_dev	*dev;
	u8			native_port_num;
};

struct mlx5_ib_port {
	struct mlx5_ib_counters cnts;
	struct mlx5_ib_multiport mp;
	struct mlx5_ib_dbg_cc_params *dbg_cc_params;
	struct mlx5_roce roce;
	struct mlx5_eswitch_rep		*rep;
};

struct mlx5_ib_dbg_param {
	int			offset;
	struct mlx5_ib_dev	*dev;
	struct dentry		*dentry;
	u8			port_num;
};

enum mlx5_ib_dbg_cc_types {
	MLX5_IB_DBG_CC_RP_CLAMP_TGT_RATE,
	MLX5_IB_DBG_CC_RP_CLAMP_TGT_RATE_ATI,
	MLX5_IB_DBG_CC_RP_TIME_RESET,
	MLX5_IB_DBG_CC_RP_BYTE_RESET,
	MLX5_IB_DBG_CC_RP_THRESHOLD,
	MLX5_IB_DBG_CC_RP_AI_RATE,
	MLX5_IB_DBG_CC_RP_MAX_RATE,
	MLX5_IB_DBG_CC_RP_HAI_RATE,
	MLX5_IB_DBG_CC_RP_MIN_DEC_FAC,
	MLX5_IB_DBG_CC_RP_MIN_RATE,
	MLX5_IB_DBG_CC_RP_RATE_TO_SET_ON_FIRST_CNP,
	MLX5_IB_DBG_CC_RP_DCE_TCP_G,
	MLX5_IB_DBG_CC_RP_DCE_TCP_RTT,
	MLX5_IB_DBG_CC_RP_RATE_REDUCE_MONITOR_PERIOD,
	MLX5_IB_DBG_CC_RP_INITIAL_ALPHA_VALUE,
	MLX5_IB_DBG_CC_RP_GD,
	MLX5_IB_DBG_CC_NP_MIN_TIME_BETWEEN_CNPS,
	MLX5_IB_DBG_CC_NP_CNP_DSCP,
	MLX5_IB_DBG_CC_NP_CNP_PRIO_MODE,
	MLX5_IB_DBG_CC_NP_CNP_PRIO,
	MLX5_IB_DBG_CC_MAX,
};

struct mlx5_ib_dbg_cc_params {
	struct dentry			*root;
	struct mlx5_ib_dbg_param	params[MLX5_IB_DBG_CC_MAX];
};

enum {
	MLX5_MAX_DELAY_DROP_TIMEOUT_MS = 100,
};

struct mlx5_ib_delay_drop {
	struct mlx5_ib_dev     *dev;
	struct work_struct	delay_drop_work;
	/* serialize setting of delay drop */
	struct mutex		lock;
	u32			timeout;
	bool			activate;
	atomic_t		events_cnt;
	atomic_t		rqs_cnt;
	struct dentry		*dir_debugfs;
};

enum mlx5_ib_stages {
	MLX5_IB_STAGE_INIT,
	MLX5_IB_STAGE_FLOW_DB,
	MLX5_IB_STAGE_CAPS,
	MLX5_IB_STAGE_NON_DEFAULT_CB,
	MLX5_IB_STAGE_ROCE,
	MLX5_IB_STAGE_QP,
	MLX5_IB_STAGE_SRQ,
	MLX5_IB_STAGE_DEVICE_RESOURCES,
	MLX5_IB_STAGE_DEVICE_NOTIFIER,
	MLX5_IB_STAGE_ODP,
	MLX5_IB_STAGE_COUNTERS,
	MLX5_IB_STAGE_CONG_DEBUGFS,
	MLX5_IB_STAGE_UAR,
	MLX5_IB_STAGE_BFREG,
	MLX5_IB_STAGE_PRE_IB_REG_UMR,
	MLX5_IB_STAGE_WHITELIST_UID,
	MLX5_IB_STAGE_IB_REG,
	MLX5_IB_STAGE_POST_IB_REG_UMR,
	MLX5_IB_STAGE_DELAY_DROP,
	MLX5_IB_STAGE_CLASS_ATTR,
	MLX5_IB_STAGE_MAX,
};

struct mlx5_ib_stage {
	int (*init)(struct mlx5_ib_dev *dev);
	void (*cleanup)(struct mlx5_ib_dev *dev);
};

#define STAGE_CREATE(_stage, _init, _cleanup) \
	.stage[_stage] = {.init = _init, .cleanup = _cleanup}
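
/*
 * Usage sketch for STAGE_CREATE (hypothetical profile and stage functions;
 * the real profile tables live in the driver's .c files):
 *
 *	static const struct mlx5_ib_profile example_profile = {
 *		STAGE_CREATE(MLX5_IB_STAGE_INIT,
 *			     stage_init_init,	  // hypothetical init hook
 *			     stage_init_cleanup), // hypothetical cleanup hook
 *	};
 *
 * Init hooks run in ascending enum order; cleanup presumably runs in
 * reverse.
 */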
| 869 | |
| 870 | struct mlx5_ib_profile { |
| 871 | struct mlx5_ib_stage stage[MLX5_IB_STAGE_MAX]; |
| 872 | }; |
| 873 | |
Daniel Jurgens | 32f69e4 | 2018-01-04 17:25:36 +0200 | [diff] [blame] | 874 | struct mlx5_ib_multiport_info { |
| 875 | struct list_head list; |
| 876 | struct mlx5_ib_dev *ibdev; |
| 877 | struct mlx5_core_dev *mdev; |
Saeed Mahameed | df097a2 | 2018-11-26 14:39:00 -0800 | [diff] [blame] | 878 | struct notifier_block mdev_events; |
Daniel Jurgens | 32f69e4 | 2018-01-04 17:25:36 +0200 | [diff] [blame] | 879 | struct completion unref_comp; |
| 880 | u64 sys_image_guid; |
| 881 | u32 mdev_refcnt; |
| 882 | bool is_master; |
| 883 | bool unaffiliate; |
| 884 | }; |
| 885 | |
Aviad Yehezkel | c6475a0 | 2018-03-28 09:27:50 +0300 | [diff] [blame] | 886 | struct mlx5_ib_flow_action { |
| 887 | struct ib_flow_action ib_action; |
| 888 | union { |
| 889 | struct { |
| 890 | u64 ib_flags; |
| 891 | struct mlx5_accel_esp_xfrm *ctx; |
| 892 | } esp_aes_gcm; |
Mark Bloch | b4749bf | 2018-08-28 14:18:51 +0300 | [diff] [blame] | 893 | struct { |
| 894 | struct mlx5_ib_dev *dev; |
| 895 | u32 sub_type; |
Maor Gottlieb | 2b688ea | 2019-08-15 13:54:17 +0300 | [diff] [blame] | 896 | union { |
| 897 | struct mlx5_modify_hdr *modify_hdr; |
| 898 | struct mlx5_pkt_reformat *pkt_reformat; |
| 899 | }; |
Mark Bloch | b4749bf | 2018-08-28 14:18:51 +0300 | [diff] [blame] | 900 | } flow_action_raw; |
Aviad Yehezkel | c6475a0 | 2018-03-28 09:27:50 +0300 | [diff] [blame] | 901 | }; |
| 902 | }; |
| 903 | |
Ariel Levkovich | 3b113a1 | 2019-05-05 17:07:11 +0300 | [diff] [blame] | 904 | struct mlx5_dm { |
Ariel Levkovich | 24da001 | 2018-04-05 18:53:27 +0300 | [diff] [blame] | 905 | struct mlx5_core_dev *dev; |
Ariel Levkovich | 3b113a1 | 2019-05-05 17:07:11 +0300 | [diff] [blame] | 906 | /* This lock is used to protect the access to the shared |
| 907 | * allocation map when concurrent requests by different |
| 908 | * processes are handled. |
| 909 | */ |
| 910 | spinlock_t lock; |
Ariel Levkovich | 24da001 | 2018-04-05 18:53:27 +0300 | [diff] [blame] | 911 | DECLARE_BITMAP(memic_alloc_pages, MLX5_MAX_MEMIC_PAGES); |
| 912 | }; |
| 913 | |
Raed Salem | 5e95af5 | 2018-05-31 16:43:40 +0300 | [diff] [blame] | 914 | struct mlx5_read_counters_attr { |
| 915 | struct mlx5_fc *hw_cntrs_hndl; |
| 916 | u64 *out; |
| 917 | u32 flags; |
| 918 | }; |
| 919 | |
Raed Salem | 3b3233f | 2018-05-31 16:43:39 +0300 | [diff] [blame] | 920 | enum mlx5_ib_counters_type { |
| 921 | MLX5_IB_COUNTERS_FLOW, |
| 922 | }; |
| 923 | |
Raed Salem | b29e2a1 | 2018-05-31 16:43:38 +0300 | [diff] [blame] | 924 | struct mlx5_ib_mcounters { |
| 925 | struct ib_counters ibcntrs; |
Raed Salem | 3b3233f | 2018-05-31 16:43:39 +0300 | [diff] [blame] | 926 | enum mlx5_ib_counters_type type; |
Raed Salem | 5e95af5 | 2018-05-31 16:43:40 +0300 | [diff] [blame] | 927 | /* number of counters supported for this counters type */ |
| 928 | u32 counters_num; |
| 929 | struct mlx5_fc *hw_cntrs_hndl; |
| 930 | /* read function for this counters type */ |
| 931 | int (*read_counters)(struct ib_device *ibdev, |
| 932 | struct mlx5_read_counters_attr *read_attr); |
Raed Salem | 3b3233f | 2018-05-31 16:43:39 +0300 | [diff] [blame] | 933 | /* max index set as part of create_flow */ |
| 934 | u32 cntrs_max_index; |
| 935 | /* number of counters data entries (<description,index> pair) */ |
| 936 | u32 ncounters; |
| 937 | /* counters data array for descriptions and indexes */ |
| 938 | struct mlx5_ib_flow_counters_desc *counters_data; |
| 939 | /* protects access to mcounters internal data */ |
| 940 | struct mutex mcntrs_mutex; |
Raed Salem | b29e2a1 | 2018-05-31 16:43:38 +0300 | [diff] [blame] | 941 | }; |
| 942 | |
| 943 | static inline struct mlx5_ib_mcounters * |
| 944 | to_mcounters(struct ib_counters *ibcntrs) |
| 945 | { |
| 946 | return container_of(ibcntrs, struct mlx5_ib_mcounters, ibcntrs); |
| 947 | } |
| 948 | |
Mark Bloch | 2ea2620 | 2018-09-06 17:27:03 +0300 | [diff] [blame] | 949 | int parse_flow_flow_action(struct mlx5_ib_flow_action *maction, |
| 950 | bool is_egress, |
| 951 | struct mlx5_flow_act *action); |
Mark Bloch | a560f1d | 2018-09-17 13:30:47 +0300 | [diff] [blame] | 952 | struct mlx5_ib_lb_state { |
| 953 | /* protect the user_td */ |
| 954 | struct mutex mutex; |
| 955 | u32 user_td; |
Mark Bloch | 0042f9e | 2018-09-17 13:30:49 +0300 | [diff] [blame] | 956 | int qps; |
| 957 | bool enabled; |
Mark Bloch | a560f1d | 2018-09-17 13:30:47 +0300 | [diff] [blame] | 958 | }; |
| 959 | |
Saeed Mahameed | d5d284b | 2018-11-19 10:52:41 -0800 | [diff] [blame] | 960 | struct mlx5_ib_pf_eq { |
Yuval Avnery | ca39079 | 2019-06-10 23:38:23 +0000 | [diff] [blame] | 961 | struct notifier_block irq_nb; |
Saeed Mahameed | d5d284b | 2018-11-19 10:52:41 -0800 | [diff] [blame] | 962 | struct mlx5_ib_dev *dev; |
| 963 | struct mlx5_eq *core; |
| 964 | struct work_struct work; |
| 965 | spinlock_t lock; /* Pagefaults spinlock */ |
| 966 | struct workqueue_struct *wq; |
| 967 | mempool_t *pool; |
| 968 | }; |
| 969 | |
Yishai Hadas | e337dd5 | 2019-06-30 19:23:30 +0300 | [diff] [blame] | 970 | struct mlx5_devx_event_table { |
| 971 | struct mlx5_nb devx_nb; |
| 972 | /* serialize updating the event_xa */ |
| 973 | struct mutex event_xa_lock; |
| 974 | struct xarray event_xa; |
| 975 | }; |
| 976 | |
Yishai Hadas | f164be8 | 2019-12-12 13:09:26 +0200 | [diff] [blame] | 977 | struct mlx5_var_table { |
| 978 | /* serialize updating the bitmap */ |
| 979 | struct mutex bitmap_lock; |
| 980 | unsigned long *bitmap; |
| 981 | u64 hw_start_addr; |
| 982 | u32 stride_size; |
| 983 | u64 num_var_hw_entries; |
| 984 | }; |

struct mlx5_ib_dev {
	struct ib_device ib_dev;
	struct mlx5_core_dev *mdev;
	struct notifier_block mdev_events;
	int num_ports;
	/* serialize update of capability mask */
	struct mutex cap_mask_mutex;
	u8 ib_active:1;
	u8 is_rep:1;
	u8 lag_active:1;
	u8 wc_support:1;
	u8 fill_delay;
	struct umr_common umrc;
	/* sync used page count stats */
	struct mlx5_ib_resources devr;

	atomic_t mkey_var;
	struct mlx5_mr_cache cache;
	struct timer_list delay_timer;
	/* Prevents soft lock on massive reg MRs */
	struct mutex slow_path_mutex;
	struct ib_odp_caps odp_caps;
	u64 odp_max_size;
	struct mlx5_ib_pf_eq odp_pf_eq;

	/*
	 * Sleepable RCU that prevents destruction of MRs while they are still
	 * being used by a page fault handler.
	 */
	struct srcu_struct odp_srcu;
	struct xarray odp_mkeys;

	u32 null_mkey;
	struct mlx5_ib_flow_db *flow_db;
	/* protect resources needed as part of reset flow */
	spinlock_t reset_flow_resource_lock;
	struct list_head qp_list;
	/* Array with num_ports elements */
	struct mlx5_ib_port *port;
	struct mlx5_sq_bfreg bfreg;
	struct mlx5_sq_bfreg wc_bfreg;
	struct mlx5_sq_bfreg fp_bfreg;
	struct mlx5_ib_delay_drop delay_drop;
	const struct mlx5_ib_profile *profile;

	struct mlx5_ib_lb_state lb;
	u8 umr_fence;
	struct list_head ib_dev_list;
	u64 sys_image_guid;
	struct mlx5_dm dm;
	u16 devx_whitelist_uid;
	struct mlx5_srq_table srq_table;
	struct mlx5_qp_table qp_table;
	struct mlx5_async_ctx async_ctx;
	struct mlx5_devx_event_table devx_event_table;
	struct mlx5_var_table var_table;

	struct xarray sig_mrs;
};

static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq)
{
	return container_of(mcq, struct mlx5_ib_cq, mcq);
}

static inline struct mlx5_ib_xrcd *to_mxrcd(struct ib_xrcd *ibxrcd)
{
	return container_of(ibxrcd, struct mlx5_ib_xrcd, ibxrcd);
}

static inline struct mlx5_ib_dev *to_mdev(struct ib_device *ibdev)
{
	return container_of(ibdev, struct mlx5_ib_dev, ib_dev);
}
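
/*
 * The to_*() helpers are thin container_of() casts from the uverbs core
 * objects to the driver structures that embed them, so no lookup table is
 * involved. Hypothetical example of walking from an ib_device to driver
 * state:
 */
static inline bool mlx5_ib_has_odp_example(struct ib_device *ibdev)
{
	return to_mdev(ibdev)->odp_caps.general_caps != 0;
}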

static inline struct mlx5_ib_dev *mlx5_udata_to_mdev(struct ib_udata *udata)
{
	struct mlx5_ib_ucontext *context = rdma_udata_to_drv_context(
		udata, struct mlx5_ib_ucontext, ibucontext);

	return to_mdev(context->ibucontext.device);
}
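
/*
 * Hypothetical sketch: verbs that receive only ib_udata can still reach
 * device capabilities by resolving the caller's ucontext first.
 */
static inline bool mlx5_ib_udata_has_cqe_version_example(struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = mlx5_udata_to_mdev(udata);

	return MLX5_CAP_GEN(dev->mdev, cqe_version) != 0;
}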

static inline struct mlx5_ib_cq *to_mcq(struct ib_cq *ibcq)
{
	return container_of(ibcq, struct mlx5_ib_cq, ibcq);
}

static inline struct mlx5_ib_qp *to_mibqp(struct mlx5_core_qp *mqp)
{
	return container_of(mqp, struct mlx5_ib_qp_base, mqp)->container_mibqp;
}

static inline struct mlx5_ib_rwq *to_mibrwq(struct mlx5_core_qp *core_qp)
{
	return container_of(core_qp, struct mlx5_ib_rwq, core_qp);
}

static inline struct mlx5_ib_mr *to_mibmr(struct mlx5_core_mkey *mmkey)
{
	return container_of(mmkey, struct mlx5_ib_mr, mmkey);
}

static inline struct mlx5_ib_pd *to_mpd(struct ib_pd *ibpd)
{
	return container_of(ibpd, struct mlx5_ib_pd, ibpd);
}

static inline struct mlx5_ib_srq *to_msrq(struct ib_srq *ibsrq)
{
	return container_of(ibsrq, struct mlx5_ib_srq, ibsrq);
}

static inline struct mlx5_ib_qp *to_mqp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct mlx5_ib_qp, ibqp);
}

static inline struct mlx5_ib_rwq *to_mrwq(struct ib_wq *ibwq)
{
	return container_of(ibwq, struct mlx5_ib_rwq, ibwq);
}

static inline struct mlx5_ib_rwq_ind_table *to_mrwq_ind_table(struct ib_rwq_ind_table *ib_rwq_ind_tbl)
{
	return container_of(ib_rwq_ind_tbl, struct mlx5_ib_rwq_ind_table, ib_rwq_ind_tbl);
}

static inline struct mlx5_ib_srq *to_mibsrq(struct mlx5_core_srq *msrq)
{
	return container_of(msrq, struct mlx5_ib_srq, msrq);
}

static inline struct mlx5_ib_dm *to_mdm(struct ib_dm *ibdm)
{
	return container_of(ibdm, struct mlx5_ib_dm, ibdm);
}

static inline struct mlx5_ib_mr *to_mmr(struct ib_mr *ibmr)
{
	return container_of(ibmr, struct mlx5_ib_mr, ibmr);
}

static inline struct mlx5_ib_mw *to_mmw(struct ib_mw *ibmw)
{
	return container_of(ibmw, struct mlx5_ib_mw, ibmw);
}

static inline struct mlx5_ib_flow_action *
to_mflow_act(struct ib_flow_action *ibact)
{
	return container_of(ibact, struct mlx5_ib_flow_action, ib_action);
}

static inline struct mlx5_user_mmap_entry *
to_mmmap(struct rdma_user_mmap_entry *rdma_entry)
{
	return container_of(rdma_entry,
			    struct mlx5_user_mmap_entry, rdma_entry);
}

int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context,
			struct ib_udata *udata, unsigned long virt,
			struct mlx5_db *db);
void mlx5_ib_db_unmap_user(struct mlx5_ib_ucontext *context, struct mlx5_db *db);
void __mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq);
void mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq);
void mlx5_ib_free_srq_wqe(struct mlx5_ib_srq *srq, int wqe_index);
int mlx5_ib_create_ah(struct ib_ah *ah, struct rdma_ah_init_attr *init_attr,
		      struct ib_udata *udata);
int mlx5_ib_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr);
void mlx5_ib_destroy_ah(struct ib_ah *ah, u32 flags);
int mlx5_ib_create_srq(struct ib_srq *srq, struct ib_srq_init_attr *init_attr,
		       struct ib_udata *udata);
int mlx5_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
		       enum ib_srq_attr_mask attr_mask, struct ib_udata *udata);
int mlx5_ib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr);
void mlx5_ib_destroy_srq(struct ib_srq *srq, struct ib_udata *udata);
int mlx5_ib_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
			  const struct ib_recv_wr **bad_wr);
int mlx5_ib_enable_lb(struct mlx5_ib_dev *dev, bool td, bool qp);
void mlx5_ib_disable_lb(struct mlx5_ib_dev *dev, bool td, bool qp);
struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
				struct ib_qp_init_attr *init_attr,
				struct ib_udata *udata);
int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		      int attr_mask, struct ib_udata *udata);
int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
		     struct ib_qp_init_attr *qp_init_attr);
int mlx5_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata);
void mlx5_ib_drain_sq(struct ib_qp *qp);
void mlx5_ib_drain_rq(struct ib_qp *qp);
int mlx5_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
		      const struct ib_send_wr **bad_wr);
int mlx5_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
		      const struct ib_recv_wr **bad_wr);
int mlx5_ib_read_wqe_sq(struct mlx5_ib_qp *qp, int wqe_index, void *buffer,
			size_t buflen, size_t *bc);
int mlx5_ib_read_wqe_rq(struct mlx5_ib_qp *qp, int wqe_index, void *buffer,
			size_t buflen, size_t *bc);
int mlx5_ib_read_wqe_srq(struct mlx5_ib_srq *srq, int wqe_index, void *buffer,
			 size_t buflen, size_t *bc);
int mlx5_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
		      struct ib_udata *udata);
void mlx5_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);
int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata);
struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc);
struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				  u64 virt_addr, int access_flags,
				  struct ib_udata *udata);
int mlx5_ib_advise_mr(struct ib_pd *pd,
		      enum ib_uverbs_advise_mr_advice advice,
		      u32 flags,
		      struct ib_sge *sg_list,
		      u32 num_sge,
		      struct uverbs_attr_bundle *attrs);
struct ib_mw *mlx5_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
			       struct ib_udata *udata);
int mlx5_ib_dealloc_mw(struct ib_mw *mw);
int mlx5_ib_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages,
		       int page_shift, int flags);
struct mlx5_ib_mr *mlx5_ib_alloc_implicit_mr(struct mlx5_ib_pd *pd,
					     struct ib_udata *udata,
					     int access_flags);
void mlx5_ib_free_implicit_mr(struct mlx5_ib_mr *mr);
void mlx5_ib_fence_odp_mr(struct mlx5_ib_mr *mr);
int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
			  u64 length, u64 virt_addr, int access_flags,
			  struct ib_pd *pd, struct ib_udata *udata);
int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata);
struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
			       u32 max_num_sg, struct ib_udata *udata);
struct ib_mr *mlx5_ib_alloc_mr_integrity(struct ib_pd *pd,
					 u32 max_num_sg,
					 u32 max_num_meta_sg);
int mlx5_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
		      unsigned int *sg_offset);
int mlx5_ib_map_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg,
			 int data_sg_nents, unsigned int *data_sg_offset,
			 struct scatterlist *meta_sg, int meta_sg_nents,
			 unsigned int *meta_sg_offset);
int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
			const struct ib_wc *in_wc, const struct ib_grh *in_grh,
			const struct ib_mad *in, struct ib_mad *out,
			size_t *out_mad_size, u16 *out_mad_pkey_index);
struct ib_xrcd *mlx5_ib_alloc_xrcd(struct ib_device *ibdev,
				   struct ib_udata *udata);
int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata);
int mlx5_ib_get_buf_offset(u64 addr, int page_shift, u32 *offset);
int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, u8 port);
int mlx5_query_mad_ifc_smp_attr_node_info(struct ib_device *ibdev,
					  struct ib_smp *out_mad);
int mlx5_query_mad_ifc_system_image_guid(struct ib_device *ibdev,
					 __be64 *sys_image_guid);
int mlx5_query_mad_ifc_max_pkeys(struct ib_device *ibdev,
				 u16 *max_pkeys);
int mlx5_query_mad_ifc_vendor_id(struct ib_device *ibdev,
				 u32 *vendor_id);
int mlx5_query_mad_ifc_node_desc(struct mlx5_ib_dev *dev, char *node_desc);
int mlx5_query_mad_ifc_node_guid(struct mlx5_ib_dev *dev, __be64 *node_guid);
int mlx5_query_mad_ifc_pkey(struct ib_device *ibdev, u8 port, u16 index,
			    u16 *pkey);
int mlx5_query_mad_ifc_gids(struct ib_device *ibdev, u8 port, int index,
			    union ib_gid *gid);
int mlx5_query_mad_ifc_port(struct ib_device *ibdev, u8 port,
			    struct ib_port_attr *props);
int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
		       struct ib_port_attr *props);
int mlx5_ib_init_fmr(struct mlx5_ib_dev *dev);
void mlx5_ib_cleanup_fmr(struct mlx5_ib_dev *dev);
void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr,
			unsigned long max_page_shift,
			int *count, int *shift,
			int *ncont, int *order);
void __mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
			    int page_shift, size_t offset, size_t num_pages,
			    __be64 *pas, int access_flags);
void mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
			  int page_shift, __be64 *pas, int access_flags);
void mlx5_ib_copy_pas(u64 *old, u64 *new, int step, int num);
int mlx5_ib_get_cqe_size(struct ib_cq *ibcq);
int mlx5_mr_cache_init(struct mlx5_ib_dev *dev);
int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev);

struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev,
				       unsigned int entry);
void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
int mlx5_mr_cache_invalidate(struct mlx5_ib_mr *mr);

int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
			    struct ib_mr_status *mr_status);
struct ib_wq *mlx5_ib_create_wq(struct ib_pd *pd,
				struct ib_wq_init_attr *init_attr,
				struct ib_udata *udata);
void mlx5_ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata);
int mlx5_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
		      u32 wq_attr_mask, struct ib_udata *udata);
struct ib_rwq_ind_table *mlx5_ib_create_rwq_ind_table(struct ib_device *device,
						      struct ib_rwq_ind_table_init_attr *init_attr,
						      struct ib_udata *udata);
int mlx5_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table);
struct ib_dm *mlx5_ib_alloc_dm(struct ib_device *ibdev,
			       struct ib_ucontext *context,
			       struct ib_dm_alloc_attr *attr,
			       struct uverbs_attr_bundle *attrs);
int mlx5_ib_dealloc_dm(struct ib_dm *ibdm, struct uverbs_attr_bundle *attrs);
struct ib_mr *mlx5_ib_reg_dm_mr(struct ib_pd *pd, struct ib_dm *dm,
				struct ib_dm_mr_attr *attr,
				struct uverbs_attr_bundle *attrs);

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev);
int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev);
void mlx5_ib_odp_cleanup_one(struct mlx5_ib_dev *ibdev);
int __init mlx5_ib_odp_init(void);
void mlx5_ib_odp_cleanup(void);
void mlx5_odp_init_mr_cache_entry(struct mlx5_cache_ent *ent);
void mlx5_odp_populate_xlt(void *xlt, size_t idx, size_t nentries,
			   struct mlx5_ib_mr *mr, int flags);

int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
			       enum ib_uverbs_advise_mr_advice advice,
			       u32 flags, struct ib_sge *sg_list, u32 num_sge);
#else /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
static inline void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev) {}

static inline int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev) { return 0; }
static inline void mlx5_ib_odp_cleanup_one(struct mlx5_ib_dev *ibdev) {}
static inline int mlx5_ib_odp_init(void) { return 0; }
static inline void mlx5_ib_odp_cleanup(void) {}
static inline void mlx5_odp_init_mr_cache_entry(struct mlx5_cache_ent *ent) {}
static inline void mlx5_odp_populate_xlt(void *xlt, size_t idx, size_t nentries,
					 struct mlx5_ib_mr *mr, int flags) {}

static inline int
mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
			   enum ib_uverbs_advise_mr_advice advice, u32 flags,
			   struct ib_sge *sg_list, u32 num_sge)
{
	return -EOPNOTSUPP;
}
#endif /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */

extern const struct mmu_interval_notifier_ops mlx5_mn_ops;

/* Needed for rep profile */
void __mlx5_ib_remove(struct mlx5_ib_dev *dev,
		      const struct mlx5_ib_profile *profile,
		      int stage);
void *__mlx5_ib_add(struct mlx5_ib_dev *dev,
		    const struct mlx5_ib_profile *profile);

int mlx5_ib_get_vf_config(struct ib_device *device, int vf,
			  u8 port, struct ifla_vf_info *info);
int mlx5_ib_set_vf_link_state(struct ib_device *device, int vf,
			      u8 port, int state);
int mlx5_ib_get_vf_stats(struct ib_device *device, int vf,
			 u8 port, struct ifla_vf_stats *stats);
int mlx5_ib_get_vf_guid(struct ib_device *device, int vf, u8 port,
			struct ifla_vf_guid *node_guid,
			struct ifla_vf_guid *port_guid);
int mlx5_ib_set_vf_guid(struct ib_device *device, int vf, u8 port,
			u64 guid, int type);

__be16 mlx5_get_roce_udp_sport(struct mlx5_ib_dev *dev,
			       const struct ib_gid_attr *attr);

void mlx5_ib_cleanup_cong_debugfs(struct mlx5_ib_dev *dev, u8 port_num);
void mlx5_ib_init_cong_debugfs(struct mlx5_ib_dev *dev, u8 port_num);

/* GSI QP helper functions */
struct ib_qp *mlx5_ib_gsi_create_qp(struct ib_pd *pd,
				    struct ib_qp_init_attr *init_attr);
int mlx5_ib_gsi_destroy_qp(struct ib_qp *qp);
int mlx5_ib_gsi_modify_qp(struct ib_qp *qp, struct ib_qp_attr *attr,
			  int attr_mask);
int mlx5_ib_gsi_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
			 int qp_attr_mask,
			 struct ib_qp_init_attr *qp_init_attr);
int mlx5_ib_gsi_post_send(struct ib_qp *qp, const struct ib_send_wr *wr,
			  const struct ib_send_wr **bad_wr);
int mlx5_ib_gsi_post_recv(struct ib_qp *qp, const struct ib_recv_wr *wr,
			  const struct ib_recv_wr **bad_wr);
void mlx5_ib_gsi_pkey_change(struct mlx5_ib_gsi_qp *gsi);

int mlx5_ib_generate_wc(struct ib_cq *ibcq, struct ib_wc *wc);

void mlx5_ib_free_bfreg(struct mlx5_ib_dev *dev, struct mlx5_bfreg_info *bfregi,
			int bfregn);
struct mlx5_ib_dev *mlx5_ib_get_ibdev_from_mpi(struct mlx5_ib_multiport_info *mpi);
struct mlx5_core_dev *mlx5_ib_get_native_port_mdev(struct mlx5_ib_dev *dev,
						   u8 ib_port_num,
						   u8 *native_port_num);
void mlx5_ib_put_native_port_mdev(struct mlx5_ib_dev *dev,
				  u8 port_num);
int mlx5_ib_fill_res_entry(struct sk_buff *msg,
			   struct rdma_restrack_entry *res);
int mlx5_ib_fill_stat_entry(struct sk_buff *msg,
			    struct rdma_restrack_entry *res);

extern const struct uapi_definition mlx5_ib_devx_defs[];
extern const struct uapi_definition mlx5_ib_flow_defs[];
extern const struct uapi_definition mlx5_ib_qos_defs[];

#if IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS)
int mlx5_ib_devx_create(struct mlx5_ib_dev *dev, bool is_user);
void mlx5_ib_devx_destroy(struct mlx5_ib_dev *dev, u16 uid);
void mlx5_ib_devx_init_event_table(struct mlx5_ib_dev *dev);
void mlx5_ib_devx_cleanup_event_table(struct mlx5_ib_dev *dev);
struct mlx5_ib_flow_handler *mlx5_ib_raw_fs_rule_add(
	struct mlx5_ib_dev *dev, struct mlx5_ib_flow_matcher *fs_matcher,
	struct mlx5_flow_context *flow_context,
	struct mlx5_flow_act *flow_act, u32 counter_id,
	void *cmd_in, int inlen, int dest_id, int dest_type);
bool mlx5_ib_devx_is_flow_dest(void *obj, int *dest_id, int *dest_type);
bool mlx5_ib_devx_is_flow_counter(void *obj, u32 offset, u32 *counter_id);
void mlx5_ib_destroy_flow_action_raw(struct mlx5_ib_flow_action *maction);
#else
static inline int
mlx5_ib_devx_create(struct mlx5_ib_dev *dev,
		    bool is_user) { return -EOPNOTSUPP; }
static inline void mlx5_ib_devx_destroy(struct mlx5_ib_dev *dev, u16 uid) {}
static inline void mlx5_ib_devx_init_event_table(struct mlx5_ib_dev *dev) {}
static inline void mlx5_ib_devx_cleanup_event_table(struct mlx5_ib_dev *dev) {}
static inline bool mlx5_ib_devx_is_flow_dest(void *obj, int *dest_id,
					     int *dest_type)
{
	return false;
}
static inline void
mlx5_ib_destroy_flow_action_raw(struct mlx5_ib_flow_action *maction)
{
}
#endif

static inline void init_query_mad(struct ib_smp *mad)
{
	mad->base_version = 1;
	mad->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED;
	mad->class_version = 1;
	mad->method = IB_MGMT_METHOD_GET;
}
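
/*
 * Sketch of the typical call pattern (the helper name is illustrative):
 * init_query_mad() only fills the common header, so the caller still sets
 * the attribute it wants queried, e.g. PortInfo.
 */
static inline void init_port_info_query_example(struct ib_smp *in_mad, u8 port)
{
	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_PORT_INFO;
	in_mad->attr_mod = cpu_to_be32(port);
}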

static inline u8 convert_access(int acc)
{
	return (acc & IB_ACCESS_REMOTE_ATOMIC ? MLX5_PERM_ATOMIC : 0) |
	       (acc & IB_ACCESS_REMOTE_WRITE ? MLX5_PERM_REMOTE_WRITE : 0) |
	       (acc & IB_ACCESS_REMOTE_READ ? MLX5_PERM_REMOTE_READ : 0) |
	       (acc & IB_ACCESS_LOCAL_WRITE ? MLX5_PERM_LOCAL_WRITE : 0) |
	       MLX5_PERM_LOCAL_READ;
}
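
/*
 * Worked example: convert_access(IB_ACCESS_LOCAL_WRITE |
 * IB_ACCESS_REMOTE_READ) evaluates to MLX5_PERM_LOCAL_WRITE |
 * MLX5_PERM_REMOTE_READ | MLX5_PERM_LOCAL_READ; local read permission is
 * granted unconditionally.
 */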

static inline int is_qp1(enum ib_qp_type qp_type)
{
	return qp_type == MLX5_IB_QPT_HW_GSI;
}

#define MLX5_MAX_UMR_SHIFT 16
#define MLX5_MAX_UMR_PAGES (1 << MLX5_MAX_UMR_SHIFT)

static inline u32 check_cq_create_flags(u32 flags)
{
	/*
	 * Returns a non-zero value for unsupported CQ create flags,
	 * zero otherwise.
	 */
	return (flags & ~(IB_UVERBS_CQ_FLAGS_IGNORE_OVERRUN |
			  IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION));
}
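
/*
 * Hypothetical usage sketch: a create-CQ path would reject unsupported
 * flags up front, before any hardware object is created.
 */
static inline int validate_cq_create_flags_example(u32 flags)
{
	return check_cq_create_flags(flags) ? -EOPNOTSUPP : 0;
}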

static inline int verify_assign_uidx(u8 cqe_version, u32 cmd_uidx,
				     u32 *user_index)
{
	if (cqe_version) {
		if ((cmd_uidx == MLX5_IB_DEFAULT_UIDX) ||
		    (cmd_uidx & ~MLX5_USER_ASSIGNED_UIDX_MASK))
			return -EINVAL;
		*user_index = cmd_uidx;
	} else {
		*user_index = MLX5_IB_DEFAULT_UIDX;
	}

	return 0;
}

static inline int get_qp_user_index(struct mlx5_ib_ucontext *ucontext,
				    struct mlx5_ib_create_qp *ucmd,
				    int inlen,
				    u32 *user_index)
{
	u8 cqe_version = ucontext->cqe_version;

	if ((offsetofend(typeof(*ucmd), uidx) <= inlen) && !cqe_version &&
	    (ucmd->uidx == MLX5_IB_DEFAULT_UIDX))
		return 0;

	if ((offsetofend(typeof(*ucmd), uidx) <= inlen) != !!cqe_version)
		return -EINVAL;

	return verify_assign_uidx(cqe_version, ucmd->uidx, user_index);
}

static inline int get_srq_user_index(struct mlx5_ib_ucontext *ucontext,
				     struct mlx5_ib_create_srq *ucmd,
				     int inlen,
				     u32 *user_index)
{
	u8 cqe_version = ucontext->cqe_version;

	if ((offsetofend(typeof(*ucmd), uidx) <= inlen) && !cqe_version &&
	    (ucmd->uidx == MLX5_IB_DEFAULT_UIDX))
		return 0;

	if ((offsetofend(typeof(*ucmd), uidx) <= inlen) != !!cqe_version)
		return -EINVAL;

	return verify_assign_uidx(cqe_version, ucmd->uidx, user_index);
}
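
/*
 * Sketch only (names are illustrative): a create verb typically copies the
 * user command and lets the cqe_version negotiated at ucontext allocation
 * decide whether the uidx field must be honoured.
 */
static inline int parse_srq_uidx_example(struct mlx5_ib_ucontext *ucontext,
					 struct ib_udata *udata, u32 *uidx)
{
	struct mlx5_ib_create_srq ucmd = {};

	if (ib_copy_from_udata(&ucmd, udata,
			       min(udata->inlen, sizeof(ucmd))))
		return -EFAULT;

	return get_srq_user_index(ucontext, &ucmd, udata->inlen, uidx);
}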

static inline int get_uars_per_sys_page(struct mlx5_ib_dev *dev, bool lib_support)
{
	return lib_support && MLX5_CAP_GEN(dev->mdev, uar_4k) ?
				MLX5_UARS_IN_PAGE : 1;
}

static inline int get_num_static_uars(struct mlx5_ib_dev *dev,
				      struct mlx5_bfreg_info *bfregi)
{
	return get_uars_per_sys_page(dev, bfregi->lib_uar_4k) * bfregi->num_static_sys_pages;
}
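
/*
 * Worked example, assuming MLX5_UARS_IN_PAGE is the number of 4KB device
 * UARs that fit in one kernel page: on a 64KB-page kernel with uar_4k set
 * and a library that requested 4K UARs, each system page carries 16 UARs,
 * so num_static_sys_pages == 2 yields 32 static UARs; otherwise the ratio
 * is 1 and the UAR count equals the page count.
 */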

unsigned long mlx5_ib_get_xlt_emergency_page(void);
void mlx5_ib_put_xlt_emergency_page(void);

int bfregn_to_uar_index(struct mlx5_ib_dev *dev,
			struct mlx5_bfreg_info *bfregi, u32 bfregn,
			bool dyn_bfreg);

int mlx5_ib_qp_set_counter(struct ib_qp *qp, struct rdma_counter *counter);
u16 mlx5_ib_get_counters_id(struct mlx5_ib_dev *dev, u8 port_num);

static inline bool mlx5_ib_can_use_umr(struct mlx5_ib_dev *dev,
				       bool do_modify_atomic, int access_flags)
{
	if (MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled))
		return false;

	if (do_modify_atomic &&
	    MLX5_CAP_GEN(dev->mdev, atomic) &&
	    MLX5_CAP_GEN(dev->mdev, umr_modify_atomic_disabled))
		return false;

	if (access_flags & IB_ACCESS_RELAXED_ORDERING &&
	    (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write) ||
	     MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read)))
		return false;

	return true;
}
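
/*
 * Hypothetical gate, mirroring how such a predicate is typically consumed:
 * fall back to a full re-registration when UMR cannot rewrite the mkey for
 * this combination of device capabilities and access flags.
 */
static inline bool use_umr_for_rereg_example(struct mlx5_ib_dev *dev,
					     int access_flags)
{
	return mlx5_ib_can_use_umr(dev, true, access_flags);
}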

int mlx5_ib_enable_driver(struct ib_device *dev);
int mlx5_ib_test_wc(struct mlx5_ib_dev *dev);
#endif /* MLX5_IB_H */