Leon Romanovskyb572ebe2020-07-02 11:18:05 +03001/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
Eli Cohene126ba92013-07-07 17:25:49 +03002/*
Leon Romanovskyb572ebe2020-07-02 11:18:05 +03003 * Copyright (c) 2013-2020, Mellanox Technologies inc. All rights reserved.
Eli Cohene126ba92013-07-07 17:25:49 +03004 */
5
6#ifndef MLX5_IB_H
7#define MLX5_IB_H
8
9#include <linux/kernel.h>
10#include <linux/sched.h>
11#include <rdma/ib_verbs.h>
Leon Romanovsky8b4d5bc2019-01-08 16:07:25 +020012#include <rdma/ib_umem.h>
Eli Cohene126ba92013-07-07 17:25:49 +030013#include <rdma/ib_smi.h>
14#include <linux/mlx5/driver.h>
15#include <linux/mlx5/cq.h>
Mark Blochb823dd62018-09-06 17:27:05 +030016#include <linux/mlx5/fs.h>
Eli Cohene126ba92013-07-07 17:25:49 +030017#include <linux/mlx5/qp.h>
Eli Cohene126ba92013-07-07 17:25:49 +030018#include <linux/types.h>
majd@mellanox.com146d2f12016-01-14 19:13:02 +020019#include <linux/mlx5/transobj.h>
Matan Barakd2370e02016-02-29 18:05:30 +020020#include <rdma/ib_user_verbs.h>
Leon Romanovsky3085e292016-09-22 17:31:11 +030021#include <rdma/mlx5-abi.h>
Ariel Levkovich24da0012018-04-05 18:53:27 +030022#include <rdma/uverbs_ioctl.h>
Yishai Hadasfd44e382018-07-23 15:25:07 +030023#include <rdma/mlx5_user_ioctl_cmds.h>
Ariel Levkovich3b113a12019-05-05 17:07:11 +030024#include <rdma/mlx5_user_ioctl_verbs.h>
Eli Cohene126ba92013-07-07 17:25:49 +030025
Leon Romanovskyf3da6572018-11-28 20:53:41 +020026#include "srq.h"
27
Jason Gunthorpe5a738b52018-09-20 16:42:24 -060028#define mlx5_ib_dbg(_dev, format, arg...) \
29 dev_dbg(&(_dev)->ib_dev.dev, "%s:%d:(pid %d): " format, __func__, \
30 __LINE__, current->pid, ##arg)
Eli Cohene126ba92013-07-07 17:25:49 +030031
Jason Gunthorpe5a738b52018-09-20 16:42:24 -060032#define mlx5_ib_err(_dev, format, arg...) \
33 dev_err(&(_dev)->ib_dev.dev, "%s:%d:(pid %d): " format, __func__, \
34 __LINE__, current->pid, ##arg)
Eli Cohene126ba92013-07-07 17:25:49 +030035
Jason Gunthorpe5a738b52018-09-20 16:42:24 -060036#define mlx5_ib_warn(_dev, format, arg...) \
37 dev_warn(&(_dev)->ib_dev.dev, "%s:%d:(pid %d): " format, __func__, \
38 __LINE__, current->pid, ##arg)
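/*
 * Illustrative usage (not part of the original header): each of the macros
 * above prefixes the message with the function name, line number and current
 * pid before handing it to the corresponding dev_*() helper, e.g.
 *
 *	mlx5_ib_warn(dev, "failed to create mkey, err %d\n", err);
 */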
Eli Cohene126ba92013-07-07 17:25:49 +030039
Haggai Abramovskycfb5e082016-01-14 19:12:57 +020040#define MLX5_IB_DEFAULT_UIDX 0xffffff
41#define MLX5_USER_ASSIGNED_UIDX_MASK __mlx5_mask(qpc, user_index)
Matan Barakb368d7c2015-12-15 20:30:12 +020042
Majd Dibbiny762f8992016-10-27 16:36:47 +030043#define MLX5_MKEY_PAGE_SHIFT_MASK __mlx5_mask(mkc, log_page_size)
44
Eli Cohene126ba92013-07-07 17:25:49 +030045enum {
Yishai Hadas7be76be2019-12-12 13:09:27 +020046 MLX5_IB_MMAP_OFFSET_START = 9,
47 MLX5_IB_MMAP_OFFSET_END = 255,
48};
49
50enum {
Eli Cohene126ba92013-07-07 17:25:49 +030051 MLX5_IB_MMAP_CMD_SHIFT = 8,
52 MLX5_IB_MMAP_CMD_MASK = 0xff,
53};
54
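/*
 * Illustrative sketch (not part of the original header) of how the legacy
 * mmap page offset is interpreted: the command sits above
 * MLX5_IB_MMAP_CMD_SHIFT and the argument occupies the bits below it.
 * The helper names are assumptions used only for illustration.
 */
static inline unsigned long mlx5_ib_mmap_cmd(unsigned long pgoff)
{
	return (pgoff >> MLX5_IB_MMAP_CMD_SHIFT) & MLX5_IB_MMAP_CMD_MASK;
}

static inline unsigned long mlx5_ib_mmap_arg(unsigned long pgoff)
{
	return pgoff & ((1UL << MLX5_IB_MMAP_CMD_SHIFT) - 1);
}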
Eli Cohene126ba92013-07-07 17:25:49 +030055enum {
56 MLX5_RES_SCAT_DATA32_CQE = 0x1,
57 MLX5_RES_SCAT_DATA64_CQE = 0x2,
58 MLX5_REQ_SCAT_DATA32_CQE = 0x11,
59 MLX5_REQ_SCAT_DATA64_CQE = 0x22,
60};
61
Eli Cohene126ba92013-07-07 17:25:49 +030062enum mlx5_ib_mad_ifc_flags {
63 MLX5_MAD_IFC_IGNORE_MKEY = 1,
64 MLX5_MAD_IFC_IGNORE_BKEY = 2,
65 MLX5_MAD_IFC_NET_VIEW = 4,
66};
67
Leon Romanovsky051f2632015-12-20 12:16:11 +020068enum {
Eli Cohen2f5ff262017-01-03 23:55:21 +020069 MLX5_CROSS_CHANNEL_BFREG = 0,
Leon Romanovsky051f2632015-12-20 12:16:11 +020070};
71
Haggai Abramovskycfb5e082016-01-14 19:12:57 +020072enum {
73 MLX5_CQE_VERSION_V0,
74 MLX5_CQE_VERSION_V1,
75};
76
Artemy Kovalyoveb761892017-08-17 15:52:09 +030077enum {
78 MLX5_TM_MAX_RNDV_MSG_SIZE = 64,
79 MLX5_TM_MAX_SGE = 1,
80};
81
Yishai Hadas4ed131d2017-12-24 16:31:35 +020082enum {
83 MLX5_IB_INVALID_UAR_INDEX = BIT(31),
Yishai Hadas1ee47ab2017-12-24 16:31:36 +020084 MLX5_IB_INVALID_BFREG = BIT(31),
Yishai Hadas4ed131d2017-12-24 16:31:35 +020085};
86
Ariel Levkovich24da0012018-04-05 18:53:27 +030087enum {
88 MLX5_MAX_MEMIC_PAGES = 0x100,
89 MLX5_MEMIC_ALLOC_SIZE_MASK = 0x3f,
90};
91
92enum {
93 MLX5_MEMIC_BASE_ALIGN = 6,
94 MLX5_MEMIC_BASE_SIZE = 1 << MLX5_MEMIC_BASE_ALIGN,
95};
96
Yishai Hadasdc2316e2019-12-12 12:02:37 +020097enum mlx5_ib_mmap_type {
98 MLX5_IB_MMAP_TYPE_MEMIC = 1,
Yishai Hadas7be76be2019-12-12 13:09:27 +020099 MLX5_IB_MMAP_TYPE_VAR = 2,
Yishai Hadas342ee592020-03-24 08:01:39 +0200100 MLX5_IB_MMAP_TYPE_UAR_WC = 3,
101 MLX5_IB_MMAP_TYPE_UAR_NC = 4,
Yishai Hadasdc2316e2019-12-12 12:02:37 +0200102};
103
Leon Romanovsky21528622020-03-24 08:01:42 +0200104struct mlx5_bfreg_info {
105 u32 *sys_pages;
106 int num_low_latency_bfregs;
107 unsigned int *count;
108
109 /*
110 * protect bfreg allocation data structs
111 */
112 struct mutex lock;
113 u32 ver;
114 u8 lib_uar_4k : 1;
Yishai Hadas0a2fd012020-03-24 08:01:43 +0200115 u8 lib_uar_dyn : 1;
Leon Romanovsky21528622020-03-24 08:01:42 +0200116 u32 num_sys_pages;
117 u32 num_static_sys_pages;
118 u32 total_num_bfregs;
119 u32 num_dyn_bfregs;
120};
Ariel Levkovich25c13322019-05-05 17:07:13 +0300121
Eli Cohene126ba92013-07-07 17:25:49 +0300122struct mlx5_ib_ucontext {
123 struct ib_ucontext ibucontext;
124 struct list_head db_page_list;
125
126 /* protect doorbell record alloc/free
127 */
128 struct mutex db_page_mutex;
Eli Cohen2f5ff262017-01-03 23:55:21 +0200129 struct mlx5_bfreg_info bfregi;
Haggai Abramovskycfb5e082016-01-14 19:12:57 +0200130 u8 cqe_version;
majd@mellanox.com146d2f12016-01-14 19:13:02 +0200131 /* Transport Domain number */
132 u32 tdn;
Artemy Kovalyov7d0cc6e2017-01-02 11:37:44 +0200133
Eli Cohenb037c292017-01-03 23:55:26 +0200134 u64 lib_caps;
Yishai Hadasa8b92ca2018-06-17 12:59:57 +0300135 u16 devx_uid;
Majd Dibbinyc6a21c32018-08-28 14:29:05 +0300136 /* For RoCE LAG TX affinity */
137 atomic_t tx_port_affinity;
Eli Cohene126ba92013-07-07 17:25:49 +0300138};
139
140static inline struct mlx5_ib_ucontext *to_mucontext(struct ib_ucontext *ibucontext)
141{
142 return container_of(ibucontext, struct mlx5_ib_ucontext, ibucontext);
143}
144
145struct mlx5_ib_pd {
146 struct ib_pd ibpd;
147 u32 pdn;
Yishai Hadasa1069c12018-09-20 21:39:19 +0300148 u16 uid;
Eli Cohene126ba92013-07-07 17:25:49 +0300149};
150
Mark Blochb4749bf2018-08-28 14:18:51 +0300151enum {
152 MLX5_IB_FLOW_ACTION_MODIFY_HEADER,
Mark Blocha090d0d2018-08-28 14:18:54 +0300153 MLX5_IB_FLOW_ACTION_PACKET_REFORMAT,
Mark Bloch08aeb972018-08-28 14:18:53 +0300154 MLX5_IB_FLOW_ACTION_DECAP,
Eli Cohene126ba92013-07-07 17:25:49 +0300155};
156
Maor Gottlieb038d2ef2016-01-11 10:26:07 +0200157#define MLX5_IB_FLOW_MCAST_PRIO (MLX5_BY_PASS_NUM_PRIOS - 1)
Maor Gottlieb35d190112016-03-07 18:51:47 +0200158#define MLX5_IB_FLOW_LAST_PRIO (MLX5_BY_PASS_NUM_REGULAR_PRIOS - 1)
Maor Gottlieb038d2ef2016-01-11 10:26:07 +0200159#if (MLX5_IB_FLOW_LAST_PRIO <= 0)
160#error "Invalid number of bypass priorities"
161#endif
162#define MLX5_IB_FLOW_LEFTOVERS_PRIO (MLX5_IB_FLOW_MCAST_PRIO + 1)
163
164#define MLX5_IB_NUM_FLOW_FT (MLX5_IB_FLOW_LEFTOVERS_PRIO + 1)
Maor Gottliebcc0e5d42016-08-28 14:16:34 +0300165#define MLX5_IB_NUM_SNIFFER_FTS 2
Aviad Yehezkel802c2122018-03-28 09:27:53 +0300166#define MLX5_IB_NUM_EGRESS_FTS 1
Maor Gottlieb038d2ef2016-01-11 10:26:07 +0200167struct mlx5_ib_flow_prio {
168 struct mlx5_flow_table *flow_table;
169 unsigned int refcount;
170};
171
172struct mlx5_ib_flow_handler {
173 struct list_head list;
174 struct ib_flow ibflow;
Maor Gottlieb5497adc2016-08-28 14:16:31 +0300175 struct mlx5_ib_flow_prio *prio;
Mark Bloch74491de2016-08-31 11:24:25 +0000176 struct mlx5_flow_handle *rule;
Raed Salem3b3233f2018-05-31 16:43:39 +0300177 struct ib_counters *ibcounters;
Yishai Hadasd4be3f42018-07-23 15:25:10 +0300178 struct mlx5_ib_dev *dev;
179 struct mlx5_ib_flow_matcher *flow_matcher;
Maor Gottlieb038d2ef2016-01-11 10:26:07 +0200180};
181
Yishai Hadasfd44e382018-07-23 15:25:07 +0300182struct mlx5_ib_flow_matcher {
183 struct mlx5_ib_match_params matcher_mask;
184 int mask_len;
185 enum mlx5_ib_flow_type flow_type;
Mark Blochb47fd4f2018-09-06 17:27:07 +0300186 enum mlx5_flow_namespace_type ns_type;
Yishai Hadasfd44e382018-07-23 15:25:07 +0300187 u16 priority;
188 struct mlx5_core_dev *mdev;
189 atomic_t usecnt;
190 u8 match_criteria_enable;
191};
192
Yishai Hadas30f2fe42020-02-19 21:05:18 +0200193struct mlx5_ib_pp {
194 u16 index;
195 struct mlx5_core_dev *mdev;
196};
197
Maor Gottlieb038d2ef2016-01-11 10:26:07 +0200198struct mlx5_ib_flow_db {
199 struct mlx5_ib_flow_prio prios[MLX5_IB_NUM_FLOW_FT];
Mark Bloch78dd0c42018-09-02 12:51:31 +0300200 struct mlx5_ib_flow_prio egress_prios[MLX5_IB_NUM_FLOW_FT];
Maor Gottliebcc0e5d42016-08-28 14:16:34 +0300201 struct mlx5_ib_flow_prio sniffer[MLX5_IB_NUM_SNIFFER_FTS];
Aviad Yehezkel802c2122018-03-28 09:27:53 +0300202 struct mlx5_ib_flow_prio egress[MLX5_IB_NUM_EGRESS_FTS];
Mark Bloch13a43762019-03-28 15:46:21 +0200203 struct mlx5_ib_flow_prio fdb;
Mark Zhangd8abe882019-08-19 14:36:26 +0300204 struct mlx5_ib_flow_prio rdma_rx[MLX5_IB_NUM_FLOW_FT];
Michael Guralnikaf9c3842020-03-24 08:14:25 +0200205 struct mlx5_ib_flow_prio rdma_tx[MLX5_IB_NUM_FLOW_FT];
Aviv Heller9ef9c642016-09-18 20:48:01 +0300206 struct mlx5_flow_table *lag_demux_ft;
Maor Gottlieb038d2ef2016-01-11 10:26:07 +0200207 /* Protect flow steering bypass flow tables
208	 * when adding/deleting flow rules.
209	 * Only a single add/removal of a flow steering rule can be
210	 * done at a time.
211 */
212 struct mutex lock;
213};
214
Eli Cohene126ba92013-07-07 17:25:49 +0300215/* Use macros here so that we don't have to duplicate
216 * enum ib_send_flags and enum ib_qp_type for the low-level driver
217 */
218
Artemy Kovalyov31616252017-01-02 11:37:42 +0200219#define MLX5_IB_SEND_UMR_ENABLE_MR (IB_SEND_RESERVED_START << 0)
220#define MLX5_IB_SEND_UMR_DISABLE_MR (IB_SEND_RESERVED_START << 1)
221#define MLX5_IB_SEND_UMR_FAIL_IF_FREE (IB_SEND_RESERVED_START << 2)
222#define MLX5_IB_SEND_UMR_UPDATE_XLT (IB_SEND_RESERVED_START << 3)
223#define MLX5_IB_SEND_UMR_UPDATE_TRANSLATION (IB_SEND_RESERVED_START << 4)
224#define MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS IB_SEND_RESERVED_END
Noa Osherovich56e11d62016-02-29 16:46:51 +0200225
Eli Cohene126ba92013-07-07 17:25:49 +0300226#define MLX5_IB_QPT_REG_UMR IB_QPT_RESERVED1
Haggai Erand16e91d2016-02-29 15:45:05 +0200227/*
228 * IB_QPT_GSI creates the software wrapper around GSI, and MLX5_IB_QPT_HW_GSI
229 * creates the actual hardware QP.
230 */
231#define MLX5_IB_QPT_HW_GSI IB_QPT_RESERVED2
Moni Shouab4aaa1f2018-01-02 16:19:31 +0200232#define MLX5_IB_QPT_DCI IB_QPT_RESERVED3
233#define MLX5_IB_QPT_DCT IB_QPT_RESERVED4
Eli Cohene126ba92013-07-07 17:25:49 +0300234#define MLX5_IB_WR_UMR IB_WR_RESERVED1
235
Artemy Kovalyov31616252017-01-02 11:37:42 +0200236#define MLX5_IB_UMR_OCTOWORD 16
237#define MLX5_IB_UMR_XLT_ALIGNMENT 64
238
Artemy Kovalyov7d0cc6e2017-01-02 11:37:44 +0200239#define MLX5_IB_UPD_XLT_ZAP BIT(0)
240#define MLX5_IB_UPD_XLT_ENABLE BIT(1)
241#define MLX5_IB_UPD_XLT_ATOMIC BIT(2)
242#define MLX5_IB_UPD_XLT_ADDR BIT(3)
243#define MLX5_IB_UPD_XLT_PD BIT(4)
244#define MLX5_IB_UPD_XLT_ACCESS BIT(5)
Artemy Kovalyov81713d32017-01-18 16:58:11 +0200245#define MLX5_IB_UPD_XLT_INDIRECT BIT(6)
Artemy Kovalyov7d0cc6e2017-01-02 11:37:44 +0200246
Haggai Eranb11a4f92016-02-29 15:45:03 +0200247/* Private QP creation flags to be passed in ib_qp_init_attr.create_flags.
248 *
249 * These flags are intended for internal use by the mlx5_ib driver, and they
250 * rely on the range reserved for that use in the ib_qp_create_flags enum.
251 */
Michael Guralnik3f89b012019-10-20 09:43:59 +0300252#define MLX5_IB_QP_CREATE_SQPN_QP1 IB_QP_CREATE_RESERVED_START
Michael Guralnik11f552e2019-06-10 15:21:24 +0300253#define MLX5_IB_QP_CREATE_WC_TEST (IB_QP_CREATE_RESERVED_START << 1)
Haggai Eranb11a4f92016-02-29 15:45:03 +0200254
Eli Cohene126ba92013-07-07 17:25:49 +0300255struct wr_list {
256 u16 opcode;
257 u16 next;
258};
259
Noa Osheroviche4cc4fa2017-01-18 15:40:03 +0200260enum mlx5_ib_rq_flags {
261 MLX5_IB_RQ_CVLAN_STRIPPING = 1 << 0,
Noa Osherovichb1383aa2017-10-29 13:59:45 +0200262 MLX5_IB_RQ_PCI_WRITE_END_PADDING = 1 << 1,
Noa Osheroviche4cc4fa2017-01-18 15:40:03 +0200263};
264
Eli Cohene126ba92013-07-07 17:25:49 +0300265struct mlx5_ib_wq {
Guy Levi34f4c952018-11-26 08:15:50 +0200266 struct mlx5_frag_buf_ctrl fbc;
Eli Cohene126ba92013-07-07 17:25:49 +0300267 u64 *wrid;
268 u32 *wr_data;
269 struct wr_list *w_list;
270 unsigned *wqe_head;
271 u16 unsig_count;
272
273 /* serialize post to the work queue
274 */
275 spinlock_t lock;
276 int wqe_cnt;
277 int max_post;
278 int max_gs;
279 int offset;
280 int wqe_shift;
281 unsigned head;
282 unsigned tail;
283 u16 cur_post;
Leon Romanovsky950bf4f2020-03-18 11:16:40 +0200284 u16 last_poll;
Guy Levi34f4c952018-11-26 08:15:50 +0200285 void *cur_edge;
Eli Cohene126ba92013-07-07 17:25:49 +0300286};
287
Maor Gottlieb03404e82017-05-30 10:29:13 +0300288enum mlx5_ib_wq_flags {
289 MLX5_IB_WQ_FLAGS_DELAY_DROP = 0x1,
Noa Osherovichccc87082017-10-17 18:01:13 +0300290 MLX5_IB_WQ_FLAGS_STRIDING_RQ = 0x2,
Maor Gottlieb03404e82017-05-30 10:29:13 +0300291};
292
Noa Osherovichb4f34592017-10-17 18:01:12 +0300293#define MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES 9
294#define MLX5_MAX_SINGLE_WQE_LOG_NUM_STRIDES 16
295#define MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES 6
296#define MLX5_MAX_SINGLE_STRIDE_LOG_NUM_BYTES 13
Mark Zhangc16339b2019-11-15 17:45:55 +0200297#define MLX5_EXT_MIN_SINGLE_WQE_LOG_NUM_STRIDES 3
Noa Osherovichb4f34592017-10-17 18:01:12 +0300298
Yishai Hadas79b20a62016-05-23 15:20:50 +0300299struct mlx5_ib_rwq {
300 struct ib_wq ibwq;
Yishai Hadas350d0e42016-08-28 14:58:18 +0300301 struct mlx5_core_qp core_qp;
Yishai Hadas79b20a62016-05-23 15:20:50 +0300302 u32 rq_num_pas;
303 u32 log_rq_stride;
304 u32 log_rq_size;
305 u32 rq_page_offset;
306 u32 log_page_size;
Noa Osherovichccc87082017-10-17 18:01:13 +0300307 u32 log_num_strides;
308 u32 two_byte_shift_en;
309 u32 single_stride_log_num_of_bytes;
Yishai Hadas79b20a62016-05-23 15:20:50 +0300310 struct ib_umem *umem;
311 size_t buf_size;
312 unsigned int page_shift;
Yishai Hadas79b20a62016-05-23 15:20:50 +0300313 struct mlx5_db db;
314 u32 user_index;
315 u32 wqe_count;
316 u32 wqe_shift;
317 int wq_sig;
Maor Gottlieb03404e82017-05-30 10:29:13 +0300318 u32 create_flags; /* Use enum mlx5_ib_wq_flags */
Yishai Hadas79b20a62016-05-23 15:20:50 +0300319};
320
Yishai Hadasc5f90922016-05-23 15:20:53 +0300321struct mlx5_ib_rwq_ind_table {
322 struct ib_rwq_ind_table ib_rwq_ind_tbl;
323 u32 rqtn;
Yishai Hadas5deba862018-09-20 21:39:28 +0300324 u16 uid;
Yishai Hadasc5f90922016-05-23 15:20:53 +0300325};
326
majd@mellanox.com19098df2016-01-14 19:13:03 +0200327struct mlx5_ib_ubuffer {
328 struct ib_umem *umem;
329 int buf_size;
330 u64 buf_addr;
331};
332
333struct mlx5_ib_qp_base {
334 struct mlx5_ib_qp *container_mibqp;
335 struct mlx5_core_qp mqp;
336 struct mlx5_ib_ubuffer ubuffer;
337};
338
339struct mlx5_ib_qp_trans {
340 struct mlx5_ib_qp_base base;
341 u16 xrcdn;
342 u8 alt_port;
343 u8 atomic_rd_en;
344 u8 resp_depth;
345};
346
Yishai Hadas28d61372016-05-23 15:20:56 +0300347struct mlx5_ib_rss_qp {
348 u32 tirn;
349};
350
Maor Gottlieb038d2ef2016-01-11 10:26:07 +0200351struct mlx5_ib_rq {
majd@mellanox.com0fb2ed62016-01-14 19:13:04 +0200352 struct mlx5_ib_qp_base base;
353 struct mlx5_ib_wq *rq;
354 struct mlx5_ib_ubuffer ubuffer;
355 struct mlx5_db *doorbell;
Maor Gottlieb038d2ef2016-01-11 10:26:07 +0200356 u32 tirn;
majd@mellanox.com0fb2ed62016-01-14 19:13:04 +0200357 u8 state;
Noa Osheroviche4cc4fa2017-01-18 15:40:03 +0200358 u32 flags;
majd@mellanox.com0fb2ed62016-01-14 19:13:04 +0200359};
360
361struct mlx5_ib_sq {
362 struct mlx5_ib_qp_base base;
363 struct mlx5_ib_wq *sq;
364 struct mlx5_ib_ubuffer ubuffer;
365 struct mlx5_db *doorbell;
Mark Blochb96c9dd2018-01-29 10:40:37 +0000366 struct mlx5_flow_handle *flow_rule;
majd@mellanox.com0fb2ed62016-01-14 19:13:04 +0200367 u32 tisn;
368 u8 state;
Maor Gottlieb038d2ef2016-01-11 10:26:07 +0200369};
370
371struct mlx5_ib_raw_packet_qp {
majd@mellanox.com0fb2ed62016-01-14 19:13:04 +0200372 struct mlx5_ib_sq sq;
Maor Gottlieb038d2ef2016-01-11 10:26:07 +0200373 struct mlx5_ib_rq rq;
374};
375
Eli Cohen5fe9dec2017-01-03 23:55:25 +0200376struct mlx5_bf {
377 int buf_size;
378 unsigned long offset;
379 struct mlx5_sq_bfreg *bfreg;
380};
381
Moni Shouab4aaa1f2018-01-02 16:19:31 +0200382struct mlx5_ib_dct {
383 struct mlx5_core_dct mdct;
384 u32 *in;
385};
386
Eli Cohene126ba92013-07-07 17:25:49 +0300387struct mlx5_ib_qp {
388 struct ib_qp ibqp;
Maor Gottlieb038d2ef2016-01-11 10:26:07 +0200389 union {
majd@mellanox.com0fb2ed62016-01-14 19:13:04 +0200390 struct mlx5_ib_qp_trans trans_qp;
391 struct mlx5_ib_raw_packet_qp raw_packet_qp;
Yishai Hadas28d61372016-05-23 15:20:56 +0300392 struct mlx5_ib_rss_qp rss_qp;
Moni Shouab4aaa1f2018-01-02 16:19:31 +0200393 struct mlx5_ib_dct dct;
Maor Gottlieb038d2ef2016-01-11 10:26:07 +0200394 };
Yonatan Cohen388ca8b2018-01-02 16:08:06 +0200395 struct mlx5_frag_buf buf;
Eli Cohene126ba92013-07-07 17:25:49 +0300396
397 struct mlx5_db db;
398 struct mlx5_ib_wq rq;
399
Eli Cohene126ba92013-07-07 17:25:49 +0300400 u8 sq_signal_bits;
Max Gurtovoy6e8484c2017-05-28 10:53:11 +0300401 u8 next_fence;
Eli Cohene126ba92013-07-07 17:25:49 +0300402 struct mlx5_ib_wq sq;
403
Eli Cohene126ba92013-07-07 17:25:49 +0300404 /* serialize qp state modifications
405 */
406 struct mutex mutex;
Leon Romanovskya8f3ea62020-04-27 18:46:17 +0300407 /* cached variant of create_flags from struct ib_qp_init_attr */
Eli Cohene126ba92013-07-07 17:25:49 +0300408 u32 flags;
409 u8 port;
Eli Cohene126ba92013-07-07 17:25:49 +0300410 u8 state;
Eli Cohene126ba92013-07-07 17:25:49 +0300411 int max_inline_data;
Eli Cohen5fe9dec2017-01-03 23:55:25 +0200412 struct mlx5_bf bf;
Leon Romanovsky2be08c32020-04-27 18:46:13 +0300413 u8 has_rq:1;
414 u8 is_rss:1;
Eli Cohene126ba92013-07-07 17:25:49 +0300415
416 /* only for user space QPs. For kernel
417 * we have it from the bf object
418 */
Eli Cohen2f5ff262017-01-03 23:55:21 +0200419 int bfregn;
Eli Cohene126ba92013-07-07 17:25:49 +0300420
Maor Gottlieb89ea94a72016-06-17 15:01:38 +0300421 struct list_head qps_list;
422 struct list_head cq_recv_list;
423 struct list_head cq_send_list;
Bodong Wang61147f32018-03-19 15:10:30 +0200424 struct mlx5_rate_limit rl;
Yishai Hadasc2e53b22017-06-08 16:15:08 +0300425 u32 underlay_qpn;
Mark Bloch175edba2018-09-17 13:30:48 +0300426 u32 flags_en;
Leon Romanovsky7aede1a22020-04-27 18:46:20 +0300427 /*
428 * IB/core doesn't store low-level QP types, so
429 * store both MLX and IBTA types in the field below.
430	 * IB_QPT_DRIVER is broken out into the DCI/DCT subtypes.
431 */
432 enum ib_qp_type type;
Mark Zhangd14133d2019-07-02 13:02:36 +0300433	/* A flag to indicate that a new counter has been configured
434	 * but has not yet taken effect
435 */
436 u32 counter_pending;
Maor Gottliebcfc1a892020-04-30 22:21:46 +0300437 u16 gsi_lag_port;
Eli Cohene126ba92013-07-07 17:25:49 +0300438};
439
440struct mlx5_ib_cq_buf {
Yonatan Cohen388ca8b2018-01-02 16:08:06 +0200441 struct mlx5_frag_buf_ctrl fbc;
Tariq Toukan4972e6f2018-09-12 15:36:41 +0300442 struct mlx5_frag_buf frag_buf;
Eli Cohene126ba92013-07-07 17:25:49 +0300443 struct ib_umem *umem;
444 int cqe_size;
Eli Cohenbde51582014-01-14 17:45:18 +0200445 int nent;
Eli Cohene126ba92013-07-07 17:25:49 +0300446};
447
Haggai Eran968e78d2014-12-11 17:04:11 +0200448struct mlx5_umr_wr {
Christoph Hellwige622f2f2015-10-08 09:16:33 +0100449 struct ib_send_wr wr;
Artemy Kovalyov31616252017-01-02 11:37:42 +0200450 u64 virt_addr;
451 u64 offset;
Haggai Eran968e78d2014-12-11 17:04:11 +0200452 struct ib_pd *pd;
453 unsigned int page_shift;
Artemy Kovalyov31616252017-01-02 11:37:42 +0200454 unsigned int xlt_size;
Maor Gottliebb216af42016-11-27 15:18:22 +0200455 u64 length;
Haggai Eran968e78d2014-12-11 17:04:11 +0200456 int access_flags;
457 u32 mkey;
Yishai Hadas6a053952019-07-23 09:57:25 +0300458 u8 ignore_free_state:1;
Haggai Eran968e78d2014-12-11 17:04:11 +0200459};
460
Bart Van Asschef696bf62018-07-18 09:25:14 -0700461static inline const struct mlx5_umr_wr *umr_wr(const struct ib_send_wr *wr)
Christoph Hellwige622f2f2015-10-08 09:16:33 +0100462{
463 return container_of(wr, struct mlx5_umr_wr, wr);
464}
465
Eli Cohene126ba92013-07-07 17:25:49 +0300466struct mlx5_shared_mr_info {
467 int mr_id;
468 struct ib_umem *umem;
469};
470
Guy Levi7a0c8f42017-10-19 08:25:53 +0300471enum mlx5_ib_cq_pr_flags {
472 MLX5_IB_CQ_PR_FLAGS_CQE_128_PAD = 1 << 0,
473};
474
Eli Cohene126ba92013-07-07 17:25:49 +0300475struct mlx5_ib_cq {
476 struct ib_cq ibcq;
477 struct mlx5_core_cq mcq;
478 struct mlx5_ib_cq_buf buf;
479 struct mlx5_db db;
480
481 /* serialize access to the CQ
482 */
483 spinlock_t lock;
484
485 /* protect resize cq
486 */
487 struct mutex resize_mutex;
Eli Cohenbde51582014-01-14 17:45:18 +0200488 struct mlx5_ib_cq_buf *resize_buf;
Eli Cohene126ba92013-07-07 17:25:49 +0300489 struct ib_umem *resize_umem;
490 int cqe_size;
Maor Gottlieb89ea94a72016-06-17 15:01:38 +0300491 struct list_head list_send_qp;
492 struct list_head list_recv_qp;
Leon Romanovsky051f2632015-12-20 12:16:11 +0200493 u32 create_flags;
Haggai Eran25361e02016-02-29 15:45:08 +0200494 struct list_head wc_list;
495 enum ib_cq_notify_flags notify_flags;
496 struct work_struct notify_work;
Guy Levi7a0c8f42017-10-19 08:25:53 +0300497 u16 private_flags; /* Use mlx5_ib_cq_pr_flags */
Haggai Eran25361e02016-02-29 15:45:08 +0200498};
499
500struct mlx5_ib_wc {
501 struct ib_wc wc;
502 struct list_head list;
Eli Cohene126ba92013-07-07 17:25:49 +0300503};
504
505struct mlx5_ib_srq {
506 struct ib_srq ibsrq;
507 struct mlx5_core_srq msrq;
Yonatan Cohen388ca8b2018-01-02 16:08:06 +0200508 struct mlx5_frag_buf buf;
Eli Cohene126ba92013-07-07 17:25:49 +0300509 struct mlx5_db db;
Guy Levi20e5a592018-11-26 08:15:39 +0200510 struct mlx5_frag_buf_ctrl fbc;
Eli Cohene126ba92013-07-07 17:25:49 +0300511 u64 *wrid;
512	/* protect SRQ handling
513 */
514 spinlock_t lock;
515 int head;
516 int tail;
517 u16 wqe_ctr;
518 struct ib_umem *umem;
519	/* serialize arming an SRQ
520 */
521 struct mutex mutex;
522 int wq_sig;
523};
524
525struct mlx5_ib_xrcd {
526 struct ib_xrcd ibxrcd;
527 u32 xrcdn;
528};
529
Haggai Erancc149f752014-12-11 17:04:21 +0200530enum mlx5_ib_mtt_access_flags {
531 MLX5_IB_MTT_READ = (1 << 0),
532 MLX5_IB_MTT_WRITE = (1 << 1),
533};
534
Yishai Hadasdc2316e2019-12-12 12:02:37 +0200535struct mlx5_user_mmap_entry {
536 struct rdma_user_mmap_entry rdma_entry;
537 u8 mmap_flag;
538 u64 address;
Yishai Hadas7be76be2019-12-12 13:09:27 +0200539 u32 page_idx;
Yishai Hadasdc2316e2019-12-12 12:02:37 +0200540};
541
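/*
 * Illustrative sketch (not part of the original header): entries of this type
 * are typically registered with the core rdma_user_mmap machinery inside the
 * page-offset range reserved by MLX5_IB_MMAP_OFFSET_START/END above.  The
 * wrapper name below is an assumption for illustration only.
 */
static inline int
mlx5_ib_mmap_entry_insert(struct mlx5_ib_ucontext *c,
			  struct mlx5_user_mmap_entry *entry, size_t length)
{
	return rdma_user_mmap_entry_insert_range(&c->ibucontext,
						 &entry->rdma_entry, length,
						 MLX5_IB_MMAP_OFFSET_START,
						 MLX5_IB_MMAP_OFFSET_END);
}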
Ariel Levkovich24da0012018-04-05 18:53:27 +0300542struct mlx5_ib_dm {
543 struct ib_dm ibdm;
544 phys_addr_t dev_addr;
Ariel Levkovich3b113a12019-05-05 17:07:11 +0300545 u32 type;
546 size_t size;
Ariel Levkovich25c13322019-05-05 17:07:13 +0300547 union {
548 struct {
549 u32 obj_id;
550 } icm_dm;
551	/* params specific to other dm types should be added here */
552 };
Yishai Hadasdc2316e2019-12-12 12:02:37 +0200553 struct mlx5_user_mmap_entry mentry;
Ariel Levkovich24da0012018-04-05 18:53:27 +0300554};
555
Haggai Erancc149f752014-12-11 17:04:21 +0200556#define MLX5_IB_MTT_PRESENT (MLX5_IB_MTT_READ | MLX5_IB_MTT_WRITE)
557
Ariel Levkovich3b113a12019-05-05 17:07:11 +0300558#define MLX5_IB_DM_MEMIC_ALLOWED_ACCESS (IB_ACCESS_LOCAL_WRITE |\
559 IB_ACCESS_REMOTE_WRITE |\
560 IB_ACCESS_REMOTE_READ |\
561 IB_ACCESS_REMOTE_ATOMIC |\
562 IB_ZERO_BASED)
Ariel Levkovich6c29f572018-04-05 18:53:29 +0300563
Ariel Levkovich25c13322019-05-05 17:07:13 +0300564#define MLX5_IB_DM_SW_ICM_ALLOWED_ACCESS (IB_ACCESS_LOCAL_WRITE |\
565 IB_ACCESS_REMOTE_WRITE |\
566 IB_ACCESS_REMOTE_READ |\
567 IB_ZERO_BASED)
568
Erez Alfasia3de94e2019-10-16 09:23:05 +0300569#define mlx5_update_odp_stats(mr, counter_name, value) \
570 atomic64_add(value, &((mr)->odp_stats.counter_name))
571
Eli Cohene126ba92013-07-07 17:25:49 +0300572struct mlx5_ib_mr {
573 struct ib_mr ibmr;
Sagi Grimberg8a187ee2015-10-13 19:11:26 +0300574 void *descs;
575 dma_addr_t desc_map;
576 int ndescs;
Max Gurtovoy6c984472019-06-11 18:52:42 +0300577 int data_length;
578 int meta_ndescs;
579 int meta_length;
Sagi Grimberg8a187ee2015-10-13 19:11:26 +0300580 int max_descs;
581 int desc_size;
Sagi Grimbergb005d312016-02-29 19:07:33 +0200582 int access_mode;
Matan Baraka606b0f2016-02-29 18:05:28 +0200583 struct mlx5_core_mkey mmkey;
Eli Cohene126ba92013-07-07 17:25:49 +0300584 struct ib_umem *umem;
585 struct mlx5_shared_mr_info *smr_info;
586 struct list_head list;
Jason Gunthorpeb91e1752020-03-10 10:22:32 +0200587 unsigned int order;
588 struct mlx5_cache_ent *cache_ent;
Eli Cohene126ba92013-07-07 17:25:49 +0300589 int npages;
Eli Cohen746b5582013-10-23 09:53:14 +0300590 struct mlx5_ib_dev *dev;
Saeed Mahameedec22eb52016-07-16 06:28:36 +0300591 u32 out[MLX5_ST_SZ_DW(create_mkey_out)];
Sagi Grimberg3121e3c2014-02-23 14:19:06 +0200592 struct mlx5_core_sig_ctx *sig;
Sagi Grimberg8a187ee2015-10-13 19:11:26 +0300593 void *descs_alloc;
Noa Osherovich56e11d62016-02-29 16:46:51 +0200594 int access_flags; /* Needed for rereg MR */
Artemy Kovalyov81713d32017-01-18 16:58:11 +0200595
596 struct mlx5_ib_mr *parent;
Israel Rukshinde0ae952019-06-11 18:52:55 +0300597 /* Needed for IB_MR_TYPE_INTEGRITY */
598 struct mlx5_ib_mr *pi_mr;
599 struct mlx5_ib_mr *klm_mr;
600 struct mlx5_ib_mr *mtt_mr;
Max Gurtovoy2563e2f2019-06-11 18:52:56 +0300601 u64 data_iova;
Israel Rukshinde0ae952019-06-11 18:52:55 +0300602 u64 pi_iova;
603
Jason Gunthorpe423f52d2019-10-09 13:09:29 -0300604 /* For ODP and implicit */
Jason Gunthorpe5256edc2019-10-09 13:09:32 -0300605 atomic_t num_deferred_work;
Artemy Kovalyovde5ed002020-02-27 13:39:18 +0200606 wait_queue_head_t q_deferred_work;
Jason Gunthorpe423f52d2019-10-09 13:09:29 -0300607 struct xarray implicit_children;
Jason Gunthorpe5256edc2019-10-09 13:09:32 -0300608 union {
609 struct rcu_head rcu;
610 struct list_head elm;
611 struct work_struct work;
612 } odp_destroy;
Erez Alfasia3de94e2019-10-16 09:23:05 +0300613 struct ib_odp_counters odp_stats;
Erez Alfasie1b95ae2019-10-16 09:23:07 +0300614 bool is_odp_implicit;
Jason Gunthorpe423f52d2019-10-09 13:09:29 -0300615
616 struct mlx5_async_work cb_work;
Eli Cohene126ba92013-07-07 17:25:49 +0300617};
618
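/*
 * Illustrative usage sketch (not part of the original header): the ODP
 * page-fault path can account resolved faults against the per-MR counters
 * through the mlx5_update_odp_stats() macro above; the helper name here is
 * an assumption for illustration.
 */
static inline void mlx5_ib_account_odp_fault(struct mlx5_ib_mr *mr, u64 npages)
{
	mlx5_update_odp_stats(mr, faults, npages);
}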
Leon Romanovsky8b4d5bc2019-01-08 16:07:25 +0200619static inline bool is_odp_mr(struct mlx5_ib_mr *mr)
620{
621 return IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING) && mr->umem &&
622 mr->umem->is_odp;
623}
624
Matan Barakd2370e02016-02-29 18:05:30 +0200625struct mlx5_ib_mw {
626 struct ib_mw ibmw;
627 struct mlx5_core_mkey mmkey;
Artemy Kovalyovdb570d72017-04-05 09:23:59 +0300628 int ndescs;
Eli Cohene126ba92013-07-07 17:25:49 +0300629};
630
Yishai Hadas534fd7a2019-01-13 16:01:17 +0200631struct mlx5_ib_devx_mr {
632 struct mlx5_core_mkey mmkey;
633 int ndescs;
Yishai Hadas534fd7a2019-01-13 16:01:17 +0200634};
635
Shachar Raindela74d2412014-05-22 14:50:12 +0300636struct mlx5_ib_umr_context {
Christoph Hellwigadd08d72016-03-03 09:38:22 +0100637 struct ib_cqe cqe;
Shachar Raindela74d2412014-05-22 14:50:12 +0300638 enum ib_wc_status status;
639 struct completion done;
640};
641
Eli Cohene126ba92013-07-07 17:25:49 +0300642struct umr_common {
643 struct ib_pd *pd;
644 struct ib_cq *cq;
645 struct ib_qp *qp;
Eli Cohene126ba92013-07-07 17:25:49 +0300646 /* control access to UMR QP
647 */
648 struct semaphore sem;
649};
650
Eli Cohene126ba92013-07-07 17:25:49 +0300651struct mlx5_cache_ent {
652 struct list_head head;
653	/* sync access to the cache entry
654 */
655 spinlock_t lock;
656
657
Eli Cohene126ba92013-07-07 17:25:49 +0300658 char name[4];
659 u32 order;
Artemy Kovalyov49780d42017-01-18 16:58:10 +0200660 u32 xlt;
661 u32 access_mode;
662 u32 page;
663
Jason Gunthorpeb9358bd2020-03-10 10:22:36 +0200664 u8 disabled:1;
Jason Gunthorpe1c78a212020-03-10 10:22:37 +0200665 u8 fill_to_high_water:1;
Jason Gunthorpeb9358bd2020-03-10 10:22:36 +0200666
Jason Gunthorpe7c8691a2020-03-10 10:22:31 +0200667 /*
668	 * - available_mrs is the length of the head list, i.e. the number of MRs
669 * available for immediate allocation.
670 * - total_mrs is available_mrs plus all in use MRs that could be
671 * returned to the cache.
672	 * - limit is the low water mark for available_mrs; 2 * limit is the
673 * upper water mark.
674 * - pending is the number of MRs currently being created
675 */
676 u32 total_mrs;
677 u32 available_mrs;
678 u32 limit;
679 u32 pending;
680
681 /* Statistics */
Eli Cohene126ba92013-07-07 17:25:49 +0300682 u32 miss;
Eli Cohene126ba92013-07-07 17:25:49 +0300683
Eli Cohene126ba92013-07-07 17:25:49 +0300684 struct mlx5_ib_dev *dev;
685 struct work_struct work;
686 struct delayed_work dwork;
687};
688
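/*
 * Illustrative sketch (not part of the original header) of the water-mark
 * scheme described in the comment above: refill the entry when the MRs that
 * are immediately available (plus those already being created) fall below
 * 'limit', and keep producing up to 2 * limit once a refill has started.
 * The helper is an assumption for illustration; the real logic lives in the
 * MR cache implementation.
 */
static inline bool mlx5_cache_ent_needs_refill(struct mlx5_cache_ent *ent)
{
	u32 in_flight = ent->available_mrs + ent->pending;

	return ent->fill_to_high_water ? in_flight < 2 * ent->limit :
					 in_flight < ent->limit;
}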
689struct mlx5_mr_cache {
690 struct workqueue_struct *wq;
691 struct mlx5_cache_ent ent[MAX_MR_CACHE_ENTRIES];
Eli Cohene126ba92013-07-07 17:25:49 +0300692 struct dentry *root;
693 unsigned long last_add;
694};
695
Haggai Erand16e91d2016-02-29 15:45:05 +0200696struct mlx5_ib_gsi_qp;
697
698struct mlx5_ib_port_resources {
Haggai Eran7722f472016-02-29 15:45:07 +0200699 struct mlx5_ib_resources *devr;
Haggai Erand16e91d2016-02-29 15:45:05 +0200700 struct mlx5_ib_gsi_qp *gsi;
Haggai Eran7722f472016-02-29 15:45:07 +0200701 struct work_struct pkey_change_work;
Haggai Erand16e91d2016-02-29 15:45:05 +0200702};
703
Eli Cohene126ba92013-07-07 17:25:49 +0300704struct mlx5_ib_resources {
705 struct ib_cq *c0;
Leon Romanovskyf4375442020-07-06 15:27:14 +0300706 u32 xrcdn0;
707 u32 xrcdn1;
Eli Cohene126ba92013-07-07 17:25:49 +0300708 struct ib_pd *p0;
709 struct ib_srq *s0;
Haggai Abramonvsky4aa17b22015-06-04 19:30:48 +0300710 struct ib_srq *s1;
Haggai Erand16e91d2016-02-29 15:45:05 +0200711 struct mlx5_ib_port_resources ports[2];
712 /* Protects changes to the port resources */
713 struct mutex mutex;
Eli Cohene126ba92013-07-07 17:25:49 +0300714};
715
Parav Pandite1f24a72017-04-16 07:29:29 +0300716struct mlx5_ib_counters {
Kamal Heib7c16f472017-01-18 15:25:09 +0200717 const char **names;
718 size_t *offsets;
Parav Pandite1f24a72017-04-16 07:29:29 +0300719 u32 num_q_counters;
720 u32 num_cong_counters;
Talat Batheesh9f876f32018-06-21 15:37:56 +0300721 u32 num_ext_ppcnt_counters;
Kamal Heib7c16f472017-01-18 15:25:09 +0200722 u16 set_id;
723};
724
Daniel Jurgens32f69e42018-01-04 17:25:36 +0200725struct mlx5_ib_multiport_info;
726
727struct mlx5_ib_multiport {
728 struct mlx5_ib_multiport_info *mpi;
729 /* To be held when accessing the multiport info */
730 spinlock_t mpi_lock;
731};
732
Achiad Shochatfc24fc52015-12-23 18:47:17 +0200733struct mlx5_roce {
734 /* Protect mlx5_ib_get_netdev from invoking dev_hold() with a NULL
735 * netdev pointer
736 */
737 rwlock_t netdev_lock;
738 struct net_device *netdev;
739 struct notifier_block nb;
Majd Dibbinyc6a21c32018-08-28 14:29:05 +0300740 atomic_t tx_port_affinity;
Moni Shouafd65f1b2017-05-30 09:56:05 +0300741 enum ib_port_state last_port_state;
Daniel Jurgens7fd8aef2018-01-04 17:25:35 +0200742 struct mlx5_ib_dev *dev;
743 u8 native_port_num;
Achiad Shochatfc24fc52015-12-23 18:47:17 +0200744};
745
Mark Bloch95579e72019-03-28 15:27:33 +0200746struct mlx5_ib_port {
747 struct mlx5_ib_counters cnts;
748 struct mlx5_ib_multiport mp;
749 struct mlx5_ib_dbg_cc_params *dbg_cc_params;
750 struct mlx5_roce roce;
Mark Bloch6a4d00b2019-03-28 15:27:37 +0200751 struct mlx5_eswitch_rep *rep;
Mark Bloch95579e72019-03-28 15:27:33 +0200752};
753
Parav Pandit4a2da0b2017-05-30 10:05:15 +0300754struct mlx5_ib_dbg_param {
755 int offset;
756 struct mlx5_ib_dev *dev;
757 struct dentry *dentry;
Parav Pandita9e546e2018-01-04 17:25:39 +0200758 u8 port_num;
Parav Pandit4a2da0b2017-05-30 10:05:15 +0300759};
760
761enum mlx5_ib_dbg_cc_types {
762 MLX5_IB_DBG_CC_RP_CLAMP_TGT_RATE,
763 MLX5_IB_DBG_CC_RP_CLAMP_TGT_RATE_ATI,
764 MLX5_IB_DBG_CC_RP_TIME_RESET,
765 MLX5_IB_DBG_CC_RP_BYTE_RESET,
766 MLX5_IB_DBG_CC_RP_THRESHOLD,
767 MLX5_IB_DBG_CC_RP_AI_RATE,
Parav Pandit9e3aaf62020-02-27 14:52:46 +0200768 MLX5_IB_DBG_CC_RP_MAX_RATE,
Parav Pandit4a2da0b2017-05-30 10:05:15 +0300769 MLX5_IB_DBG_CC_RP_HAI_RATE,
770 MLX5_IB_DBG_CC_RP_MIN_DEC_FAC,
771 MLX5_IB_DBG_CC_RP_MIN_RATE,
772 MLX5_IB_DBG_CC_RP_RATE_TO_SET_ON_FIRST_CNP,
773 MLX5_IB_DBG_CC_RP_DCE_TCP_G,
774 MLX5_IB_DBG_CC_RP_DCE_TCP_RTT,
775 MLX5_IB_DBG_CC_RP_RATE_REDUCE_MONITOR_PERIOD,
776 MLX5_IB_DBG_CC_RP_INITIAL_ALPHA_VALUE,
777 MLX5_IB_DBG_CC_RP_GD,
Parav Pandit9e3aaf62020-02-27 14:52:46 +0200778 MLX5_IB_DBG_CC_NP_MIN_TIME_BETWEEN_CNPS,
Parav Pandit4a2da0b2017-05-30 10:05:15 +0300779 MLX5_IB_DBG_CC_NP_CNP_DSCP,
780 MLX5_IB_DBG_CC_NP_CNP_PRIO_MODE,
781 MLX5_IB_DBG_CC_NP_CNP_PRIO,
782 MLX5_IB_DBG_CC_MAX,
783};
784
785struct mlx5_ib_dbg_cc_params {
786 struct dentry *root;
787 struct mlx5_ib_dbg_param params[MLX5_IB_DBG_CC_MAX];
788};
789
Maor Gottlieb03404e82017-05-30 10:29:13 +0300790enum {
791 MLX5_MAX_DELAY_DROP_TIMEOUT_MS = 100,
792};
793
794struct mlx5_ib_delay_drop {
795 struct mlx5_ib_dev *dev;
796 struct work_struct delay_drop_work;
797 /* serialize setting of delay drop */
798 struct mutex lock;
799 u32 timeout;
800 bool activate;
Maor Gottliebfe248c32017-05-30 10:29:14 +0300801 atomic_t events_cnt;
802 atomic_t rqs_cnt;
Greg Kroah-Hartman09b09652019-11-04 08:38:07 +0100803 struct dentry *dir_debugfs;
Maor Gottlieb03404e82017-05-30 10:29:13 +0300804};
805
Mark Bloch16c19752018-01-01 13:06:58 +0200806enum mlx5_ib_stages {
807 MLX5_IB_STAGE_INIT,
Leon Romanovskyf7c4ffd2020-07-02 11:18:07 +0300808 MLX5_IB_STAGE_FS,
Mark Bloch16c19752018-01-01 13:06:58 +0200809 MLX5_IB_STAGE_CAPS,
Mark Bloch8e6efa32017-11-06 12:22:13 +0000810 MLX5_IB_STAGE_NON_DEFAULT_CB,
Mark Bloch16c19752018-01-01 13:06:58 +0200811 MLX5_IB_STAGE_ROCE,
Leon Romanovsky333fbaa2020-04-04 10:40:24 +0300812 MLX5_IB_STAGE_QP,
Leon Romanovskyf3da6572018-11-28 20:53:41 +0200813 MLX5_IB_STAGE_SRQ,
Mark Bloch16c19752018-01-01 13:06:58 +0200814 MLX5_IB_STAGE_DEVICE_RESOURCES,
Saeed Mahameeddf097a22018-11-26 14:39:00 -0800815 MLX5_IB_STAGE_DEVICE_NOTIFIER,
Mark Bloch16c19752018-01-01 13:06:58 +0200816 MLX5_IB_STAGE_ODP,
817 MLX5_IB_STAGE_COUNTERS,
818 MLX5_IB_STAGE_CONG_DEBUGFS,
819 MLX5_IB_STAGE_UAR,
820 MLX5_IB_STAGE_BFREG,
Mark Bloch42cea832018-03-14 09:14:15 +0200821 MLX5_IB_STAGE_PRE_IB_REG_UMR,
Leon Romanovsky81773ce2018-11-28 20:53:39 +0200822 MLX5_IB_STAGE_WHITELIST_UID,
Mark Bloch16c19752018-01-01 13:06:58 +0200823 MLX5_IB_STAGE_IB_REG,
Mark Bloch42cea832018-03-14 09:14:15 +0200824 MLX5_IB_STAGE_POST_IB_REG_UMR,
Mark Bloch16c19752018-01-01 13:06:58 +0200825 MLX5_IB_STAGE_DELAY_DROP,
Leon Romanovskyb572ebe2020-07-02 11:18:05 +0300826 MLX5_IB_STAGE_RESTRACK,
Mark Bloch16c19752018-01-01 13:06:58 +0200827 MLX5_IB_STAGE_MAX,
828};
829
830struct mlx5_ib_stage {
831 int (*init)(struct mlx5_ib_dev *dev);
832 void (*cleanup)(struct mlx5_ib_dev *dev);
833};
834
835#define STAGE_CREATE(_stage, _init, _cleanup) \
836 .stage[_stage] = {.init = _init, .cleanup = _cleanup}
837
838struct mlx5_ib_profile {
839 struct mlx5_ib_stage stage[MLX5_IB_STAGE_MAX];
840};
841
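/*
 * Illustrative sketch (not part of the original header): a profile is an
 * array of init/cleanup pairs keyed by stage, built with STAGE_CREATE(),
 * e.g. (the stage callbacks named here are assumptions for illustration):
 *
 *	static const struct mlx5_ib_profile pf_profile = {
 *		STAGE_CREATE(MLX5_IB_STAGE_INIT,
 *			     mlx5_ib_stage_init_init,
 *			     mlx5_ib_stage_init_cleanup),
 *		STAGE_CREATE(MLX5_IB_STAGE_CAPS,
 *			     mlx5_ib_stage_caps_init,
 *			     NULL),
 *	};
 *
 * __mlx5_ib_add() walks the stages in order calling ->init, and unwinds with
 * ->cleanup (when non-NULL) on failure; __mlx5_ib_remove() does the reverse.
 */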
Daniel Jurgens32f69e42018-01-04 17:25:36 +0200842struct mlx5_ib_multiport_info {
843 struct list_head list;
844 struct mlx5_ib_dev *ibdev;
845 struct mlx5_core_dev *mdev;
Saeed Mahameeddf097a22018-11-26 14:39:00 -0800846 struct notifier_block mdev_events;
Daniel Jurgens32f69e42018-01-04 17:25:36 +0200847 struct completion unref_comp;
848 u64 sys_image_guid;
849 u32 mdev_refcnt;
850 bool is_master;
851 bool unaffiliate;
852};
853
Aviad Yehezkelc6475a02018-03-28 09:27:50 +0300854struct mlx5_ib_flow_action {
855 struct ib_flow_action ib_action;
856 union {
857 struct {
858 u64 ib_flags;
859 struct mlx5_accel_esp_xfrm *ctx;
860 } esp_aes_gcm;
Mark Blochb4749bf2018-08-28 14:18:51 +0300861 struct {
862 struct mlx5_ib_dev *dev;
863 u32 sub_type;
Maor Gottlieb2b688ea2019-08-15 13:54:17 +0300864 union {
865 struct mlx5_modify_hdr *modify_hdr;
866 struct mlx5_pkt_reformat *pkt_reformat;
867 };
Mark Blochb4749bf2018-08-28 14:18:51 +0300868 } flow_action_raw;
Aviad Yehezkelc6475a02018-03-28 09:27:50 +0300869 };
870};
871
Ariel Levkovich3b113a12019-05-05 17:07:11 +0300872struct mlx5_dm {
Ariel Levkovich24da0012018-04-05 18:53:27 +0300873 struct mlx5_core_dev *dev;
Ariel Levkovich3b113a12019-05-05 17:07:11 +0300874 /* This lock is used to protect the access to the shared
875 * allocation map when concurrent requests by different
876 * processes are handled.
877 */
878 spinlock_t lock;
Ariel Levkovich24da0012018-04-05 18:53:27 +0300879 DECLARE_BITMAP(memic_alloc_pages, MLX5_MAX_MEMIC_PAGES);
880};
881
Raed Salem5e95af52018-05-31 16:43:40 +0300882struct mlx5_read_counters_attr {
883 struct mlx5_fc *hw_cntrs_hndl;
884 u64 *out;
885 u32 flags;
886};
887
Raed Salem3b3233f2018-05-31 16:43:39 +0300888enum mlx5_ib_counters_type {
889 MLX5_IB_COUNTERS_FLOW,
890};
891
Raed Salemb29e2a12018-05-31 16:43:38 +0300892struct mlx5_ib_mcounters {
893 struct ib_counters ibcntrs;
Raed Salem3b3233f2018-05-31 16:43:39 +0300894 enum mlx5_ib_counters_type type;
Raed Salem5e95af52018-05-31 16:43:40 +0300895 /* number of counters supported for this counters type */
896 u32 counters_num;
897 struct mlx5_fc *hw_cntrs_hndl;
898 /* read function for this counters type */
899 int (*read_counters)(struct ib_device *ibdev,
900 struct mlx5_read_counters_attr *read_attr);
Raed Salem3b3233f2018-05-31 16:43:39 +0300901 /* max index set as part of create_flow */
902 u32 cntrs_max_index;
903	/* number of counters data entries (<description, index> pairs) */
904 u32 ncounters;
905 /* counters data array for descriptions and indexes */
906 struct mlx5_ib_flow_counters_desc *counters_data;
907 /* protects access to mcounters internal data */
908 struct mutex mcntrs_mutex;
Raed Salemb29e2a12018-05-31 16:43:38 +0300909};
910
911static inline struct mlx5_ib_mcounters *
912to_mcounters(struct ib_counters *ibcntrs)
913{
914 return container_of(ibcntrs, struct mlx5_ib_mcounters, ibcntrs);
915}
916
Mark Bloch2ea26202018-09-06 17:27:03 +0300917int parse_flow_flow_action(struct mlx5_ib_flow_action *maction,
918 bool is_egress,
919 struct mlx5_flow_act *action);
Mark Blocha560f1d2018-09-17 13:30:47 +0300920struct mlx5_ib_lb_state {
921 /* protect the user_td */
922 struct mutex mutex;
923 u32 user_td;
Mark Bloch0042f9e2018-09-17 13:30:49 +0300924 int qps;
925 bool enabled;
Mark Blocha560f1d2018-09-17 13:30:47 +0300926};
927
Saeed Mahameedd5d284b2018-11-19 10:52:41 -0800928struct mlx5_ib_pf_eq {
Yuval Avneryca390792019-06-10 23:38:23 +0000929 struct notifier_block irq_nb;
Saeed Mahameedd5d284b2018-11-19 10:52:41 -0800930 struct mlx5_ib_dev *dev;
931 struct mlx5_eq *core;
932 struct work_struct work;
933 spinlock_t lock; /* Pagefaults spinlock */
934 struct workqueue_struct *wq;
935 mempool_t *pool;
936};
937
Yishai Hadase337dd52019-06-30 19:23:30 +0300938struct mlx5_devx_event_table {
939 struct mlx5_nb devx_nb;
940 /* serialize updating the event_xa */
941 struct mutex event_xa_lock;
942 struct xarray event_xa;
943};
944
Yishai Hadasf164be82019-12-12 13:09:26 +0200945struct mlx5_var_table {
946 /* serialize updating the bitmap */
947 struct mutex bitmap_lock;
948 unsigned long *bitmap;
949 u64 hw_start_addr;
950 u32 stride_size;
951 u64 num_var_hw_entries;
952};
953
Eli Cohene126ba92013-07-07 17:25:49 +0300954struct mlx5_ib_dev {
955 struct ib_device ib_dev;
Jack Morgenstein9603b612014-07-28 23:30:22 +0300956 struct mlx5_core_dev *mdev;
Saeed Mahameeddf097a22018-11-26 14:39:00 -0800957 struct notifier_block mdev_events;
Eli Cohene126ba92013-07-07 17:25:49 +0300958 int num_ports;
Eli Cohene126ba92013-07-07 17:25:49 +0300959 /* serialize update of capability mask
960 */
961 struct mutex cap_mask_mutex;
Leon Romanovsky4b2a6732019-10-02 15:25:14 +0300962 u8 ib_active:1;
Leon Romanovsky4b2a6732019-10-02 15:25:14 +0300963 u8 is_rep:1;
964 u8 lag_active:1;
Michael Guralnik11f552e2019-06-10 15:21:24 +0300965 u8 wc_support:1;
Jason Gunthorpeb9358bd2020-03-10 10:22:36 +0200966 u8 fill_delay;
Eli Cohene126ba92013-07-07 17:25:49 +0300967 struct umr_common umrc;
968 /* sync used page count stats
969 */
Eli Cohene126ba92013-07-07 17:25:49 +0300970 struct mlx5_ib_resources devr;
Saeed Mahameedfc6a9f82020-03-10 10:22:28 +0200971
Saeed Mahameedf743ff32020-03-10 10:22:29 +0200972 atomic_t mkey_var;
Eli Cohene126ba92013-07-07 17:25:49 +0300973 struct mlx5_mr_cache cache;
Eli Cohen746b5582013-10-23 09:53:14 +0300974 struct timer_list delay_timer;
Moshe Lazer6bc1a652016-10-27 16:36:42 +0300975 /* Prevents soft lock on massive reg MRs */
976 struct mutex slow_path_mutex;
Haggai Eran8cdd3122014-12-11 17:04:20 +0200977 struct ib_odp_caps odp_caps;
Artemy Kovalyovc438fde2017-01-02 11:37:43 +0200978 u64 odp_max_size;
Saeed Mahameedd5d284b2018-11-19 10:52:41 -0800979 struct mlx5_ib_pf_eq odp_pf_eq;
980
Haggai Eran6aec21f2014-12-11 17:04:23 +0200981 /*
982 * Sleepable RCU that prevents destruction of MRs while they are still
983 * being used by a page fault handler.
984 */
Jason Gunthorpe806b1012019-10-09 13:09:23 -0300985 struct srcu_struct odp_srcu;
986 struct xarray odp_mkeys;
987
Artemy Kovalyov81713d32017-01-18 16:58:11 +0200988 u32 null_mkey;
Mark Bloch9a4ca382018-01-16 14:42:35 +0000989 struct mlx5_ib_flow_db *flow_db;
Maor Gottlieb89ea94a72016-06-17 15:01:38 +0300990 /* protect resources needed as part of reset flow */
991 spinlock_t reset_flow_resource_lock;
992 struct list_head qp_list;
Mark Bloch0837e862016-06-17 15:10:55 +0300993 /* Array with num_ports elements */
994 struct mlx5_ib_port *port;
Huy Nguyenc85023e2017-05-30 09:42:54 +0300995 struct mlx5_sq_bfreg bfreg;
Michael Guralnik11f552e2019-06-10 15:21:24 +0300996 struct mlx5_sq_bfreg wc_bfreg;
Huy Nguyenc85023e2017-05-30 09:42:54 +0300997 struct mlx5_sq_bfreg fp_bfreg;
Maor Gottlieb03404e82017-05-30 10:29:13 +0300998 struct mlx5_ib_delay_drop delay_drop;
Mark Bloch16c19752018-01-01 13:06:58 +0200999 const struct mlx5_ib_profile *profile;
Huy Nguyenc85023e2017-05-30 09:42:54 +03001000
Mark Blocha560f1d2018-09-17 13:30:47 +03001001 struct mlx5_ib_lb_state lb;
Huy Nguyenc85023e2017-05-30 09:42:54 +03001002 u8 umr_fence;
Daniel Jurgens32f69e42018-01-04 17:25:36 +02001003 struct list_head ib_dev_list;
1004 u64 sys_image_guid;
Ariel Levkovich3b113a12019-05-05 17:07:11 +03001005 struct mlx5_dm dm;
Yishai Hadas76dc5a82018-09-20 21:45:19 +03001006 u16 devx_whitelist_uid;
Leon Romanovskyf3da6572018-11-28 20:53:41 +02001007 struct mlx5_srq_table srq_table;
Leon Romanovsky333fbaa2020-04-04 10:40:24 +03001008 struct mlx5_qp_table qp_table;
Jason Gunthorpee3554772019-01-18 16:33:10 -08001009 struct mlx5_async_ctx async_ctx;
Yishai Hadase337dd52019-06-30 19:23:30 +03001010 struct mlx5_devx_event_table devx_event_table;
Yishai Hadasf164be82019-12-12 13:09:26 +02001011 struct mlx5_var_table var_table;
Jason Gunthorpe50211ec2019-10-09 13:09:22 -03001012
1013 struct xarray sig_mrs;
Eli Cohene126ba92013-07-07 17:25:49 +03001014};
1015
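/*
 * Illustrative sketch (not part of the original header) of the odp_srcu
 * pattern noted in the struct above: page-fault handlers look up an mkey
 * under the SRCU read lock so the MR cannot be torn down while the fault is
 * being served.  The function name is an assumption for illustration.
 */
static inline struct mlx5_core_mkey *
mlx5_ib_odp_lookup_mkey(struct mlx5_ib_dev *dev, u32 key, int *srcu_key)
{
	/* caller must do srcu_read_unlock(&dev->odp_srcu, *srcu_key) */
	*srcu_key = srcu_read_lock(&dev->odp_srcu);
	return xa_load(&dev->odp_mkeys, mlx5_base_mkey(key));
}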
1016static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq)
1017{
1018 return container_of(mcq, struct mlx5_ib_cq, mcq);
1019}
1020
1021static inline struct mlx5_ib_xrcd *to_mxrcd(struct ib_xrcd *ibxrcd)
1022{
1023 return container_of(ibxrcd, struct mlx5_ib_xrcd, ibxrcd);
1024}
1025
1026static inline struct mlx5_ib_dev *to_mdev(struct ib_device *ibdev)
1027{
1028 return container_of(ibdev, struct mlx5_ib_dev, ib_dev);
1029}
1030
Jason Gunthorpee79c9c62019-04-01 17:08:23 -03001031static inline struct mlx5_ib_dev *mlx5_udata_to_mdev(struct ib_udata *udata)
1032{
1033 struct mlx5_ib_ucontext *context = rdma_udata_to_drv_context(
1034 udata, struct mlx5_ib_ucontext, ibucontext);
1035
1036 return to_mdev(context->ibucontext.device);
1037}
1038
Eli Cohene126ba92013-07-07 17:25:49 +03001039static inline struct mlx5_ib_cq *to_mcq(struct ib_cq *ibcq)
1040{
1041 return container_of(ibcq, struct mlx5_ib_cq, ibcq);
1042}
1043
1044static inline struct mlx5_ib_qp *to_mibqp(struct mlx5_core_qp *mqp)
1045{
majd@mellanox.com19098df2016-01-14 19:13:03 +02001046 return container_of(mqp, struct mlx5_ib_qp_base, mqp)->container_mibqp;
Eli Cohene126ba92013-07-07 17:25:49 +03001047}
1048
Yishai Hadas350d0e42016-08-28 14:58:18 +03001049static inline struct mlx5_ib_rwq *to_mibrwq(struct mlx5_core_qp *core_qp)
1050{
1051 return container_of(core_qp, struct mlx5_ib_rwq, core_qp);
1052}
1053
Eli Cohene126ba92013-07-07 17:25:49 +03001054static inline struct mlx5_ib_pd *to_mpd(struct ib_pd *ibpd)
1055{
1056 return container_of(ibpd, struct mlx5_ib_pd, ibpd);
1057}
1058
1059static inline struct mlx5_ib_srq *to_msrq(struct ib_srq *ibsrq)
1060{
1061 return container_of(ibsrq, struct mlx5_ib_srq, ibsrq);
1062}
1063
1064static inline struct mlx5_ib_qp *to_mqp(struct ib_qp *ibqp)
1065{
1066 return container_of(ibqp, struct mlx5_ib_qp, ibqp);
1067}
1068
Yishai Hadas79b20a62016-05-23 15:20:50 +03001069static inline struct mlx5_ib_rwq *to_mrwq(struct ib_wq *ibwq)
1070{
1071 return container_of(ibwq, struct mlx5_ib_rwq, ibwq);
1072}
1073
Yishai Hadasc5f90922016-05-23 15:20:53 +03001074static inline struct mlx5_ib_rwq_ind_table *to_mrwq_ind_table(struct ib_rwq_ind_table *ib_rwq_ind_tbl)
1075{
1076 return container_of(ib_rwq_ind_tbl, struct mlx5_ib_rwq_ind_table, ib_rwq_ind_tbl);
1077}
1078
Eli Cohene126ba92013-07-07 17:25:49 +03001079static inline struct mlx5_ib_srq *to_mibsrq(struct mlx5_core_srq *msrq)
1080{
1081 return container_of(msrq, struct mlx5_ib_srq, msrq);
1082}
1083
Ariel Levkovich24da0012018-04-05 18:53:27 +03001084static inline struct mlx5_ib_dm *to_mdm(struct ib_dm *ibdm)
1085{
1086 return container_of(ibdm, struct mlx5_ib_dm, ibdm);
1087}
1088
Eli Cohene126ba92013-07-07 17:25:49 +03001089static inline struct mlx5_ib_mr *to_mmr(struct ib_mr *ibmr)
1090{
1091 return container_of(ibmr, struct mlx5_ib_mr, ibmr);
1092}
1093
Matan Barakd2370e02016-02-29 18:05:30 +02001094static inline struct mlx5_ib_mw *to_mmw(struct ib_mw *ibmw)
1095{
1096 return container_of(ibmw, struct mlx5_ib_mw, ibmw);
1097}
1098
Aviad Yehezkelc6475a02018-03-28 09:27:50 +03001099static inline struct mlx5_ib_flow_action *
1100to_mflow_act(struct ib_flow_action *ibact)
1101{
1102 return container_of(ibact, struct mlx5_ib_flow_action, ib_action);
1103}
1104
Yishai Hadasdc2316e2019-12-12 12:02:37 +02001105static inline struct mlx5_user_mmap_entry *
1106to_mmmap(struct rdma_user_mmap_entry *rdma_entry)
1107{
1108 return container_of(rdma_entry,
1109 struct mlx5_user_mmap_entry, rdma_entry);
1110}
1111
Jason Gunthorpeb0ea0fa2019-01-09 11:15:16 +02001112int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context,
1113 struct ib_udata *udata, unsigned long virt,
Eli Cohene126ba92013-07-07 17:25:49 +03001114 struct mlx5_db *db);
1115void mlx5_ib_db_unmap_user(struct mlx5_ib_ucontext *context, struct mlx5_db *db);
1116void __mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq);
1117void mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq);
1118void mlx5_ib_free_srq_wqe(struct mlx5_ib_srq *srq, int wqe_index);
Maor Gottliebfa5d0102020-04-30 22:21:42 +03001119int mlx5_ib_create_ah(struct ib_ah *ah, struct rdma_ah_init_attr *init_attr,
Leon Romanovskyd3456912019-04-03 16:42:42 +03001120 struct ib_udata *udata);
Dasaratharaman Chandramouli90898852017-04-29 14:41:18 -04001121int mlx5_ib_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr);
Leon Romanovsky9a9ebf82020-09-07 15:09:14 +03001122static inline int mlx5_ib_destroy_ah(struct ib_ah *ah, u32 flags)
1123{
1124 return 0;
1125}
Leon Romanovsky68e326d2019-04-03 16:42:43 +03001126int mlx5_ib_create_srq(struct ib_srq *srq, struct ib_srq_init_attr *init_attr,
1127 struct ib_udata *udata);
Eli Cohene126ba92013-07-07 17:25:49 +03001128int mlx5_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
1129 enum ib_srq_attr_mask attr_mask, struct ib_udata *udata);
1130int mlx5_ib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr);
Leon Romanovsky119181d2020-09-07 15:09:16 +03001131int mlx5_ib_destroy_srq(struct ib_srq *srq, struct ib_udata *udata);
Bart Van Assched34ac5c2018-07-18 09:25:32 -07001132int mlx5_ib_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
1133 const struct ib_recv_wr **bad_wr);
Mark Bloch0042f9e2018-09-17 13:30:49 +03001134int mlx5_ib_enable_lb(struct mlx5_ib_dev *dev, bool td, bool qp);
1135void mlx5_ib_disable_lb(struct mlx5_ib_dev *dev, bool td, bool qp);
Eli Cohene126ba92013-07-07 17:25:49 +03001136struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
1137 struct ib_qp_init_attr *init_attr,
1138 struct ib_udata *udata);
1139int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1140 int attr_mask, struct ib_udata *udata);
1141int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
1142 struct ib_qp_init_attr *qp_init_attr);
Shamir Rabinovitchc4367a22019-03-31 19:10:05 +03001143int mlx5_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata);
Yishai Hadasd0e84c02018-06-19 10:43:55 +03001144void mlx5_ib_drain_sq(struct ib_qp *qp);
1145void mlx5_ib_drain_rq(struct ib_qp *qp);
Moni Shouada9ee9d2020-01-15 14:43:34 +02001146int mlx5_ib_read_wqe_sq(struct mlx5_ib_qp *qp, int wqe_index, void *buffer,
1147 size_t buflen, size_t *bc);
1148int mlx5_ib_read_wqe_rq(struct mlx5_ib_qp *qp, int wqe_index, void *buffer,
1149 size_t buflen, size_t *bc);
1150int mlx5_ib_read_wqe_srq(struct mlx5_ib_srq *srq, int wqe_index, void *buffer,
1151 size_t buflen, size_t *bc);
Leon Romanovskye39afe32019-05-28 14:37:29 +03001152int mlx5_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
1153 struct ib_udata *udata);
Leon Romanovsky43d781b2020-09-07 15:09:18 +03001154int mlx5_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
Eli Cohene126ba92013-07-07 17:25:49 +03001155int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
1156int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
1157int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);
1158int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata);
1159struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc);
1160struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
1161 u64 virt_addr, int access_flags,
1162 struct ib_udata *udata);
Moni Shoua813e90b2018-12-11 13:37:53 +02001163int mlx5_ib_advise_mr(struct ib_pd *pd,
1164 enum ib_uverbs_advise_mr_advice advice,
1165 u32 flags,
1166 struct ib_sge *sg_list,
1167 u32 num_sge,
1168 struct uverbs_attr_bundle *attrs);
Leon Romanovskyd18bb3e2020-09-02 11:16:22 +03001169int mlx5_ib_alloc_mw(struct ib_mw *mw, struct ib_udata *udata);
Matan Barakd2370e02016-02-29 18:05:30 +02001170int mlx5_ib_dealloc_mw(struct ib_mw *mw);
Artemy Kovalyov7d0cc6e2017-01-02 11:37:44 +02001171int mlx5_ib_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages,
1172 int page_shift, int flags);
Artemy Kovalyov81713d32017-01-18 16:58:11 +02001173struct mlx5_ib_mr *mlx5_ib_alloc_implicit_mr(struct mlx5_ib_pd *pd,
Jason Gunthorpeb0ea0fa2019-01-09 11:15:16 +02001174 struct ib_udata *udata,
Artemy Kovalyov81713d32017-01-18 16:58:11 +02001175 int access_flags);
1176void mlx5_ib_free_implicit_mr(struct mlx5_ib_mr *mr);
Jason Gunthorpe09689702019-10-09 13:09:34 -03001177void mlx5_ib_fence_odp_mr(struct mlx5_ib_mr *mr);
Noa Osherovich56e11d62016-02-29 16:46:51 +02001178int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
1179 u64 length, u64 virt_addr, int access_flags,
1180 struct ib_pd *pd, struct ib_udata *udata);
Shamir Rabinovitchc4367a22019-03-31 19:10:05 +03001181int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata);
1182struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
Gal Pressman42a3b152020-07-06 15:03:43 +03001183 u32 max_num_sg);
Max Gurtovoy6c984472019-06-11 18:52:42 +03001184struct ib_mr *mlx5_ib_alloc_mr_integrity(struct ib_pd *pd,
1185 u32 max_num_sg,
1186 u32 max_num_meta_sg);
Christoph Hellwigff2ba992016-05-03 18:01:04 +02001187int mlx5_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
Bart Van Assche9aa8b322016-05-12 10:49:15 -07001188 unsigned int *sg_offset);
Max Gurtovoy6c984472019-06-11 18:52:42 +03001189int mlx5_ib_map_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg,
1190 int data_sg_nents, unsigned int *data_sg_offset,
1191 struct scatterlist *meta_sg, int meta_sg_nents,
1192 unsigned int *meta_sg_offset);
Eli Cohene126ba92013-07-07 17:25:49 +03001193int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
Ira Weinya97e2d82015-05-31 17:15:30 -04001194 const struct ib_wc *in_wc, const struct ib_grh *in_grh,
Leon Romanovskye26e7b82019-10-29 08:27:45 +02001195 const struct ib_mad *in, struct ib_mad *out,
1196 size_t *out_mad_size, u16 *out_mad_pkey_index);
Leon Romanovsky28ad5f652020-06-30 13:18:54 +03001197int mlx5_ib_alloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata);
Leon Romanovskyd0c45c82020-09-07 15:09:19 +03001198int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata);
Eli Cohene126ba92013-07-07 17:25:49 +03001199int mlx5_ib_get_buf_offset(u64 addr, int page_shift, u32 *offset);
1200int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, u8 port);
Majd Dibbiny1b5daf12015-06-04 19:30:46 +03001201int mlx5_query_mad_ifc_smp_attr_node_info(struct ib_device *ibdev,
1202 struct ib_smp *out_mad);
1203int mlx5_query_mad_ifc_system_image_guid(struct ib_device *ibdev,
1204 __be64 *sys_image_guid);
1205int mlx5_query_mad_ifc_max_pkeys(struct ib_device *ibdev,
1206 u16 *max_pkeys);
int mlx5_query_mad_ifc_vendor_id(struct ib_device *ibdev,
				 u32 *vendor_id);
int mlx5_query_mad_ifc_node_desc(struct mlx5_ib_dev *dev, char *node_desc);
int mlx5_query_mad_ifc_node_guid(struct mlx5_ib_dev *dev, __be64 *node_guid);
int mlx5_query_mad_ifc_pkey(struct ib_device *ibdev, u8 port, u16 index,
			    u16 *pkey);
int mlx5_query_mad_ifc_gids(struct ib_device *ibdev, u8 port, int index,
			    union ib_gid *gid);
int mlx5_query_mad_ifc_port(struct ib_device *ibdev, u8 port,
			    struct ib_port_attr *props);
int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
		       struct ib_port_attr *props);
void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr,
			unsigned long max_page_shift,
			int *count, int *shift,
			int *ncont, int *order);
void __mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
			    int page_shift, size_t offset, size_t num_pages,
			    __be64 *pas, int access_flags);
void mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
			  int page_shift, __be64 *pas, int access_flags);
void mlx5_ib_copy_pas(u64 *old, u64 *new, int step, int num);
int mlx5_ib_get_cqe_size(struct ib_cq *ibcq);
int mlx5_mr_cache_init(struct mlx5_ib_dev *dev);
int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev);

struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev,
				       unsigned int entry, int access_flags);
void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
int mlx5_mr_cache_invalidate(struct mlx5_ib_mr *mr);

int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
			    struct ib_mr_status *mr_status);
struct ib_wq *mlx5_ib_create_wq(struct ib_pd *pd,
				struct ib_wq_init_attr *init_attr,
				struct ib_udata *udata);
int mlx5_ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata);
int mlx5_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
		      u32 wq_attr_mask, struct ib_udata *udata);
int mlx5_ib_create_rwq_ind_table(struct ib_rwq_ind_table *ib_rwq_ind_table,
				 struct ib_rwq_ind_table_init_attr *init_attr,
				 struct ib_udata *udata);
int mlx5_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table);
struct ib_dm *mlx5_ib_alloc_dm(struct ib_device *ibdev,
			       struct ib_ucontext *context,
			       struct ib_dm_alloc_attr *attr,
			       struct uverbs_attr_bundle *attrs);
int mlx5_ib_dealloc_dm(struct ib_dm *ibdm, struct uverbs_attr_bundle *attrs);
struct ib_mr *mlx5_ib_reg_dm_mr(struct ib_pd *pd, struct ib_dm *dm,
				struct ib_dm_mr_attr *attr,
				struct uverbs_attr_bundle *attrs);

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev);
int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev);
void mlx5_ib_odp_cleanup_one(struct mlx5_ib_dev *ibdev);
int __init mlx5_ib_odp_init(void);
void mlx5_ib_odp_cleanup(void);
void mlx5_odp_init_mr_cache_entry(struct mlx5_cache_ent *ent);
void mlx5_odp_populate_xlt(void *xlt, size_t idx, size_t nentries,
			   struct mlx5_ib_mr *mr, int flags);

int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
			       enum ib_uverbs_advise_mr_advice advice,
			       u32 flags, struct ib_sge *sg_list, u32 num_sge);
#else /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
static inline void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev)
{
	return;
}

static inline int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev) { return 0; }
static inline void mlx5_ib_odp_cleanup_one(struct mlx5_ib_dev *ibdev) {}
static inline int mlx5_ib_odp_init(void) { return 0; }
static inline void mlx5_ib_odp_cleanup(void) {}
static inline void mlx5_odp_init_mr_cache_entry(struct mlx5_cache_ent *ent) {}
static inline void mlx5_odp_populate_xlt(void *xlt, size_t idx, size_t nentries,
					 struct mlx5_ib_mr *mr, int flags) {}

static inline int
mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
			   enum ib_uverbs_advise_mr_advice advice, u32 flags,
			   struct ib_sge *sg_list, u32 num_sge)
{
	return -EOPNOTSUPP;
}
#endif /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
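
/*
 * Editorial note: the !CONFIG_INFINIBAND_ON_DEMAND_PAGING stubs above let
 * common code call the ODP hooks unconditionally. A hedged sketch of how a
 * caller can rely on this (call site and label are illustrative, not the
 * exact driver code):
 *
 *	err = mlx5_ib_odp_init_one(dev);	// no-op returning 0 without ODP
 *	if (err)
 *		goto err_out;
 */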

extern const struct mmu_interval_notifier_ops mlx5_mn_ops;

/* Needed for rep profile */
void __mlx5_ib_remove(struct mlx5_ib_dev *dev,
		      const struct mlx5_ib_profile *profile,
		      int stage);
void *__mlx5_ib_add(struct mlx5_ib_dev *dev,
		    const struct mlx5_ib_profile *profile);

int mlx5_ib_get_vf_config(struct ib_device *device, int vf,
			  u8 port, struct ifla_vf_info *info);
int mlx5_ib_set_vf_link_state(struct ib_device *device, int vf,
			      u8 port, int state);
int mlx5_ib_get_vf_stats(struct ib_device *device, int vf,
			 u8 port, struct ifla_vf_stats *stats);
int mlx5_ib_get_vf_guid(struct ib_device *device, int vf, u8 port,
			struct ifla_vf_guid *node_guid,
			struct ifla_vf_guid *port_guid);
int mlx5_ib_set_vf_guid(struct ib_device *device, int vf, u8 port,
			u64 guid, int type);

__be16 mlx5_get_roce_udp_sport_min(const struct mlx5_ib_dev *dev,
				   const struct ib_gid_attr *attr);

void mlx5_ib_cleanup_cong_debugfs(struct mlx5_ib_dev *dev, u8 port_num);
void mlx5_ib_init_cong_debugfs(struct mlx5_ib_dev *dev, u8 port_num);

/* GSI QP helper functions */
struct ib_qp *mlx5_ib_gsi_create_qp(struct ib_pd *pd,
				    struct ib_qp_init_attr *init_attr);
int mlx5_ib_gsi_destroy_qp(struct ib_qp *qp);
int mlx5_ib_gsi_modify_qp(struct ib_qp *qp, struct ib_qp_attr *attr,
			  int attr_mask);
int mlx5_ib_gsi_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
			 int qp_attr_mask,
			 struct ib_qp_init_attr *qp_init_attr);
int mlx5_ib_gsi_post_send(struct ib_qp *qp, const struct ib_send_wr *wr,
			  const struct ib_send_wr **bad_wr);
int mlx5_ib_gsi_post_recv(struct ib_qp *qp, const struct ib_recv_wr *wr,
			  const struct ib_recv_wr **bad_wr);
void mlx5_ib_gsi_pkey_change(struct mlx5_ib_gsi_qp *gsi);

int mlx5_ib_generate_wc(struct ib_cq *ibcq, struct ib_wc *wc);

void mlx5_ib_free_bfreg(struct mlx5_ib_dev *dev, struct mlx5_bfreg_info *bfregi,
			int bfregn);
struct mlx5_ib_dev *mlx5_ib_get_ibdev_from_mpi(struct mlx5_ib_multiport_info *mpi);
struct mlx5_core_dev *mlx5_ib_get_native_port_mdev(struct mlx5_ib_dev *dev,
						   u8 ib_port_num,
						   u8 *native_port_num);
void mlx5_ib_put_native_port_mdev(struct mlx5_ib_dev *dev,
				  u8 port_num);

extern const struct uapi_definition mlx5_ib_devx_defs[];
extern const struct uapi_definition mlx5_ib_flow_defs[];
extern const struct uapi_definition mlx5_ib_qos_defs[];
extern const struct uapi_definition mlx5_ib_std_types_defs[];

static inline void init_query_mad(struct ib_smp *mad)
{
	mad->base_version = 1;
	mad->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED;
	mad->class_version = 1;
	mad->method = IB_MGMT_METHOD_GET;
}
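
/*
 * Hedged usage sketch for init_query_mad() (the attribute and the follow-up
 * step are illustrative, not a specific call site in this driver):
 *
 *	struct ib_smp *in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
 *
 *	if (!in_mad)
 *		return -ENOMEM;
 *	init_query_mad(in_mad);
 *	in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;
 *	// ...execute the MAD via the MAD_IFC command and parse the reply...
 */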

static inline int is_qp1(enum ib_qp_type qp_type)
{
	return qp_type == MLX5_IB_QPT_HW_GSI;
}

#define MLX5_MAX_UMR_SHIFT 16
#define MLX5_MAX_UMR_PAGES (1 << MLX5_MAX_UMR_SHIFT)
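/*
 * Editorial note: MLX5_MAX_UMR_PAGES is 1 << 16 = 65536 entries, i.e. 256 MiB
 * of MR coverage with 4 KiB pages; see mlx5_ib_can_load_pas_with_umr() below.
 */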

static inline u32 check_cq_create_flags(u32 flags)
{
	/*
	 * Returns a non-zero value if any unsupported CQ create flag is set,
	 * otherwise returns zero.
	 */
	return (flags & ~(IB_UVERBS_CQ_FLAGS_IGNORE_OVERRUN |
			  IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION));
}
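
/*
 * Hedged usage sketch (caller shape illustrative): CQ creation paths reject
 * requests that set any flag outside the supported pair, e.g.:
 *
 *	if (check_cq_create_flags(attr->flags))
 *		return -EOPNOTSUPP;
 */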

static inline int verify_assign_uidx(u8 cqe_version, u32 cmd_uidx,
				     u32 *user_index)
{
	if (cqe_version) {
		if ((cmd_uidx == MLX5_IB_DEFAULT_UIDX) ||
		    (cmd_uidx & ~MLX5_USER_ASSIGNED_UIDX_MASK))
			return -EINVAL;
		*user_index = cmd_uidx;
	} else {
		*user_index = MLX5_IB_DEFAULT_UIDX;
	}

	return 0;
}

static inline int get_qp_user_index(struct mlx5_ib_ucontext *ucontext,
				    struct mlx5_ib_create_qp *ucmd,
				    int inlen,
				    u32 *user_index)
{
	u8 cqe_version = ucontext->cqe_version;

	if ((offsetofend(typeof(*ucmd), uidx) <= inlen) && !cqe_version &&
	    (ucmd->uidx == MLX5_IB_DEFAULT_UIDX))
		return 0;

	if ((offsetofend(typeof(*ucmd), uidx) <= inlen) != !!cqe_version)
		return -EINVAL;

	return verify_assign_uidx(cqe_version, ucmd->uidx, user_index);
}

static inline int get_srq_user_index(struct mlx5_ib_ucontext *ucontext,
				     struct mlx5_ib_create_srq *ucmd,
				     int inlen,
				     u32 *user_index)
{
	u8 cqe_version = ucontext->cqe_version;

	if ((offsetofend(typeof(*ucmd), uidx) <= inlen) && !cqe_version &&
	    (ucmd->uidx == MLX5_IB_DEFAULT_UIDX))
		return 0;

	if ((offsetofend(typeof(*ucmd), uidx) <= inlen) != !!cqe_version)
		return -EINVAL;

	return verify_assign_uidx(cqe_version, ucmd->uidx, user_index);
}
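
/*
 * Editorial note on the two helpers above: both implement the same uidx
 * negotiation. Roughly: if the user command does not carry a uidx field, the
 * context must be CQE version 0 and the default uidx is used; if it does
 * carry one, the context must be CQE version 1 (or the field must hold the
 * default value) and the uidx is validated by verify_assign_uidx().
 */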

static inline int get_uars_per_sys_page(struct mlx5_ib_dev *dev, bool lib_support)
{
	return lib_support && MLX5_CAP_GEN(dev->mdev, uar_4k) ?
				MLX5_UARS_IN_PAGE : 1;
}

static inline int get_num_static_uars(struct mlx5_ib_dev *dev,
				      struct mlx5_bfreg_info *bfregi)
{
	return get_uars_per_sys_page(dev, bfregi->lib_uar_4k) * bfregi->num_static_sys_pages;
}
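
/*
 * Worked example (editorial, assuming MLX5_UARS_IN_PAGE == 1 on a 4 KiB page
 * kernel): with bfregi->num_static_sys_pages == 2 a context gets 2 static
 * UARs. When both the device (uar_4k) and userspace (lib_uar_4k) support
 * 4 KiB UARs on a larger system page size, each system page holds several
 * UARs and the static count scales by MLX5_UARS_IN_PAGE.
 */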

unsigned long mlx5_ib_get_xlt_emergency_page(void);
void mlx5_ib_put_xlt_emergency_page(void);

int bfregn_to_uar_index(struct mlx5_ib_dev *dev,
			struct mlx5_bfreg_info *bfregi, u32 bfregn,
			bool dyn_bfreg);

static inline bool mlx5_ib_can_load_pas_with_umr(struct mlx5_ib_dev *dev,
						 size_t length)
{
	/*
	 * umr_check_mkey_mask() rejects MLX5_MKEY_MASK_PAGE_SIZE, which is
	 * always set if MLX5_IB_SEND_UMR_UPDATE_TRANSLATION (aka
	 * MLX5_IB_UPD_XLT_ADDR and MLX5_IB_UPD_XLT_ENABLE) is set. Thus, an
	 * mkey can never be enabled without this capability. Simplify handling
	 * of this quirky hardware by just saying it cannot use PAS lists with
	 * UMR at all.
	 */
	if (MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled))
		return false;

	/*
	 * length is the size of the MR in bytes when mlx5_ib_update_xlt() is
	 * used.
	 */
	if (!MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset) &&
	    length >= MLX5_MAX_UMR_PAGES * PAGE_SIZE)
		return false;
	return true;
}
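
/*
 * Hedged usage sketch (decision point illustrative): MR registration paths
 * can consult the helper above before picking the UMR-based path:
 *
 *	if (!mlx5_ib_can_load_pas_with_umr(dev, length))
 *		use_umr = false;	// fall back to a synchronous FW command
 */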

/*
 * Returns true if an existing MR can be reconfigured to new access_flags using
 * UMR. Older HW cannot use UMR to update certain elements of the MKC. See
 * umr_check_mkey_mask() and get_umr_update_access_mask().
 */
static inline bool mlx5_ib_can_reconfig_with_umr(struct mlx5_ib_dev *dev,
						 unsigned int current_access_flags,
						 unsigned int target_access_flags)
{
	unsigned int diffs = current_access_flags ^ target_access_flags;

	if ((diffs & IB_ACCESS_REMOTE_ATOMIC) &&
	    MLX5_CAP_GEN(dev->mdev, atomic) &&
	    MLX5_CAP_GEN(dev->mdev, umr_modify_atomic_disabled))
		return false;

	if ((diffs & IB_ACCESS_RELAXED_ORDERING) &&
	    MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write) &&
	    !MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write_umr))
		return false;

	if ((diffs & IB_ACCESS_RELAXED_ORDERING) &&
	    MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read) &&
	    !MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read_umr))
		return false;

	return true;
}
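
/*
 * Hedged usage sketch (illustrative): re-registration style paths can use the
 * helper above to decide whether the existing mkey may be updated in place
 * with UMR or must be recreated:
 *
 *	if (!mlx5_ib_can_reconfig_with_umr(dev, mr->access_flags, new_flags))
 *		recreate_mkey = true;
 */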

int mlx5_ib_test_wc(struct mlx5_ib_dev *dev);

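/*
 * Editorial note: true when the two physical ports are bonded (LAG) or the
 * device reports multiple LAG ports with TX port affinity support, in which
 * case transmit objects should be given an explicit port affinity.
 */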
static inline bool mlx5_ib_lag_should_assign_affinity(struct mlx5_ib_dev *dev)
{
	return dev->lag_active ||
		(MLX5_CAP_GEN(dev->mdev, num_lag_ports) > 1 &&
		 MLX5_CAP_GEN(dev->mdev, lag_tx_port_affinity));
}
#endif /* MLX5_IB_H */