/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef MLX5_IB_H
#define MLX5_IB_H

#include <linux/kernel.h>
#include <linux/sched.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_smi.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cq.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/qp.h>
#include <linux/types.h>
#include <linux/mlx5/transobj.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/mlx5-abi.h>
#include <rdma/uverbs_ioctl.h>
#include <rdma/mlx5_user_ioctl_cmds.h>
#include <rdma/mlx5_user_ioctl_verbs.h>

#include "srq.h"

#define mlx5_ib_dbg(_dev, format, arg...)                                      \
	dev_dbg(&(_dev)->ib_dev.dev, "%s:%d:(pid %d): " format, __func__,      \
		__LINE__, current->pid, ##arg)

#define mlx5_ib_err(_dev, format, arg...)                                      \
	dev_err(&(_dev)->ib_dev.dev, "%s:%d:(pid %d): " format, __func__,      \
		__LINE__, current->pid, ##arg)

#define mlx5_ib_warn(_dev, format, arg...)                                     \
	dev_warn(&(_dev)->ib_dev.dev, "%s:%d:(pid %d): " format, __func__,     \
		 __LINE__, current->pid, ##arg)

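/*
 * Example (illustrative, not a snippet from this driver): the wrappers
 * above prepend the function name, line number and caller pid, so call
 * sites only supply a printf-style message:
 *
 *	mlx5_ib_dbg(dev, "alloc ucontext, cqe version %d\n", ver);
 *	mlx5_ib_warn(dev, "command failed, err %d\n", err);
 */
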
#define MLX5_IB_DEFAULT_UIDX 0xffffff
#define MLX5_USER_ASSIGNED_UIDX_MASK __mlx5_mask(qpc, user_index)

#define MLX5_MKEY_PAGE_SHIFT_MASK __mlx5_mask(mkc, log_page_size)

enum {
	MLX5_IB_MMAP_OFFSET_START = 9,
	MLX5_IB_MMAP_OFFSET_END = 255,
};

enum {
	MLX5_IB_MMAP_CMD_SHIFT	= 8,
	MLX5_IB_MMAP_CMD_MASK	= 0xff,
};

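/*
 * A minimal sketch of how the two constants above are used (assumed from
 * their definitions; the real decoding lives in main.c): mmap() commands
 * are carried in the high bits of the page offset, so
 *
 *	cmd = (vma->vm_pgoff >> MLX5_IB_MMAP_CMD_SHIFT) & MLX5_IB_MMAP_CMD_MASK;
 *
 * recovers the command while the low MLX5_IB_MMAP_CMD_SHIFT bits remain
 * available for a per-command index.
 */
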
enum {
	MLX5_RES_SCAT_DATA32_CQE	= 0x1,
	MLX5_RES_SCAT_DATA64_CQE	= 0x2,
	MLX5_REQ_SCAT_DATA32_CQE	= 0x11,
	MLX5_REQ_SCAT_DATA64_CQE	= 0x22,
};

enum mlx5_ib_mad_ifc_flags {
	MLX5_MAD_IFC_IGNORE_MKEY	= 1,
	MLX5_MAD_IFC_IGNORE_BKEY	= 2,
	MLX5_MAD_IFC_NET_VIEW		= 4,
};

enum {
	MLX5_CROSS_CHANNEL_BFREG	= 0,
};

enum {
	MLX5_CQE_VERSION_V0,
	MLX5_CQE_VERSION_V1,
};

enum {
	MLX5_TM_MAX_RNDV_MSG_SIZE	= 64,
	MLX5_TM_MAX_SGE			= 1,
};

enum {
	MLX5_IB_INVALID_UAR_INDEX	= BIT(31),
	MLX5_IB_INVALID_BFREG		= BIT(31),
};

enum {
	MLX5_MAX_MEMIC_PAGES		= 0x100,
	MLX5_MEMIC_ALLOC_SIZE_MASK	= 0x3f,
};

enum {
	MLX5_MEMIC_BASE_ALIGN	= 6,
	MLX5_MEMIC_BASE_SIZE	= 1 << MLX5_MEMIC_BASE_ALIGN,
};

enum mlx5_ib_mmap_type {
	MLX5_IB_MMAP_TYPE_MEMIC = 1,
	MLX5_IB_MMAP_TYPE_VAR = 2,
	MLX5_IB_MMAP_TYPE_UAR_WC = 3,
	MLX5_IB_MMAP_TYPE_UAR_NC = 4,
};

struct mlx5_bfreg_info {
	u32 *sys_pages;
	int num_low_latency_bfregs;
	unsigned int *count;

	/*
	 * protect bfreg allocation data structs
	 */
	struct mutex lock;
	u32 ver;
	u8 lib_uar_4k : 1;
	u8 lib_uar_dyn : 1;
	u32 num_sys_pages;
	u32 num_static_sys_pages;
	u32 total_num_bfregs;
	u32 num_dyn_bfregs;
};

struct mlx5_ib_ucontext {
	struct ib_ucontext	ibucontext;
	struct list_head	db_page_list;

	/* protect doorbell record alloc/free
	 */
	struct mutex		db_page_mutex;
	struct mlx5_bfreg_info	bfregi;
	u8			cqe_version;
	/* Transport Domain number */
	u32			tdn;

	u64			lib_caps;
	u16			devx_uid;
	/* For RoCE LAG TX affinity */
	atomic_t		tx_port_affinity;
};

static inline struct mlx5_ib_ucontext *to_mucontext(struct ib_ucontext *ibucontext)
{
	return container_of(ibucontext, struct mlx5_ib_ucontext, ibucontext);
}

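/*
 * to_mucontext() and the other to_*() helpers below all follow the same
 * pattern: the driver object embeds the generic ib_* object, so
 * container_of() on the embedded member recovers the driver object.
 * Illustrative use (hypothetical call site):
 *
 *	struct mlx5_ib_ucontext *ctx = to_mucontext(ibucontext);
 *
 *	return ctx->cqe_version;
 */
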
struct mlx5_ib_pd {
	struct ib_pd		ibpd;
	u32			pdn;
	u16			uid;
};

enum {
	MLX5_IB_FLOW_ACTION_MODIFY_HEADER,
	MLX5_IB_FLOW_ACTION_PACKET_REFORMAT,
	MLX5_IB_FLOW_ACTION_DECAP,
};

#define MLX5_IB_FLOW_MCAST_PRIO		(MLX5_BY_PASS_NUM_PRIOS - 1)
#define MLX5_IB_FLOW_LAST_PRIO		(MLX5_BY_PASS_NUM_REGULAR_PRIOS - 1)
#if (MLX5_IB_FLOW_LAST_PRIO <= 0)
#error "Invalid number of bypass priorities"
#endif
#define MLX5_IB_FLOW_LEFTOVERS_PRIO	(MLX5_IB_FLOW_MCAST_PRIO + 1)

#define MLX5_IB_NUM_FLOW_FT		(MLX5_IB_FLOW_LEFTOVERS_PRIO + 1)
#define MLX5_IB_NUM_SNIFFER_FTS		2
#define MLX5_IB_NUM_EGRESS_FTS		1
struct mlx5_ib_flow_prio {
	struct mlx5_flow_table		*flow_table;
	unsigned int			refcount;
};

struct mlx5_ib_flow_handler {
	struct list_head		list;
	struct ib_flow			ibflow;
	struct mlx5_ib_flow_prio	*prio;
	struct mlx5_flow_handle		*rule;
	struct ib_counters		*ibcounters;
	struct mlx5_ib_dev		*dev;
	struct mlx5_ib_flow_matcher	*flow_matcher;
};

struct mlx5_ib_flow_matcher {
	struct mlx5_ib_match_params matcher_mask;
	int			mask_len;
	enum mlx5_ib_flow_type	flow_type;
	enum mlx5_flow_namespace_type ns_type;
	u16			priority;
	struct mlx5_core_dev	*mdev;
	atomic_t		usecnt;
	u8			match_criteria_enable;
};

struct mlx5_ib_pp {
	u16 index;
	struct mlx5_core_dev *mdev;
};

struct mlx5_ib_flow_db {
	struct mlx5_ib_flow_prio	prios[MLX5_IB_NUM_FLOW_FT];
	struct mlx5_ib_flow_prio	egress_prios[MLX5_IB_NUM_FLOW_FT];
	struct mlx5_ib_flow_prio	sniffer[MLX5_IB_NUM_SNIFFER_FTS];
	struct mlx5_ib_flow_prio	egress[MLX5_IB_NUM_EGRESS_FTS];
	struct mlx5_ib_flow_prio	fdb;
	struct mlx5_ib_flow_prio	rdma_rx[MLX5_IB_NUM_FLOW_FT];
	struct mlx5_ib_flow_prio	rdma_tx[MLX5_IB_NUM_FLOW_FT];
	struct mlx5_flow_table		*lag_demux_ft;
	/* Protect flow steering bypass flow tables
	 * when add/del flow rules.
	 * Only a single add/removal of a flow steering rule may be
	 * in progress at a time.
	 */
	struct mutex			lock;
};

/* Use macros here so that we don't have to duplicate
 * enum ib_send_flags and enum ib_qp_type for low-level driver
 */

#define MLX5_IB_SEND_UMR_ENABLE_MR	       (IB_SEND_RESERVED_START << 0)
#define MLX5_IB_SEND_UMR_DISABLE_MR	       (IB_SEND_RESERVED_START << 1)
#define MLX5_IB_SEND_UMR_FAIL_IF_FREE	       (IB_SEND_RESERVED_START << 2)
#define MLX5_IB_SEND_UMR_UPDATE_XLT	       (IB_SEND_RESERVED_START << 3)
#define MLX5_IB_SEND_UMR_UPDATE_TRANSLATION    (IB_SEND_RESERVED_START << 4)
#define MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS      IB_SEND_RESERVED_END

#define MLX5_IB_QPT_REG_UMR	IB_QPT_RESERVED1
/*
 * IB_QPT_GSI creates the software wrapper around GSI, and MLX5_IB_QPT_HW_GSI
 * creates the actual hardware QP.
 */
#define MLX5_IB_QPT_HW_GSI	IB_QPT_RESERVED2
#define MLX5_IB_QPT_DCI		IB_QPT_RESERVED3
#define MLX5_IB_QPT_DCT		IB_QPT_RESERVED4
#define MLX5_IB_WR_UMR		IB_WR_RESERVED1

#define MLX5_IB_UMR_OCTOWORD	       16
#define MLX5_IB_UMR_XLT_ALIGNMENT      64

#define MLX5_IB_UPD_XLT_ZAP	      BIT(0)
#define MLX5_IB_UPD_XLT_ENABLE	      BIT(1)
#define MLX5_IB_UPD_XLT_ATOMIC	      BIT(2)
#define MLX5_IB_UPD_XLT_ADDR	      BIT(3)
#define MLX5_IB_UPD_XLT_PD	      BIT(4)
#define MLX5_IB_UPD_XLT_ACCESS	      BIT(5)
#define MLX5_IB_UPD_XLT_INDIRECT      BIT(6)

/* Private QP creation flags to be passed in ib_qp_init_attr.create_flags.
 *
 * These flags are intended for internal use by the mlx5_ib driver, and they
 * rely on the range reserved for that use in the ib_qp_create_flags enum.
 */
#define MLX5_IB_QP_CREATE_SQPN_QP1	IB_QP_CREATE_RESERVED_START
#define MLX5_IB_QP_CREATE_WC_TEST	(IB_QP_CREATE_RESERVED_START << 1)

struct wr_list {
	u16	opcode;
	u16	next;
};

enum mlx5_ib_rq_flags {
	MLX5_IB_RQ_CVLAN_STRIPPING	= 1 << 0,
	MLX5_IB_RQ_PCI_WRITE_END_PADDING	= 1 << 1,
};

struct mlx5_ib_wq {
	struct mlx5_frag_buf_ctrl fbc;
	u64		       *wrid;
	u32		       *wr_data;
	struct wr_list	       *w_list;
	unsigned	       *wqe_head;
	u16			unsig_count;

	/* serialize post to the work queue
	 */
	spinlock_t		lock;
	int			wqe_cnt;
	int			max_post;
	int			max_gs;
	int			offset;
	int			wqe_shift;
	unsigned		head;
	unsigned		tail;
	u16			cur_post;
	u16			last_poll;
	void			*cur_edge;
};

enum mlx5_ib_wq_flags {
	MLX5_IB_WQ_FLAGS_DELAY_DROP = 0x1,
	MLX5_IB_WQ_FLAGS_STRIDING_RQ = 0x2,
};

#define MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES 9
#define MLX5_MAX_SINGLE_WQE_LOG_NUM_STRIDES 16
#define MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES 6
#define MLX5_MAX_SINGLE_STRIDE_LOG_NUM_BYTES 13
#define MLX5_EXT_MIN_SINGLE_WQE_LOG_NUM_STRIDES 3

struct mlx5_ib_rwq {
	struct ib_wq		ibwq;
	struct mlx5_core_qp	core_qp;
	u32			rq_num_pas;
	u32			log_rq_stride;
	u32			log_rq_size;
	u32			rq_page_offset;
	u32			log_page_size;
	u32			log_num_strides;
	u32			two_byte_shift_en;
	u32			single_stride_log_num_of_bytes;
	struct ib_umem		*umem;
	size_t			buf_size;
	unsigned int		page_shift;
	struct mlx5_db		db;
	u32			user_index;
	u32			wqe_count;
	u32			wqe_shift;
	int			wq_sig;
	u32			create_flags; /* Use enum mlx5_ib_wq_flags */
};

struct mlx5_ib_rwq_ind_table {
	struct ib_rwq_ind_table ib_rwq_ind_tbl;
	u32			rqtn;
	u16			uid;
};

struct mlx5_ib_ubuffer {
	struct ib_umem	       *umem;
	int			buf_size;
	u64			buf_addr;
};

struct mlx5_ib_qp_base {
	struct mlx5_ib_qp	*container_mibqp;
	struct mlx5_core_qp	mqp;
	struct mlx5_ib_ubuffer	ubuffer;
};

struct mlx5_ib_qp_trans {
	struct mlx5_ib_qp_base	base;
	u16			xrcdn;
	u8			alt_port;
	u8			atomic_rd_en;
	u8			resp_depth;
};

struct mlx5_ib_rss_qp {
	u32	tirn;
};

struct mlx5_ib_rq {
	struct mlx5_ib_qp_base base;
	struct mlx5_ib_wq	*rq;
	struct mlx5_ib_ubuffer	ubuffer;
	struct mlx5_db		*doorbell;
	u32			tirn;
	u8			state;
	u32			flags;
};

struct mlx5_ib_sq {
	struct mlx5_ib_qp_base base;
	struct mlx5_ib_wq	*sq;
	struct mlx5_ib_ubuffer  ubuffer;
	struct mlx5_db		*doorbell;
	struct mlx5_flow_handle	*flow_rule;
	u32			tisn;
	u8			state;
};

struct mlx5_ib_raw_packet_qp {
	struct mlx5_ib_sq sq;
	struct mlx5_ib_rq rq;
};

struct mlx5_bf {
	int			buf_size;
	unsigned long		offset;
	struct mlx5_sq_bfreg   *bfreg;
};

struct mlx5_ib_dct {
	struct mlx5_core_dct	mdct;
	u32			*in;
};

struct mlx5_ib_qp {
	struct ib_qp		ibqp;
	union {
		struct mlx5_ib_qp_trans trans_qp;
		struct mlx5_ib_raw_packet_qp raw_packet_qp;
		struct mlx5_ib_rss_qp rss_qp;
		struct mlx5_ib_dct dct;
	};
	struct mlx5_frag_buf	buf;

	struct mlx5_db		db;
	struct mlx5_ib_wq	rq;

	u8			sq_signal_bits;
	u8			next_fence;
	struct mlx5_ib_wq	sq;

	/* serialize qp state modifications
	 */
	struct mutex		mutex;
	/* cached variant of create_flags from struct ib_qp_init_attr */
	u32			flags;
	u8			port;
	u8			state;
	int			max_inline_data;
	struct mlx5_bf		bf;
	u8			has_rq:1;
	u8			is_rss:1;

	/* only for user space QPs. For kernel
	 * we have it from the bf object
	 */
	int			bfregn;

	struct list_head	qps_list;
	struct list_head	cq_recv_list;
	struct list_head	cq_send_list;
	struct mlx5_rate_limit	rl;
	u32			underlay_qpn;
	u32			flags_en;
	/*
	 * IB/core doesn't store low-level QP types, so
	 * store both MLX and IBTA types in the field below.
	 * IB_QPT_DRIVER is broken down into the DCI/DCT subtypes.
	 */
	enum ib_qp_type		type;
	/* A flag to indicate that a new counter has been configured
	 * but has not yet taken effect
	 */
	u32			counter_pending;
};

struct mlx5_ib_cq_buf {
	struct mlx5_frag_buf_ctrl fbc;
	struct mlx5_frag_buf    frag_buf;
	struct ib_umem		*umem;
	int			cqe_size;
	int			nent;
};

struct mlx5_umr_wr {
	struct ib_send_wr		wr;
	u64				virt_addr;
	u64				offset;
	struct ib_pd		       *pd;
	unsigned int			page_shift;
	unsigned int			xlt_size;
	u64				length;
	int				access_flags;
	u32				mkey;
	u8				ignore_free_state:1;
};

static inline const struct mlx5_umr_wr *umr_wr(const struct ib_send_wr *wr)
{
	return container_of(wr, struct mlx5_umr_wr, wr);
}

struct mlx5_shared_mr_info {
	int mr_id;
	struct ib_umem		*umem;
};

enum mlx5_ib_cq_pr_flags {
	MLX5_IB_CQ_PR_FLAGS_CQE_128_PAD	= 1 << 0,
};

struct mlx5_ib_cq {
	struct ib_cq		ibcq;
	struct mlx5_core_cq	mcq;
	struct mlx5_ib_cq_buf	buf;
	struct mlx5_db		db;

	/* serialize access to the CQ
	 */
	spinlock_t		lock;

	/* protect resize cq
	 */
	struct mutex		resize_mutex;
	struct mlx5_ib_cq_buf  *resize_buf;
	struct ib_umem	       *resize_umem;
	int			cqe_size;
	struct list_head	list_send_qp;
	struct list_head	list_recv_qp;
	u32			create_flags;
	struct list_head	wc_list;
	enum ib_cq_notify_flags notify_flags;
	struct work_struct	notify_work;
	u16			private_flags; /* Use mlx5_ib_cq_pr_flags */
};

struct mlx5_ib_wc {
	struct ib_wc wc;
	struct list_head list;
};

struct mlx5_ib_srq {
	struct ib_srq		ibsrq;
	struct mlx5_core_srq	msrq;
	struct mlx5_frag_buf	buf;
	struct mlx5_db		db;
	struct mlx5_frag_buf_ctrl fbc;
	u64		       *wrid;
	/* protect SRQ handling
	 */
	spinlock_t		lock;
	int			head;
	int			tail;
	u16			wqe_ctr;
	struct ib_umem	       *umem;
	/* serialize arming a SRQ
	 */
	struct mutex		mutex;
	int			wq_sig;
};

struct mlx5_ib_xrcd {
	struct ib_xrcd		ibxrcd;
	u32			xrcdn;
};

enum mlx5_ib_mtt_access_flags {
	MLX5_IB_MTT_READ  = (1 << 0),
	MLX5_IB_MTT_WRITE = (1 << 1),
};

struct mlx5_user_mmap_entry {
	struct rdma_user_mmap_entry rdma_entry;
	u8 mmap_flag;
	u64 address;
	u32 page_idx;
};

struct mlx5_ib_dm {
	struct ib_dm		ibdm;
	phys_addr_t		dev_addr;
	u32			type;
	size_t			size;
	union {
		struct {
			u32	obj_id;
		} icm_dm;
		/* other dm types specific params should be added here */
	};
	struct mlx5_user_mmap_entry mentry;
};

#define MLX5_IB_MTT_PRESENT (MLX5_IB_MTT_READ | MLX5_IB_MTT_WRITE)

#define MLX5_IB_DM_MEMIC_ALLOWED_ACCESS (IB_ACCESS_LOCAL_WRITE   |\
					 IB_ACCESS_REMOTE_WRITE  |\
					 IB_ACCESS_REMOTE_READ   |\
					 IB_ACCESS_REMOTE_ATOMIC |\
					 IB_ZERO_BASED)

#define MLX5_IB_DM_SW_ICM_ALLOWED_ACCESS (IB_ACCESS_LOCAL_WRITE   |\
					  IB_ACCESS_REMOTE_WRITE  |\
					  IB_ACCESS_REMOTE_READ   |\
					  IB_ZERO_BASED)

#define mlx5_update_odp_stats(mr, counter_name, value)		\
	atomic64_add(value, &((mr)->odp_stats.counter_name))

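/*
 * Example (illustrative): counter_name is pasted into the member access,
 * so a page-fault path can do
 *
 *	mlx5_update_odp_stats(mr, faults, npages);
 *
 * which expands to atomic64_add(npages, &mr->odp_stats.faults).
 */
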
struct mlx5_ib_mr {
	struct ib_mr		ibmr;
	void			*descs;
	dma_addr_t		desc_map;
	int			ndescs;
	int			data_length;
	int			meta_ndescs;
	int			meta_length;
	int			max_descs;
	int			desc_size;
	int			access_mode;
	struct mlx5_core_mkey	mmkey;
	struct ib_umem	       *umem;
	struct mlx5_shared_mr_info	*smr_info;
	struct list_head	list;
	unsigned int		order;
	struct mlx5_cache_ent  *cache_ent;
	int			npages;
	struct mlx5_ib_dev     *dev;
	u32 out[MLX5_ST_SZ_DW(create_mkey_out)];
	struct mlx5_core_sig_ctx    *sig;
	void			*descs_alloc;
	int			access_flags; /* Needed for rereg MR */

	struct mlx5_ib_mr      *parent;
	/* Needed for IB_MR_TYPE_INTEGRITY */
	struct mlx5_ib_mr      *pi_mr;
	struct mlx5_ib_mr      *klm_mr;
	struct mlx5_ib_mr      *mtt_mr;
	u64			data_iova;
	u64			pi_iova;

	/* For ODP and implicit */
	atomic_t		num_deferred_work;
	wait_queue_head_t	q_deferred_work;
	struct xarray		implicit_children;
	union {
		struct rcu_head rcu;
		struct list_head elm;
		struct work_struct work;
	} odp_destroy;
	struct ib_odp_counters	odp_stats;
	bool			is_odp_implicit;

	struct mlx5_async_work  cb_work;
};

static inline bool is_odp_mr(struct mlx5_ib_mr *mr)
{
	return IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING) && mr->umem &&
	       mr->umem->is_odp;
}

struct mlx5_ib_mw {
	struct ib_mw		ibmw;
	struct mlx5_core_mkey	mmkey;
	int			ndescs;
};

struct mlx5_ib_devx_mr {
	struct mlx5_core_mkey	mmkey;
	int			ndescs;
};

struct mlx5_ib_umr_context {
	struct ib_cqe		cqe;
	enum ib_wc_status	status;
	struct completion	done;
};

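/*
 * A sketch of how mlx5_ib_umr_context is meant to be driven (simplified;
 * the real flow lives in mr.c): the context is embedded in a UMR work
 * request, the caller sleeps on the completion, and the CQE handler
 * records the status and wakes it:
 *
 *	static void example_umr_done(struct ib_cq *cq, struct ib_wc *wc)
 *	{
 *		struct mlx5_ib_umr_context *context =
 *			container_of(wc->wr_cqe, struct mlx5_ib_umr_context, cqe);
 *
 *		context->status = wc->status;
 *		complete(&context->done);
 *	}
 */
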
struct umr_common {
	struct ib_pd	*pd;
	struct ib_cq	*cq;
	struct ib_qp	*qp;
	/* control access to UMR QP
	 */
	struct semaphore	sem;
};

enum {
	MLX5_FMR_INVALID,
	MLX5_FMR_VALID,
	MLX5_FMR_BUSY,
};

struct mlx5_cache_ent {
	struct list_head	head;
	/* sync access to the cache entry
	 */
	spinlock_t		lock;


	char                    name[4];
	u32                     order;
	u32			xlt;
	u32			access_mode;
	u32			page;

	u8 disabled:1;
	u8 fill_to_high_water:1;

	/*
	 * - available_mrs is the length of list head, i.e. the number of MRs
	 *   available for immediate allocation.
	 * - total_mrs is available_mrs plus all in use MRs that could be
	 *   returned to the cache.
	 * - limit is the low water mark for available_mrs, 2 * limit is the
	 *   upper water mark.
	 * - pending is the number of MRs currently being created
	 */
	u32 total_mrs;
	u32 available_mrs;
	u32 limit;
	u32 pending;

	/* Statistics */
	u32                     miss;

	struct mlx5_ib_dev     *dev;
	struct work_struct	work;
	struct delayed_work	dwork;
};

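/*
 * A worked sketch of the water marks described above (assumed behaviour,
 * simplified from the logic in mr.c): the background worker tries to keep
 * available_mrs between limit and 2 * limit, e.g.
 *
 *	if (ent->available_mrs < ent->limit)
 *		// below the low water mark: queue refill work
 *	else if (ent->available_mrs > 2 * ent->limit)
 *		// above the high water mark: free surplus MRs
 */
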
struct mlx5_mr_cache {
	struct workqueue_struct *wq;
	struct mlx5_cache_ent	ent[MAX_MR_CACHE_ENTRIES];
	struct dentry		*root;
	unsigned long		last_add;
};

struct mlx5_ib_gsi_qp;

struct mlx5_ib_port_resources {
	struct mlx5_ib_resources *devr;
	struct mlx5_ib_gsi_qp *gsi;
	struct work_struct pkey_change_work;
};

struct mlx5_ib_resources {
	struct ib_cq	*c0;
	struct ib_xrcd	*x0;
	struct ib_xrcd	*x1;
	struct ib_pd	*p0;
	struct ib_srq	*s0;
	struct ib_srq	*s1;
	struct mlx5_ib_port_resources ports[2];
	/* Protects changes to the port resources */
	struct mutex	mutex;
};

struct mlx5_ib_counters {
	const char **names;
	size_t *offsets;
	u32 num_q_counters;
	u32 num_cong_counters;
	u32 num_ext_ppcnt_counters;
	u16 set_id;
};

struct mlx5_ib_multiport_info;

struct mlx5_ib_multiport {
	struct mlx5_ib_multiport_info *mpi;
	/* To be held when accessing the multiport info */
	spinlock_t mpi_lock;
};

struct mlx5_roce {
	/* Protect mlx5_ib_get_netdev from invoking dev_hold() with a NULL
	 * netdev pointer
	 */
	rwlock_t		netdev_lock;
	struct net_device	*netdev;
	struct notifier_block	nb;
	atomic_t		tx_port_affinity;
	enum ib_port_state last_port_state;
	struct mlx5_ib_dev	*dev;
	u8			native_port_num;
};

struct mlx5_ib_port {
	struct mlx5_ib_counters cnts;
	struct mlx5_ib_multiport mp;
	struct mlx5_ib_dbg_cc_params *dbg_cc_params;
	struct mlx5_roce roce;
	struct mlx5_eswitch_rep		*rep;
};

struct mlx5_ib_dbg_param {
	int			offset;
	struct mlx5_ib_dev	*dev;
	struct dentry		*dentry;
	u8			port_num;
};

enum mlx5_ib_dbg_cc_types {
	MLX5_IB_DBG_CC_RP_CLAMP_TGT_RATE,
	MLX5_IB_DBG_CC_RP_CLAMP_TGT_RATE_ATI,
	MLX5_IB_DBG_CC_RP_TIME_RESET,
	MLX5_IB_DBG_CC_RP_BYTE_RESET,
	MLX5_IB_DBG_CC_RP_THRESHOLD,
	MLX5_IB_DBG_CC_RP_AI_RATE,
	MLX5_IB_DBG_CC_RP_MAX_RATE,
	MLX5_IB_DBG_CC_RP_HAI_RATE,
	MLX5_IB_DBG_CC_RP_MIN_DEC_FAC,
	MLX5_IB_DBG_CC_RP_MIN_RATE,
	MLX5_IB_DBG_CC_RP_RATE_TO_SET_ON_FIRST_CNP,
	MLX5_IB_DBG_CC_RP_DCE_TCP_G,
	MLX5_IB_DBG_CC_RP_DCE_TCP_RTT,
	MLX5_IB_DBG_CC_RP_RATE_REDUCE_MONITOR_PERIOD,
	MLX5_IB_DBG_CC_RP_INITIAL_ALPHA_VALUE,
	MLX5_IB_DBG_CC_RP_GD,
	MLX5_IB_DBG_CC_NP_MIN_TIME_BETWEEN_CNPS,
	MLX5_IB_DBG_CC_NP_CNP_DSCP,
	MLX5_IB_DBG_CC_NP_CNP_PRIO_MODE,
	MLX5_IB_DBG_CC_NP_CNP_PRIO,
	MLX5_IB_DBG_CC_MAX,
};

struct mlx5_ib_dbg_cc_params {
	struct dentry			*root;
	struct mlx5_ib_dbg_param	params[MLX5_IB_DBG_CC_MAX];
};

enum {
	MLX5_MAX_DELAY_DROP_TIMEOUT_MS = 100,
};

struct mlx5_ib_delay_drop {
	struct mlx5_ib_dev     *dev;
	struct work_struct	delay_drop_work;
	/* serialize setting of delay drop */
	struct mutex		lock;
	u32			timeout;
	bool			activate;
	atomic_t		events_cnt;
	atomic_t		rqs_cnt;
	struct dentry		*dir_debugfs;
};

enum mlx5_ib_stages {
	MLX5_IB_STAGE_INIT,
	MLX5_IB_STAGE_FLOW_DB,
	MLX5_IB_STAGE_CAPS,
	MLX5_IB_STAGE_NON_DEFAULT_CB,
	MLX5_IB_STAGE_ROCE,
	MLX5_IB_STAGE_QP,
	MLX5_IB_STAGE_SRQ,
	MLX5_IB_STAGE_DEVICE_RESOURCES,
	MLX5_IB_STAGE_DEVICE_NOTIFIER,
	MLX5_IB_STAGE_ODP,
	MLX5_IB_STAGE_COUNTERS,
	MLX5_IB_STAGE_CONG_DEBUGFS,
	MLX5_IB_STAGE_UAR,
	MLX5_IB_STAGE_BFREG,
	MLX5_IB_STAGE_PRE_IB_REG_UMR,
	MLX5_IB_STAGE_WHITELIST_UID,
	MLX5_IB_STAGE_IB_REG,
	MLX5_IB_STAGE_POST_IB_REG_UMR,
	MLX5_IB_STAGE_DELAY_DROP,
	MLX5_IB_STAGE_CLASS_ATTR,
	MLX5_IB_STAGE_MAX,
};

struct mlx5_ib_stage {
	int (*init)(struct mlx5_ib_dev *dev);
	void (*cleanup)(struct mlx5_ib_dev *dev);
};

#define STAGE_CREATE(_stage, _init, _cleanup) \
	.stage[_stage] = {.init = _init, .cleanup = _cleanup}

struct mlx5_ib_profile {
	struct mlx5_ib_stage stage[MLX5_IB_STAGE_MAX];
};

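/*
 * Illustrative profile definition (hypothetical callbacks; the real
 * profiles live in main.c): stages run in enum order during init and in
 * reverse order during cleanup, and STAGE_CREATE() fills one slot with a
 * designated initializer:
 *
 *	static const struct mlx5_ib_profile example_profile = {
 *		STAGE_CREATE(MLX5_IB_STAGE_INIT,
 *			     example_stage_init,
 *			     example_stage_cleanup),
 *	};
 */
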
struct mlx5_ib_multiport_info {
	struct list_head list;
	struct mlx5_ib_dev *ibdev;
	struct mlx5_core_dev *mdev;
	struct notifier_block mdev_events;
	struct completion unref_comp;
	u64 sys_image_guid;
	u32 mdev_refcnt;
	bool is_master;
	bool unaffiliate;
};

struct mlx5_ib_flow_action {
	struct ib_flow_action		ib_action;
	union {
		struct {
			u64			    ib_flags;
			struct mlx5_accel_esp_xfrm *ctx;
		} esp_aes_gcm;
		struct {
			struct mlx5_ib_dev *dev;
			u32 sub_type;
			union {
				struct mlx5_modify_hdr *modify_hdr;
				struct mlx5_pkt_reformat *pkt_reformat;
			};
		} flow_action_raw;
	};
};

struct mlx5_dm {
	struct mlx5_core_dev *dev;
	/* This lock is used to protect the access to the shared
	 * allocation map when concurrent requests by different
	 * processes are handled.
	 */
	spinlock_t lock;
	DECLARE_BITMAP(memic_alloc_pages, MLX5_MAX_MEMIC_PAGES);
};

struct mlx5_read_counters_attr {
	struct mlx5_fc *hw_cntrs_hndl;
	u64 *out;
	u32 flags;
};

enum mlx5_ib_counters_type {
	MLX5_IB_COUNTERS_FLOW,
};

struct mlx5_ib_mcounters {
	struct ib_counters ibcntrs;
	enum mlx5_ib_counters_type type;
	/* number of counters supported for this counters type */
	u32 counters_num;
	struct mlx5_fc *hw_cntrs_hndl;
	/* read function for this counters type */
	int (*read_counters)(struct ib_device *ibdev,
			     struct mlx5_read_counters_attr *read_attr);
	/* max index set as part of create_flow */
	u32 cntrs_max_index;
	/* number of counters data entries (<description,index> pair) */
	u32 ncounters;
	/* counters data array for descriptions and indexes */
	struct mlx5_ib_flow_counters_desc *counters_data;
	/* protects access to mcounters internal data */
	struct mutex mcntrs_mutex;
};

static inline struct mlx5_ib_mcounters *
to_mcounters(struct ib_counters *ibcntrs)
{
	return container_of(ibcntrs, struct mlx5_ib_mcounters, ibcntrs);
}

int parse_flow_flow_action(struct mlx5_ib_flow_action *maction,
			   bool is_egress,
			   struct mlx5_flow_act *action);

struct mlx5_ib_lb_state {
	/* protect the user_td */
	struct mutex		mutex;
	u32			user_td;
	int			qps;
	bool			enabled;
};

struct mlx5_ib_pf_eq {
	struct notifier_block irq_nb;
	struct mlx5_ib_dev *dev;
	struct mlx5_eq *core;
	struct work_struct work;
	spinlock_t lock; /* Pagefaults spinlock */
	struct workqueue_struct *wq;
	mempool_t *pool;
};

struct mlx5_devx_event_table {
	struct mlx5_nb devx_nb;
	/* serialize updating the event_xa */
	struct mutex event_xa_lock;
	struct xarray event_xa;
};

struct mlx5_var_table {
	/* serialize updating the bitmap */
	struct mutex bitmap_lock;
	unsigned long *bitmap;
	u64 hw_start_addr;
	u32 stride_size;
	u64 num_var_hw_entries;
};

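/*
 * A hedged sketch of the address arithmetic implied by the fields above
 * (illustrative only): VAR entry idx lives stride_size bytes past the
 * previous one, so its address is
 *
 *	addr = var_table->hw_start_addr + (u64)idx * var_table->stride_size;
 *
 * with idx allocated from the bitmap under bitmap_lock.
 */
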
struct mlx5_ib_dev {
	struct ib_device		ib_dev;
	struct mlx5_core_dev		*mdev;
	struct notifier_block		mdev_events;
	int				num_ports;
	/* serialize update of capability mask
	 */
	struct mutex			cap_mask_mutex;
	u8				ib_active:1;
	u8				is_rep:1;
	u8				lag_active:1;
	u8				wc_support:1;
	u8				fill_delay;
	struct umr_common		umrc;
	/* sync used page count stats
	 */
	struct mlx5_ib_resources	devr;

	atomic_t			mkey_var;
	struct mlx5_mr_cache		cache;
	struct timer_list		delay_timer;
	/* Prevents soft lock on massive reg MRs */
	struct mutex			slow_path_mutex;
	struct ib_odp_caps	odp_caps;
	u64			odp_max_size;
	struct mlx5_ib_pf_eq	odp_pf_eq;

	/*
	 * Sleepable RCU that prevents destruction of MRs while they are still
	 * being used by a page fault handler.
	 */
	struct srcu_struct      odp_srcu;
	struct xarray		odp_mkeys;

	u32			null_mkey;
	struct mlx5_ib_flow_db	*flow_db;
	/* protect resources needed as part of reset flow */
	spinlock_t		reset_flow_resource_lock;
	struct list_head	qp_list;
	/* Array with num_ports elements */
	struct mlx5_ib_port	*port;
	struct mlx5_sq_bfreg	bfreg;
	struct mlx5_sq_bfreg	wc_bfreg;
	struct mlx5_sq_bfreg	fp_bfreg;
	struct mlx5_ib_delay_drop	delay_drop;
	const struct mlx5_ib_profile	*profile;

	struct mlx5_ib_lb_state		lb;
	u8			umr_fence;
	struct list_head	ib_dev_list;
	u64			sys_image_guid;
	struct mlx5_dm		dm;
	u16			devx_whitelist_uid;
	struct mlx5_srq_table   srq_table;
	struct mlx5_qp_table    qp_table;
	struct mlx5_async_ctx   async_ctx;
	struct mlx5_devx_event_table devx_event_table;
	struct mlx5_var_table var_table;

	struct xarray sig_mrs;
};

static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq)
{
	return container_of(mcq, struct mlx5_ib_cq, mcq);
}

static inline struct mlx5_ib_xrcd *to_mxrcd(struct ib_xrcd *ibxrcd)
{
	return container_of(ibxrcd, struct mlx5_ib_xrcd, ibxrcd);
}

static inline struct mlx5_ib_dev *to_mdev(struct ib_device *ibdev)
{
	return container_of(ibdev, struct mlx5_ib_dev, ib_dev);
}

static inline struct mlx5_ib_dev *mlx5_udata_to_mdev(struct ib_udata *udata)
{
	struct mlx5_ib_ucontext *context = rdma_udata_to_drv_context(
		udata, struct mlx5_ib_ucontext, ibucontext);

	return to_mdev(context->ibucontext.device);
}

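/*
 * Illustrative call site (not from this file): verbs handlers that are
 * only handed a struct ib_udata can still reach the device:
 *
 *	struct mlx5_ib_dev *dev = mlx5_udata_to_mdev(udata);
 *
 *	mlx5_ib_dbg(dev, "handling user request\n");
 */
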
static inline struct mlx5_ib_cq *to_mcq(struct ib_cq *ibcq)
{
	return container_of(ibcq, struct mlx5_ib_cq, ibcq);
}

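/*
 * Unlike the single-step helpers above, this is a two-step recovery: the
 * mlx5_core_qp is embedded in a struct mlx5_ib_qp_base, which may sit in
 * any member of the QP-type union in struct mlx5_ib_qp, so the base keeps
 * an explicit back-pointer (container_mibqp) to its owning QP.
 */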
static inline struct mlx5_ib_qp *to_mibqp(struct mlx5_core_qp *mqp)
{
	return container_of(mqp, struct mlx5_ib_qp_base, mqp)->container_mibqp;
}

static inline struct mlx5_ib_rwq *to_mibrwq(struct mlx5_core_qp *core_qp)
{
	return container_of(core_qp, struct mlx5_ib_rwq, core_qp);
}

static inline struct mlx5_ib_mr *to_mibmr(struct mlx5_core_mkey *mmkey)
{
	return container_of(mmkey, struct mlx5_ib_mr, mmkey);
}

static inline struct mlx5_ib_pd *to_mpd(struct ib_pd *ibpd)
{
	return container_of(ibpd, struct mlx5_ib_pd, ibpd);
}

static inline struct mlx5_ib_srq *to_msrq(struct ib_srq *ibsrq)
{
	return container_of(ibsrq, struct mlx5_ib_srq, ibsrq);
}

static inline struct mlx5_ib_qp *to_mqp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct mlx5_ib_qp, ibqp);
}

static inline struct mlx5_ib_rwq *to_mrwq(struct ib_wq *ibwq)
{
	return container_of(ibwq, struct mlx5_ib_rwq, ibwq);
}

static inline struct mlx5_ib_rwq_ind_table *to_mrwq_ind_table(struct ib_rwq_ind_table *ib_rwq_ind_tbl)
{
	return container_of(ib_rwq_ind_tbl, struct mlx5_ib_rwq_ind_table, ib_rwq_ind_tbl);
}

static inline struct mlx5_ib_srq *to_mibsrq(struct mlx5_core_srq *msrq)
{
	return container_of(msrq, struct mlx5_ib_srq, msrq);
}

static inline struct mlx5_ib_dm *to_mdm(struct ib_dm *ibdm)
{
	return container_of(ibdm, struct mlx5_ib_dm, ibdm);
}

static inline struct mlx5_ib_mr *to_mmr(struct ib_mr *ibmr)
{
	return container_of(ibmr, struct mlx5_ib_mr, ibmr);
}

static inline struct mlx5_ib_mw *to_mmw(struct ib_mw *ibmw)
{
	return container_of(ibmw, struct mlx5_ib_mw, ibmw);
}

static inline struct mlx5_ib_flow_action *
to_mflow_act(struct ib_flow_action *ibact)
{
	return container_of(ibact, struct mlx5_ib_flow_action, ib_action);
}

static inline struct mlx5_user_mmap_entry *
to_mmmap(struct rdma_user_mmap_entry *rdma_entry)
{
	return container_of(rdma_entry,
			    struct mlx5_user_mmap_entry, rdma_entry);
}

Jason Gunthorpeb0ea0fa2019-01-09 11:15:16 +02001149int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context,
1150 struct ib_udata *udata, unsigned long virt,
Eli Cohene126ba92013-07-07 17:25:49 +03001151 struct mlx5_db *db);
1152void mlx5_ib_db_unmap_user(struct mlx5_ib_ucontext *context, struct mlx5_db *db);
1153void __mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq);
1154void mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq);
1155void mlx5_ib_free_srq_wqe(struct mlx5_ib_srq *srq, int wqe_index);
Maor Gottliebfa5d0102020-04-30 22:21:42 +03001156int mlx5_ib_create_ah(struct ib_ah *ah, struct rdma_ah_init_attr *init_attr,
Leon Romanovskyd3456912019-04-03 16:42:42 +03001157 struct ib_udata *udata);
Dasaratharaman Chandramouli90898852017-04-29 14:41:18 -04001158int mlx5_ib_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr);
Leon Romanovskyd3456912019-04-03 16:42:42 +03001159void mlx5_ib_destroy_ah(struct ib_ah *ah, u32 flags);
Leon Romanovsky68e326d2019-04-03 16:42:43 +03001160int mlx5_ib_create_srq(struct ib_srq *srq, struct ib_srq_init_attr *init_attr,
1161 struct ib_udata *udata);
Eli Cohene126ba92013-07-07 17:25:49 +03001162int mlx5_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
1163 enum ib_srq_attr_mask attr_mask, struct ib_udata *udata);
1164int mlx5_ib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr);
Leon Romanovsky68e326d2019-04-03 16:42:43 +03001165void mlx5_ib_destroy_srq(struct ib_srq *srq, struct ib_udata *udata);
Bart Van Assched34ac5c2018-07-18 09:25:32 -07001166int mlx5_ib_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
1167 const struct ib_recv_wr **bad_wr);
Mark Bloch0042f9e2018-09-17 13:30:49 +03001168int mlx5_ib_enable_lb(struct mlx5_ib_dev *dev, bool td, bool qp);
1169void mlx5_ib_disable_lb(struct mlx5_ib_dev *dev, bool td, bool qp);
Eli Cohene126ba92013-07-07 17:25:49 +03001170struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
1171 struct ib_qp_init_attr *init_attr,
1172 struct ib_udata *udata);
1173int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1174 int attr_mask, struct ib_udata *udata);
1175int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
1176 struct ib_qp_init_attr *qp_init_attr);
Shamir Rabinovitchc4367a22019-03-31 19:10:05 +03001177int mlx5_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata);
Yishai Hadasd0e84c02018-06-19 10:43:55 +03001178void mlx5_ib_drain_sq(struct ib_qp *qp);
1179void mlx5_ib_drain_rq(struct ib_qp *qp);
Bart Van Assched34ac5c2018-07-18 09:25:32 -07001180int mlx5_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
1181 const struct ib_send_wr **bad_wr);
1182int mlx5_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
1183 const struct ib_recv_wr **bad_wr);
Moni Shouada9ee9d2020-01-15 14:43:34 +02001184int mlx5_ib_read_wqe_sq(struct mlx5_ib_qp *qp, int wqe_index, void *buffer,
1185 size_t buflen, size_t *bc);
1186int mlx5_ib_read_wqe_rq(struct mlx5_ib_qp *qp, int wqe_index, void *buffer,
1187 size_t buflen, size_t *bc);
1188int mlx5_ib_read_wqe_srq(struct mlx5_ib_srq *srq, int wqe_index, void *buffer,
1189 size_t buflen, size_t *bc);
Leon Romanovskye39afe32019-05-28 14:37:29 +03001190int mlx5_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
1191 struct ib_udata *udata);
Leon Romanovskya52c8e22019-05-28 14:37:28 +03001192void mlx5_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
Eli Cohene126ba92013-07-07 17:25:49 +03001193int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
1194int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
1195int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);
1196int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata);
1197struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc);
1198struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
1199 u64 virt_addr, int access_flags,
1200 struct ib_udata *udata);
Moni Shoua813e90b2018-12-11 13:37:53 +02001201int mlx5_ib_advise_mr(struct ib_pd *pd,
1202 enum ib_uverbs_advise_mr_advice advice,
1203 u32 flags,
1204 struct ib_sge *sg_list,
1205 u32 num_sge,
1206 struct uverbs_attr_bundle *attrs);
Matan Barakd2370e02016-02-29 18:05:30 +02001207struct ib_mw *mlx5_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
1208 struct ib_udata *udata);
1209int mlx5_ib_dealloc_mw(struct ib_mw *mw);
Artemy Kovalyov7d0cc6e2017-01-02 11:37:44 +02001210int mlx5_ib_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages,
1211 int page_shift, int flags);
Artemy Kovalyov81713d32017-01-18 16:58:11 +02001212struct mlx5_ib_mr *mlx5_ib_alloc_implicit_mr(struct mlx5_ib_pd *pd,
Jason Gunthorpeb0ea0fa2019-01-09 11:15:16 +02001213 struct ib_udata *udata,
Artemy Kovalyov81713d32017-01-18 16:58:11 +02001214 int access_flags);
1215void mlx5_ib_free_implicit_mr(struct mlx5_ib_mr *mr);
Jason Gunthorpe09689702019-10-09 13:09:34 -03001216void mlx5_ib_fence_odp_mr(struct mlx5_ib_mr *mr);
Noa Osherovich56e11d62016-02-29 16:46:51 +02001217int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
1218 u64 length, u64 virt_addr, int access_flags,
1219 struct ib_pd *pd, struct ib_udata *udata);
Shamir Rabinovitchc4367a22019-03-31 19:10:05 +03001220int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata);
1221struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
1222 u32 max_num_sg, struct ib_udata *udata);
Max Gurtovoy6c984472019-06-11 18:52:42 +03001223struct ib_mr *mlx5_ib_alloc_mr_integrity(struct ib_pd *pd,
1224 u32 max_num_sg,
1225 u32 max_num_meta_sg);
Christoph Hellwigff2ba992016-05-03 18:01:04 +02001226int mlx5_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
Bart Van Assche9aa8b322016-05-12 10:49:15 -07001227 unsigned int *sg_offset);
Max Gurtovoy6c984472019-06-11 18:52:42 +03001228int mlx5_ib_map_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg,
1229 int data_sg_nents, unsigned int *data_sg_offset,
1230 struct scatterlist *meta_sg, int meta_sg_nents,
1231 unsigned int *meta_sg_offset);
int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
			const struct ib_wc *in_wc, const struct ib_grh *in_grh,
			const struct ib_mad *in, struct ib_mad *out,
			size_t *out_mad_size, u16 *out_mad_pkey_index);
struct ib_xrcd *mlx5_ib_alloc_xrcd(struct ib_device *ibdev,
				   struct ib_udata *udata);
int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata);
int mlx5_ib_get_buf_offset(u64 addr, int page_shift, u32 *offset);
int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, u8 port);
int mlx5_query_mad_ifc_smp_attr_node_info(struct ib_device *ibdev,
					  struct ib_smp *out_mad);
int mlx5_query_mad_ifc_system_image_guid(struct ib_device *ibdev,
					 __be64 *sys_image_guid);
int mlx5_query_mad_ifc_max_pkeys(struct ib_device *ibdev,
				 u16 *max_pkeys);
int mlx5_query_mad_ifc_vendor_id(struct ib_device *ibdev,
				 u32 *vendor_id);
int mlx5_query_mad_ifc_node_desc(struct mlx5_ib_dev *dev, char *node_desc);
int mlx5_query_mad_ifc_node_guid(struct mlx5_ib_dev *dev, __be64 *node_guid);
int mlx5_query_mad_ifc_pkey(struct ib_device *ibdev, u8 port, u16 index,
			    u16 *pkey);
int mlx5_query_mad_ifc_gids(struct ib_device *ibdev, u8 port, int index,
			    union ib_gid *gid);
int mlx5_query_mad_ifc_port(struct ib_device *ibdev, u8 port,
			    struct ib_port_attr *props);
int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
		       struct ib_port_attr *props);
int mlx5_ib_init_fmr(struct mlx5_ib_dev *dev);
void mlx5_ib_cleanup_fmr(struct mlx5_ib_dev *dev);
void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr,
			unsigned long max_page_shift,
			int *count, int *shift,
			int *ncont, int *order);
void __mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
			    int page_shift, size_t offset, size_t num_pages,
			    __be64 *pas, int access_flags);
void mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
			  int page_shift, __be64 *pas, int access_flags);
void mlx5_ib_copy_pas(u64 *old, u64 *new, int step, int num);
int mlx5_ib_get_cqe_size(struct ib_cq *ibcq);
int mlx5_mr_cache_init(struct mlx5_ib_dev *dev);
int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev);

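/*
 * MR cache: pools of pre-created mkeys, grouped into entries by size, that
 * can be handed out without a round trip to firmware.  A cached MR is
 * revoked with mlx5_mr_cache_invalidate() (a UMR that fences DMA and frees
 * the mkey) before it is returned to the cache for reuse.
 */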
struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev,
				       unsigned int entry);
void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
int mlx5_mr_cache_invalidate(struct mlx5_ib_mr *mr);

int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
			    struct ib_mr_status *mr_status);
struct ib_wq *mlx5_ib_create_wq(struct ib_pd *pd,
				struct ib_wq_init_attr *init_attr,
				struct ib_udata *udata);
void mlx5_ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata);
int mlx5_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
		      u32 wq_attr_mask, struct ib_udata *udata);
struct ib_rwq_ind_table *mlx5_ib_create_rwq_ind_table(struct ib_device *device,
						      struct ib_rwq_ind_table_init_attr *init_attr,
						      struct ib_udata *udata);
int mlx5_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table);
struct ib_dm *mlx5_ib_alloc_dm(struct ib_device *ibdev,
			       struct ib_ucontext *context,
			       struct ib_dm_alloc_attr *attr,
			       struct uverbs_attr_bundle *attrs);
int mlx5_ib_dealloc_dm(struct ib_dm *ibdm, struct uverbs_attr_bundle *attrs);
struct ib_mr *mlx5_ib_reg_dm_mr(struct ib_pd *pd, struct ib_dm *dm,
				struct ib_dm_mr_attr *attr,
				struct uverbs_attr_bundle *attrs);

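/*
 * On-demand paging (ODP).  When CONFIG_INFINIBAND_ON_DEMAND_PAGING is not
 * set, the stubs below keep callers free of #ifdefs: init/cleanup become
 * no-ops and MR prefetch reports -EOPNOTSUPP.
 */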
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev);
int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev);
void mlx5_ib_odp_cleanup_one(struct mlx5_ib_dev *ibdev);
int __init mlx5_ib_odp_init(void);
void mlx5_ib_odp_cleanup(void);
void mlx5_odp_init_mr_cache_entry(struct mlx5_cache_ent *ent);
void mlx5_odp_populate_xlt(void *xlt, size_t idx, size_t nentries,
			   struct mlx5_ib_mr *mr, int flags);

int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
			       enum ib_uverbs_advise_mr_advice advice,
			       u32 flags, struct ib_sge *sg_list, u32 num_sge);
#else /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
static inline void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev)
{
	return;
}

static inline int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev) { return 0; }
static inline void mlx5_ib_odp_cleanup_one(struct mlx5_ib_dev *ibdev) {}
static inline int mlx5_ib_odp_init(void) { return 0; }
static inline void mlx5_ib_odp_cleanup(void) {}
static inline void mlx5_odp_init_mr_cache_entry(struct mlx5_cache_ent *ent) {}
static inline void mlx5_odp_populate_xlt(void *xlt, size_t idx, size_t nentries,
					 struct mlx5_ib_mr *mr, int flags) {}

static inline int
mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
			   enum ib_uverbs_advise_mr_advice advice, u32 flags,
			   struct ib_sge *sg_list, u32 num_sge)
{
	return -EOPNOTSUPP;
}
#endif /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */

extern const struct mmu_interval_notifier_ops mlx5_mn_ops;

/* Needed for rep profile */
void __mlx5_ib_remove(struct mlx5_ib_dev *dev,
		      const struct mlx5_ib_profile *profile,
		      int stage);
void *__mlx5_ib_add(struct mlx5_ib_dev *dev,
		    const struct mlx5_ib_profile *profile);

int mlx5_ib_get_vf_config(struct ib_device *device, int vf,
			  u8 port, struct ifla_vf_info *info);
int mlx5_ib_set_vf_link_state(struct ib_device *device, int vf,
			      u8 port, int state);
int mlx5_ib_get_vf_stats(struct ib_device *device, int vf,
			 u8 port, struct ifla_vf_stats *stats);
int mlx5_ib_get_vf_guid(struct ib_device *device, int vf, u8 port,
			struct ifla_vf_guid *node_guid,
			struct ifla_vf_guid *port_guid);
int mlx5_ib_set_vf_guid(struct ib_device *device, int vf, u8 port,
			u64 guid, int type);

__be16 mlx5_get_roce_udp_sport(struct mlx5_ib_dev *dev,
			       const struct ib_gid_attr *attr);

void mlx5_ib_cleanup_cong_debugfs(struct mlx5_ib_dev *dev, u8 port_num);
void mlx5_ib_init_cong_debugfs(struct mlx5_ib_dev *dev, u8 port_num);

/* GSI QP helper functions */
struct ib_qp *mlx5_ib_gsi_create_qp(struct ib_pd *pd,
				    struct ib_qp_init_attr *init_attr);
int mlx5_ib_gsi_destroy_qp(struct ib_qp *qp);
int mlx5_ib_gsi_modify_qp(struct ib_qp *qp, struct ib_qp_attr *attr,
			  int attr_mask);
int mlx5_ib_gsi_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
			 int qp_attr_mask,
			 struct ib_qp_init_attr *qp_init_attr);
int mlx5_ib_gsi_post_send(struct ib_qp *qp, const struct ib_send_wr *wr,
			  const struct ib_send_wr **bad_wr);
int mlx5_ib_gsi_post_recv(struct ib_qp *qp, const struct ib_recv_wr *wr,
			  const struct ib_recv_wr **bad_wr);
void mlx5_ib_gsi_pkey_change(struct mlx5_ib_gsi_qp *gsi);

int mlx5_ib_generate_wc(struct ib_cq *ibcq, struct ib_wc *wc);

void mlx5_ib_free_bfreg(struct mlx5_ib_dev *dev, struct mlx5_bfreg_info *bfregi,
			int bfregn);
struct mlx5_ib_dev *mlx5_ib_get_ibdev_from_mpi(struct mlx5_ib_multiport_info *mpi);
struct mlx5_core_dev *mlx5_ib_get_native_port_mdev(struct mlx5_ib_dev *dev,
						   u8 ib_port_num,
						   u8 *native_port_num);
void mlx5_ib_put_native_port_mdev(struct mlx5_ib_dev *dev,
				  u8 port_num);
int mlx5_ib_fill_res_entry(struct sk_buff *msg,
			   struct rdma_restrack_entry *res);
int mlx5_ib_fill_stat_entry(struct sk_buff *msg,
			    struct rdma_restrack_entry *res);

extern const struct uapi_definition mlx5_ib_devx_defs[];
extern const struct uapi_definition mlx5_ib_flow_defs[];
extern const struct uapi_definition mlx5_ib_qos_defs[];

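/*
 * DEVX and raw flow-steering entry points are only available when
 * user-space verbs access is built in; otherwise the stubs below
 * disable them.
 */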
#if IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS)
int mlx5_ib_devx_create(struct mlx5_ib_dev *dev, bool is_user);
void mlx5_ib_devx_destroy(struct mlx5_ib_dev *dev, u16 uid);
void mlx5_ib_devx_init_event_table(struct mlx5_ib_dev *dev);
void mlx5_ib_devx_cleanup_event_table(struct mlx5_ib_dev *dev);
struct mlx5_ib_flow_handler *mlx5_ib_raw_fs_rule_add(
	struct mlx5_ib_dev *dev, struct mlx5_ib_flow_matcher *fs_matcher,
	struct mlx5_flow_context *flow_context,
	struct mlx5_flow_act *flow_act, u32 counter_id,
	void *cmd_in, int inlen, int dest_id, int dest_type);
bool mlx5_ib_devx_is_flow_dest(void *obj, int *dest_id, int *dest_type);
bool mlx5_ib_devx_is_flow_counter(void *obj, u32 offset, u32 *counter_id);
void mlx5_ib_destroy_flow_action_raw(struct mlx5_ib_flow_action *maction);
#else
static inline int
mlx5_ib_devx_create(struct mlx5_ib_dev *dev,
		    bool is_user) { return -EOPNOTSUPP; }
static inline void mlx5_ib_devx_destroy(struct mlx5_ib_dev *dev, u16 uid) {}
static inline void mlx5_ib_devx_init_event_table(struct mlx5_ib_dev *dev) {}
static inline void mlx5_ib_devx_cleanup_event_table(struct mlx5_ib_dev *dev) {}
static inline bool mlx5_ib_devx_is_flow_dest(void *obj, int *dest_id,
					     int *dest_type)
{
	return false;
}
static inline void
mlx5_ib_destroy_flow_action_raw(struct mlx5_ib_flow_action *maction)
{
	return;
}
#endif
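/*
 * Prepare an SMP MAD for a management-class GET.  Callers fill in the
 * attribute and fire the query; a sketch (the attribute ID varies per
 * query, and mlx5_MAD_IFC() is internal to mad.c):
 *
 *	init_query_mad(in_mad);
 *	in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;
 *	err = mlx5_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad);
 */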
static inline void init_query_mad(struct ib_smp *mad)
{
	mad->base_version = 1;
	mad->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED;
	mad->class_version = 1;
	mad->method = IB_MGMT_METHOD_GET;
}

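/*
 * Translate IB access flags into the matching MLX5_PERM_* mkey bits.
 * Local read access is always granted.
 */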
static inline u8 convert_access(int acc)
{
	return (acc & IB_ACCESS_REMOTE_ATOMIC ? MLX5_PERM_ATOMIC : 0) |
	       (acc & IB_ACCESS_REMOTE_WRITE ? MLX5_PERM_REMOTE_WRITE : 0) |
	       (acc & IB_ACCESS_REMOTE_READ ? MLX5_PERM_REMOTE_READ : 0) |
	       (acc & IB_ACCESS_LOCAL_WRITE ? MLX5_PERM_LOCAL_WRITE : 0) |
	       MLX5_PERM_LOCAL_READ;
}

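/* QP1 (the GSI QP) is backed by the internal MLX5_IB_QPT_HW_GSI QP type. */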
static inline int is_qp1(enum ib_qp_type qp_type)
{
	return qp_type == MLX5_IB_QPT_HW_GSI;
}

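/*
 * A UMR-based registration covers at most 2^16 pages, i.e. 256MB of
 * registered memory when 4K pages are used.
 */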
#define MLX5_MAX_UMR_SHIFT 16
#define MLX5_MAX_UMR_PAGES (1 << MLX5_MAX_UMR_SHIFT)

static inline u32 check_cq_create_flags(u32 flags)
{
	/*
	 * Returns a non-zero value for unsupported CQ create flags,
	 * zero otherwise.
	 */
	return (flags & ~(IB_UVERBS_CQ_FLAGS_IGNORE_OVERRUN |
			  IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION));
}

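/*
 * User-index (uidx) handling: with cqe_version 1 the caller must supply a
 * valid, in-range uidx; with cqe_version 0 the default index is always
 * used.  The get_*_user_index() helpers below additionally cope with
 * older, shorter command layouts in which the uidx field is absent.
 */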
static inline int verify_assign_uidx(u8 cqe_version, u32 cmd_uidx,
				     u32 *user_index)
{
	if (cqe_version) {
		if ((cmd_uidx == MLX5_IB_DEFAULT_UIDX) ||
		    (cmd_uidx & ~MLX5_USER_ASSIGNED_UIDX_MASK))
			return -EINVAL;
		*user_index = cmd_uidx;
	} else {
		*user_index = MLX5_IB_DEFAULT_UIDX;
	}

	return 0;
}

static inline int get_qp_user_index(struct mlx5_ib_ucontext *ucontext,
				    struct mlx5_ib_create_qp *ucmd,
				    int inlen,
				    u32 *user_index)
{
	u8 cqe_version = ucontext->cqe_version;

	if ((offsetofend(typeof(*ucmd), uidx) <= inlen) && !cqe_version &&
	    (ucmd->uidx == MLX5_IB_DEFAULT_UIDX))
		return 0;

	if ((offsetofend(typeof(*ucmd), uidx) <= inlen) != !!cqe_version)
		return -EINVAL;

	return verify_assign_uidx(cqe_version, ucmd->uidx, user_index);
}

static inline int get_srq_user_index(struct mlx5_ib_ucontext *ucontext,
				     struct mlx5_ib_create_srq *ucmd,
				     int inlen,
				     u32 *user_index)
{
	u8 cqe_version = ucontext->cqe_version;

	if ((offsetofend(typeof(*ucmd), uidx) <= inlen) && !cqe_version &&
	    (ucmd->uidx == MLX5_IB_DEFAULT_UIDX))
		return 0;

	if ((offsetofend(typeof(*ucmd), uidx) <= inlen) != !!cqe_version)
		return -EINVAL;

	return verify_assign_uidx(cqe_version, ucmd->uidx, user_index);
}

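/*
 * When the device supports 4K UARs (uar_4k) and the library asked for
 * them, several UARs share one system page: MLX5_UARS_IN_PAGE is defined
 * elsewhere in the driver as PAGE_SIZE divided by the 4K adapter page
 * size, e.g. 16 on a 64K-page system.
 */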
static inline int get_uars_per_sys_page(struct mlx5_ib_dev *dev, bool lib_support)
{
	return lib_support && MLX5_CAP_GEN(dev->mdev, uar_4k) ?
		MLX5_UARS_IN_PAGE : 1;
}

static inline int get_num_static_uars(struct mlx5_ib_dev *dev,
				      struct mlx5_bfreg_info *bfregi)
{
	return get_uars_per_sys_page(dev, bfregi->lib_uar_4k) * bfregi->num_static_sys_pages;
}

unsigned long mlx5_ib_get_xlt_emergency_page(void);
void mlx5_ib_put_xlt_emergency_page(void);

int bfregn_to_uar_index(struct mlx5_ib_dev *dev,
			struct mlx5_bfreg_info *bfregi, u32 bfregn,
			bool dyn_bfreg);

int mlx5_ib_qp_set_counter(struct ib_qp *qp, struct rdma_counter *counter);
u16 mlx5_ib_get_counters_id(struct mlx5_ib_dev *dev, u8 port_num);

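/*
 * Decide whether an MR can be registered or updated with a UMR work
 * request rather than a firmware command.  UMR is ruled out when firmware
 * disables entity-size or atomic-access modification, and (at present)
 * for MRs requesting relaxed ordering on devices that expose the
 * relaxed-ordering capabilities.
 */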
static inline bool mlx5_ib_can_use_umr(struct mlx5_ib_dev *dev,
				       bool do_modify_atomic, int access_flags)
{
	if (MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled))
		return false;

	if (do_modify_atomic &&
	    MLX5_CAP_GEN(dev->mdev, atomic) &&
	    MLX5_CAP_GEN(dev->mdev, umr_modify_atomic_disabled))
		return false;

	if (access_flags & IB_ACCESS_RELAXED_ORDERING &&
	    (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write) ||
	     MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read)))
		return false;

	return true;
}

int mlx5_ib_enable_driver(struct ib_device *dev);
int mlx5_ib_test_wc(struct mlx5_ib_dev *dev);
#endif /* MLX5_IB_H */