/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef MLX5_IB_H
#define MLX5_IB_H

#include <linux/kernel.h>
#include <linux/sched.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_smi.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cq.h>
#include <linux/mlx5/qp.h>
#include <linux/mlx5/srq.h>
#include <linux/types.h>
#include <linux/mlx5/transobj.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/mlx5-abi.h>

#define mlx5_ib_dbg(dev, format, arg...)				\
pr_debug("%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name, __func__,	\
	 __LINE__, current->pid, ##arg)

#define mlx5_ib_err(dev, format, arg...)				\
pr_err("%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name, __func__,	\
	__LINE__, current->pid, ##arg)

#define mlx5_ib_warn(dev, format, arg...)				\
pr_warn("%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name, __func__,	\
	__LINE__, current->pid, ##arg)

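/*
 * Usage sketch for the logging macros above (illustrative only; the message
 * and variables are made up, not taken from the driver):
 *
 *	mlx5_ib_dbg(dev, "created CQ 0x%x with %d CQEs\n", cqn, entries);
 *
 * Each macro prefixes the message with the IB device name, the calling
 * function, the source line and the current PID before handing it to the
 * corresponding pr_*() helper.
 */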
#define field_avail(type, fld, sz) (offsetof(type, fld) +		\
				    sizeof(((type *)0)->fld) <= (sz))
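/*
 * field_avail() checks whether a user-space command struct, of which only
 * 'sz' bytes were provided, is new enough to contain 'fld'. A sketch with a
 * hypothetical struct:
 *
 *	struct cmd { __u32 a; __u32 b; };
 *
 *	field_avail(struct cmd, b, 8) -> true  (offsetof(b) + sizeof(b) == 8)
 *	field_avail(struct cmd, b, 4) -> false (only 'a' was supplied)
 */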
#define MLX5_IB_DEFAULT_UIDX 0xffffff
#define MLX5_USER_ASSIGNED_UIDX_MASK __mlx5_mask(qpc, user_index)

#define MLX5_MKEY_PAGE_SHIFT_MASK __mlx5_mask(mkc, log_page_size)

enum {
	MLX5_IB_MMAP_CMD_SHIFT	= 8,
	MLX5_IB_MMAP_CMD_MASK	= 0xff,
};

enum mlx5_ib_mmap_cmd {
	MLX5_IB_MMAP_REGULAR_PAGE		= 0,
	MLX5_IB_MMAP_GET_CONTIGUOUS_PAGES	= 1,
	MLX5_IB_MMAP_WC_PAGE			= 2,
	MLX5_IB_MMAP_NC_PAGE			= 3,
	/* 5 is chosen in order to be compatible with old versions of libmlx5 */
	MLX5_IB_MMAP_CORE_CLOCK			= 5,
};
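/*
 * The mmap command is carried in the high bits of the page offset that user
 * space passes to mmap(). A sketch of the decoding implied by the shift/mask
 * definitions above (an assumption, not copied from the .c file):
 *
 *	int cmd = (vma->vm_pgoff >> MLX5_IB_MMAP_CMD_SHIFT) &
 *		  MLX5_IB_MMAP_CMD_MASK;
 */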

enum {
	MLX5_RES_SCAT_DATA32_CQE	= 0x1,
	MLX5_RES_SCAT_DATA64_CQE	= 0x2,
	MLX5_REQ_SCAT_DATA32_CQE	= 0x11,
	MLX5_REQ_SCAT_DATA64_CQE	= 0x22,
};

enum mlx5_ib_latency_class {
	MLX5_IB_LATENCY_CLASS_LOW,
	MLX5_IB_LATENCY_CLASS_MEDIUM,
	MLX5_IB_LATENCY_CLASS_HIGH,
	MLX5_IB_LATENCY_CLASS_FAST_PATH
};

enum mlx5_ib_mad_ifc_flags {
	MLX5_MAD_IFC_IGNORE_MKEY	= 1,
	MLX5_MAD_IFC_IGNORE_BKEY	= 2,
	MLX5_MAD_IFC_NET_VIEW		= 4,
};

enum {
	MLX5_CROSS_CHANNEL_UUAR = 0,
};

enum {
	MLX5_CQE_VERSION_V0,
	MLX5_CQE_VERSION_V1,
};

struct mlx5_ib_vma_private_data {
	struct list_head	list;
	struct vm_area_struct	*vma;
};

struct mlx5_ib_ucontext {
	struct ib_ucontext	ibucontext;
	struct list_head	db_page_list;

	/* protect doorbell record alloc/free
	 */
	struct mutex		db_page_mutex;
	struct mlx5_uuar_info	uuari;
	u8			cqe_version;
	/* Transport Domain number */
	u32			tdn;
	struct list_head	vma_private_list;
};

static inline struct mlx5_ib_ucontext *to_mucontext(struct ib_ucontext *ibucontext)
{
	return container_of(ibucontext, struct mlx5_ib_ucontext, ibucontext);
}

struct mlx5_ib_pd {
	struct ib_pd		ibpd;
	u32			pdn;
};

#define MLX5_IB_FLOW_MCAST_PRIO		(MLX5_BY_PASS_NUM_PRIOS - 1)
#define MLX5_IB_FLOW_LAST_PRIO		(MLX5_BY_PASS_NUM_REGULAR_PRIOS - 1)
#if (MLX5_IB_FLOW_LAST_PRIO <= 0)
#error "Invalid number of bypass priorities"
#endif
#define MLX5_IB_FLOW_LEFTOVERS_PRIO	(MLX5_IB_FLOW_MCAST_PRIO + 1)

#define MLX5_IB_NUM_FLOW_FT		(MLX5_IB_FLOW_LEFTOVERS_PRIO + 1)
#define MLX5_IB_NUM_SNIFFER_FTS		2
struct mlx5_ib_flow_prio {
	struct mlx5_flow_table		*flow_table;
	unsigned int			refcount;
};

struct mlx5_ib_flow_handler {
	struct list_head		list;
	struct ib_flow			ibflow;
	struct mlx5_ib_flow_prio	*prio;
	struct mlx5_flow_handle		*rule;
};

struct mlx5_ib_flow_db {
	struct mlx5_ib_flow_prio	prios[MLX5_IB_NUM_FLOW_FT];
	struct mlx5_ib_flow_prio	sniffer[MLX5_IB_NUM_SNIFFER_FTS];
	struct mlx5_flow_table		*lag_demux_ft;
	/* Protects the flow steering bypass flow tables while flow rules
	 * are added or deleted; only a single add/removal of a flow
	 * steering rule can be in progress at a time.
	 */
	struct mutex			lock;
};

/* Use macros here so that we don't have to duplicate
 * enum ib_send_flags and enum ib_qp_type for the low-level driver
 */

#define MLX5_IB_SEND_UMR_UNREG	IB_SEND_RESERVED_START
#define MLX5_IB_SEND_UMR_FAIL_IF_FREE (IB_SEND_RESERVED_START << 1)
#define MLX5_IB_SEND_UMR_UPDATE_MTT (IB_SEND_RESERVED_START << 2)

#define MLX5_IB_SEND_UMR_UPDATE_TRANSLATION	(IB_SEND_RESERVED_START << 3)
#define MLX5_IB_SEND_UMR_UPDATE_PD		(IB_SEND_RESERVED_START << 4)
#define MLX5_IB_SEND_UMR_UPDATE_ACCESS	IB_SEND_RESERVED_END

#define MLX5_IB_QPT_REG_UMR	IB_QPT_RESERVED1
/*
 * IB_QPT_GSI creates the software wrapper around GSI, and MLX5_IB_QPT_HW_GSI
 * creates the actual hardware QP.
 */
#define MLX5_IB_QPT_HW_GSI	IB_QPT_RESERVED2
#define MLX5_IB_WR_UMR		IB_WR_RESERVED1

/* Private QP creation flags to be passed in ib_qp_init_attr.create_flags.
 *
 * These flags are intended for internal use by the mlx5_ib driver, and they
 * rely on the range reserved for that use in the ib_qp_create_flags enum.
 */

/* Create a UD QP whose source QP number is 1 */
static inline enum ib_qp_create_flags mlx5_ib_create_qp_sqpn_qp1(void)
{
	return IB_QP_CREATE_RESERVED_START;
}
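/*
 * A sketch of how the driver is expected to test for this flag when creating
 * a QP (init_attr is a hypothetical ib_qp_init_attr pointer; the flag
 * MLX5_IB_QP_SQPN_QP1 is defined further down in this header):
 *
 *	if (init_attr->create_flags & mlx5_ib_create_qp_sqpn_qp1())
 *		qp->flags |= MLX5_IB_QP_SQPN_QP1;
 */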

struct wr_list {
	u16	opcode;
	u16	next;
};

struct mlx5_ib_wq {
	u64		       *wrid;
	u32		       *wr_data;
	struct wr_list	       *w_list;
	unsigned	       *wqe_head;
	u16			unsig_count;

	/* serialize post to the work queue
	 */
	spinlock_t		lock;
	int			wqe_cnt;
	int			max_post;
	int			max_gs;
	int			offset;
	int			wqe_shift;
	unsigned		head;
	unsigned		tail;
	u16			cur_post;
	u16			last_poll;
	void		       *qend;
};

struct mlx5_ib_rwq {
	struct ib_wq		ibwq;
	struct mlx5_core_qp	core_qp;
	u32			rq_num_pas;
	u32			log_rq_stride;
	u32			log_rq_size;
	u32			rq_page_offset;
	u32			log_page_size;
	struct ib_umem		*umem;
	size_t			buf_size;
	unsigned int		page_shift;
	int			create_type;
	struct mlx5_db		db;
	u32			user_index;
	u32			wqe_count;
	u32			wqe_shift;
	int			wq_sig;
};

enum {
	MLX5_QP_USER,
	MLX5_QP_KERNEL,
	MLX5_QP_EMPTY
};

enum {
	MLX5_WQ_USER,
	MLX5_WQ_KERNEL
};

struct mlx5_ib_rwq_ind_table {
	struct ib_rwq_ind_table ib_rwq_ind_tbl;
	u32			rqtn;
};

/*
 * Connect-IB can trigger up to four concurrent pagefaults
 * per-QP.
 */
enum mlx5_ib_pagefault_context {
	MLX5_IB_PAGEFAULT_RESPONDER_READ,
	MLX5_IB_PAGEFAULT_REQUESTOR_READ,
	MLX5_IB_PAGEFAULT_RESPONDER_WRITE,
	MLX5_IB_PAGEFAULT_REQUESTOR_WRITE,
	MLX5_IB_PAGEFAULT_CONTEXTS
};

static inline enum mlx5_ib_pagefault_context
	mlx5_ib_get_pagefault_context(struct mlx5_pagefault *pagefault)
{
	return pagefault->flags & (MLX5_PFAULT_REQUESTOR | MLX5_PFAULT_WRITE);
}
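/*
 * This works because the enum above is ordered so that the
 * MLX5_PFAULT_REQUESTOR and MLX5_PFAULT_WRITE bits of pagefault->flags index
 * it directly; e.g. (assuming REQUESTOR is bit 0 and WRITE is bit 1), flags
 * with only MLX5_PFAULT_WRITE set map to MLX5_IB_PAGEFAULT_RESPONDER_WRITE.
 */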

struct mlx5_ib_pfault {
	struct work_struct	work;
	struct mlx5_pagefault	mpfault;
};

struct mlx5_ib_ubuffer {
	struct ib_umem	       *umem;
	int			buf_size;
	u64			buf_addr;
};

struct mlx5_ib_qp_base {
	struct mlx5_ib_qp	*container_mibqp;
	struct mlx5_core_qp	mqp;
	struct mlx5_ib_ubuffer	ubuffer;
};

struct mlx5_ib_qp_trans {
	struct mlx5_ib_qp_base	base;
	u16			xrcdn;
	u8			alt_port;
	u8			atomic_rd_en;
	u8			resp_depth;
};

struct mlx5_ib_rss_qp {
	u32	tirn;
};

struct mlx5_ib_rq {
	struct mlx5_ib_qp_base base;
	struct mlx5_ib_wq	*rq;
	struct mlx5_ib_ubuffer	ubuffer;
	struct mlx5_db		*doorbell;
	u32			tirn;
	u8			state;
};

struct mlx5_ib_sq {
	struct mlx5_ib_qp_base base;
	struct mlx5_ib_wq	*sq;
	struct mlx5_ib_ubuffer  ubuffer;
	struct mlx5_db		*doorbell;
	u32			tisn;
	u8			state;
};

struct mlx5_ib_raw_packet_qp {
	struct mlx5_ib_sq sq;
	struct mlx5_ib_rq rq;
};

struct mlx5_ib_qp {
	struct ib_qp		ibqp;
	union {
		struct mlx5_ib_qp_trans trans_qp;
		struct mlx5_ib_raw_packet_qp raw_packet_qp;
		struct mlx5_ib_rss_qp rss_qp;
	};
	struct mlx5_buf		buf;

	struct mlx5_db		db;
	struct mlx5_ib_wq	rq;

	u8			sq_signal_bits;
	u8			fm_cache;
	struct mlx5_ib_wq	sq;

	/* serialize qp state modifications
	 */
	struct mutex		mutex;
	u32			flags;
	u8			port;
	u8			state;
	int			wq_sig;
	int			scat_cqe;
	int			max_inline_data;
	struct mlx5_bf	       *bf;
	int			has_rq;

	/* only for user space QPs. For kernel
	 * we have it from the bf object
	 */
	int			uuarn;

	int			create_type;

	/* Store signature errors */
	bool			signature_en;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	/*
	 * A flag that is true for QPs that are in a state that doesn't
	 * allow page faults, and shouldn't schedule any more faults.
	 */
	int			disable_page_faults;
	/*
	 * The disable_page_faults_lock protects a QP's disable_page_faults
	 * field, allowing a thread to atomically check whether the QP
	 * allows page faults, and if so schedule a page fault.
	 */
	spinlock_t		disable_page_faults_lock;
	struct mlx5_ib_pfault	pagefaults[MLX5_IB_PAGEFAULT_CONTEXTS];
#endif
	struct list_head	qps_list;
	struct list_head	cq_recv_list;
	struct list_head	cq_send_list;
};

struct mlx5_ib_cq_buf {
	struct mlx5_buf		buf;
	struct ib_umem		*umem;
	int			cqe_size;
	int			nent;
};

enum mlx5_ib_qp_flags {
	MLX5_IB_QP_LSO				= IB_QP_CREATE_IPOIB_UD_LSO,
	MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK	= IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK,
	MLX5_IB_QP_CROSS_CHANNEL		= IB_QP_CREATE_CROSS_CHANNEL,
	MLX5_IB_QP_MANAGED_SEND			= IB_QP_CREATE_MANAGED_SEND,
	MLX5_IB_QP_MANAGED_RECV			= IB_QP_CREATE_MANAGED_RECV,
	MLX5_IB_QP_SIGNATURE_HANDLING		= 1 << 5,
	/* QP uses 1 as its source QP number */
	MLX5_IB_QP_SQPN_QP1			= 1 << 6,
	MLX5_IB_QP_CAP_SCATTER_FCS		= 1 << 7,
	MLX5_IB_QP_RSS				= 1 << 8,
};

struct mlx5_umr_wr {
	struct ib_send_wr		wr;
	union {
		u64			virt_addr;
		u64			offset;
	} target;
	struct ib_pd		       *pd;
	unsigned int			page_shift;
	unsigned int			npages;
	u32				length;
	int				access_flags;
	u32				mkey;
};

static inline struct mlx5_umr_wr *umr_wr(struct ib_send_wr *wr)
{
	return container_of(wr, struct mlx5_umr_wr, wr);
}

struct mlx5_shared_mr_info {
	int mr_id;
	struct ib_umem		*umem;
};

struct mlx5_ib_cq {
	struct ib_cq		ibcq;
	struct mlx5_core_cq	mcq;
	struct mlx5_ib_cq_buf	buf;
	struct mlx5_db		db;

	/* serialize access to the CQ
	 */
	spinlock_t		lock;

	/* protect resize cq
	 */
	struct mutex		resize_mutex;
	struct mlx5_ib_cq_buf  *resize_buf;
	struct ib_umem	       *resize_umem;
	int			cqe_size;
	struct list_head	list_send_qp;
	struct list_head	list_recv_qp;
	u32			create_flags;
	struct list_head	wc_list;
	enum ib_cq_notify_flags notify_flags;
	struct work_struct	notify_work;
};

struct mlx5_ib_wc {
	struct ib_wc wc;
	struct list_head list;
};

struct mlx5_ib_srq {
	struct ib_srq		ibsrq;
	struct mlx5_core_srq	msrq;
	struct mlx5_buf		buf;
	struct mlx5_db		db;
	u64		       *wrid;
	/* protect SRQ handling
	 */
	spinlock_t		lock;
	int			head;
	int			tail;
	u16			wqe_ctr;
	struct ib_umem	       *umem;
	/* serialize arming a SRQ
	 */
	struct mutex		mutex;
	int			wq_sig;
};

struct mlx5_ib_xrcd {
	struct ib_xrcd		ibxrcd;
	u32			xrcdn;
};

enum mlx5_ib_mtt_access_flags {
	MLX5_IB_MTT_READ  = (1 << 0),
	MLX5_IB_MTT_WRITE = (1 << 1),
};

#define MLX5_IB_MTT_PRESENT (MLX5_IB_MTT_READ | MLX5_IB_MTT_WRITE)

struct mlx5_ib_mr {
	struct ib_mr		ibmr;
	void			*descs;
	dma_addr_t		desc_map;
	int			ndescs;
	int			max_descs;
	int			desc_size;
	int			access_mode;
	struct mlx5_core_mkey	mmkey;
	struct ib_umem	       *umem;
	struct mlx5_shared_mr_info	*smr_info;
	struct list_head	list;
	int			order;
	int			umred;
	int			npages;
	struct mlx5_ib_dev     *dev;
	u32			out[MLX5_ST_SZ_DW(create_mkey_out)];
	struct mlx5_core_sig_ctx    *sig;
	int			live;
	void			*descs_alloc;
	int			access_flags; /* Needed for rereg MR */
};

struct mlx5_ib_mw {
	struct ib_mw		ibmw;
	struct mlx5_core_mkey	mmkey;
};

struct mlx5_ib_umr_context {
	struct ib_cqe		cqe;
	enum ib_wc_status	status;
	struct completion	done;
};

struct umr_common {
	struct ib_pd	*pd;
	struct ib_cq	*cq;
	struct ib_qp	*qp;
	/* control access to UMR QP
	 */
	struct semaphore	sem;
};

enum {
	MLX5_FMR_INVALID,
	MLX5_FMR_VALID,
	MLX5_FMR_BUSY,
};

struct mlx5_cache_ent {
	struct list_head	head;
	/* sync access to the cache entry
	 */
	spinlock_t		lock;

	struct dentry	       *dir;
	char			name[4];
	u32			order;
	u32			size;
	u32			cur;
	u32			miss;
	u32			limit;

	struct dentry	       *fsize;
	struct dentry	       *fcur;
	struct dentry	       *fmiss;
	struct dentry	       *flimit;

	struct mlx5_ib_dev     *dev;
	struct work_struct	work;
	struct delayed_work	dwork;
	int			pending;
};

struct mlx5_mr_cache {
	struct workqueue_struct *wq;
	struct mlx5_cache_ent	ent[MAX_MR_CACHE_ENTRIES];
	int			stopped;
	struct dentry		*root;
	unsigned long		last_add;
};

struct mlx5_ib_gsi_qp;

struct mlx5_ib_port_resources {
	struct mlx5_ib_resources *devr;
	struct mlx5_ib_gsi_qp *gsi;
	struct work_struct pkey_change_work;
};

struct mlx5_ib_resources {
	struct ib_cq	*c0;
	struct ib_xrcd	*x0;
	struct ib_xrcd	*x1;
	struct ib_pd	*p0;
	struct ib_srq	*s0;
	struct ib_srq	*s1;
	struct mlx5_ib_port_resources ports[2];
	/* Protects changes to the port resources */
	struct mutex	mutex;
};

struct mlx5_ib_port {
	u16 q_cnt_id;
};

struct mlx5_roce {
	/* Protect mlx5_ib_get_netdev from invoking dev_hold() with a NULL
	 * netdev pointer
	 */
	rwlock_t		netdev_lock;
	struct net_device	*netdev;
	struct notifier_block	nb;
	atomic_t		next_port;
};

struct mlx5_ib_dev {
	struct ib_device		ib_dev;
	struct mlx5_core_dev		*mdev;
	struct mlx5_roce		roce;
	MLX5_DECLARE_DOORBELL_LOCK(uar_lock);
	int				num_ports;
	/* serialize update of capability mask
	 */
	struct mutex			cap_mask_mutex;
	bool				ib_active;
	struct umr_common		umrc;
	/* sync used page count stats
	 */
	struct mlx5_ib_resources	devr;
	struct mlx5_mr_cache		cache;
	struct timer_list		delay_timer;
	int				fill_delay;
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	struct ib_odp_caps	odp_caps;
	/*
	 * Sleepable RCU that prevents destruction of MRs while they are still
	 * being used by a page fault handler.
	 */
	struct srcu_struct	mr_srcu;
#endif
	struct mlx5_ib_flow_db	flow_db;
	/* protect resources needed as part of reset flow */
	spinlock_t		reset_flow_resource_lock;
	struct list_head	qp_list;
	/* Array with num_ports elements */
	struct mlx5_ib_port	*port;
};

static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq)
{
	return container_of(mcq, struct mlx5_ib_cq, mcq);
}

static inline struct mlx5_ib_xrcd *to_mxrcd(struct ib_xrcd *ibxrcd)
{
	return container_of(ibxrcd, struct mlx5_ib_xrcd, ibxrcd);
}

static inline struct mlx5_ib_dev *to_mdev(struct ib_device *ibdev)
{
	return container_of(ibdev, struct mlx5_ib_dev, ib_dev);
}

static inline struct mlx5_ib_cq *to_mcq(struct ib_cq *ibcq)
{
	return container_of(ibcq, struct mlx5_ib_cq, ibcq);
}

static inline struct mlx5_ib_qp *to_mibqp(struct mlx5_core_qp *mqp)
{
	return container_of(mqp, struct mlx5_ib_qp_base, mqp)->container_mibqp;
}

static inline struct mlx5_ib_rwq *to_mibrwq(struct mlx5_core_qp *core_qp)
{
	return container_of(core_qp, struct mlx5_ib_rwq, core_qp);
}

static inline struct mlx5_ib_mr *to_mibmr(struct mlx5_core_mkey *mmkey)
{
	return container_of(mmkey, struct mlx5_ib_mr, mmkey);
}

static inline struct mlx5_ib_pd *to_mpd(struct ib_pd *ibpd)
{
	return container_of(ibpd, struct mlx5_ib_pd, ibpd);
}

static inline struct mlx5_ib_srq *to_msrq(struct ib_srq *ibsrq)
{
	return container_of(ibsrq, struct mlx5_ib_srq, ibsrq);
}

static inline struct mlx5_ib_qp *to_mqp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct mlx5_ib_qp, ibqp);
}

static inline struct mlx5_ib_rwq *to_mrwq(struct ib_wq *ibwq)
{
	return container_of(ibwq, struct mlx5_ib_rwq, ibwq);
}

static inline struct mlx5_ib_rwq_ind_table *to_mrwq_ind_table(struct ib_rwq_ind_table *ib_rwq_ind_tbl)
{
	return container_of(ib_rwq_ind_tbl, struct mlx5_ib_rwq_ind_table, ib_rwq_ind_tbl);
}

static inline struct mlx5_ib_srq *to_mibsrq(struct mlx5_core_srq *msrq)
{
	return container_of(msrq, struct mlx5_ib_srq, msrq);
}

static inline struct mlx5_ib_mr *to_mmr(struct ib_mr *ibmr)
{
	return container_of(ibmr, struct mlx5_ib_mr, ibmr);
}

static inline struct mlx5_ib_mw *to_mmw(struct ib_mw *ibmw)
{
	return container_of(ibmw, struct mlx5_ib_mw, ibmw);
}

struct mlx5_ib_ah {
	struct ib_ah		ibah;
	struct mlx5_av		av;
};

static inline struct mlx5_ib_ah *to_mah(struct ib_ah *ibah)
{
	return container_of(ibah, struct mlx5_ib_ah, ibah);
}

int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context, unsigned long virt,
			struct mlx5_db *db);
void mlx5_ib_db_unmap_user(struct mlx5_ib_ucontext *context, struct mlx5_db *db);
void __mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq);
void mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq);
void mlx5_ib_free_srq_wqe(struct mlx5_ib_srq *srq, int wqe_index);
int mlx5_MAD_IFC(struct mlx5_ib_dev *dev, int ignore_mkey, int ignore_bkey,
		 u8 port, const struct ib_wc *in_wc, const struct ib_grh *in_grh,
		 const void *in_mad, void *response_mad);
struct ib_ah *mlx5_ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr,
				struct ib_udata *udata);
int mlx5_ib_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr);
int mlx5_ib_destroy_ah(struct ib_ah *ah);
struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
				  struct ib_srq_init_attr *init_attr,
				  struct ib_udata *udata);
int mlx5_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
		       enum ib_srq_attr_mask attr_mask, struct ib_udata *udata);
int mlx5_ib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr);
int mlx5_ib_destroy_srq(struct ib_srq *srq);
int mlx5_ib_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
			  struct ib_recv_wr **bad_wr);
struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
				struct ib_qp_init_attr *init_attr,
				struct ib_udata *udata);
int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		      int attr_mask, struct ib_udata *udata);
int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
		     struct ib_qp_init_attr *qp_init_attr);
int mlx5_ib_destroy_qp(struct ib_qp *qp);
int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
		      struct ib_send_wr **bad_wr);
int mlx5_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
		      struct ib_recv_wr **bad_wr);
void *mlx5_get_send_wqe(struct mlx5_ib_qp *qp, int n);
int mlx5_ib_read_user_wqe(struct mlx5_ib_qp *qp, int send, int wqe_index,
			  void *buffer, u32 length,
			  struct mlx5_ib_qp_base *base);
struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev,
				const struct ib_cq_init_attr *attr,
				struct ib_ucontext *context,
				struct ib_udata *udata);
int mlx5_ib_destroy_cq(struct ib_cq *cq);
int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);
int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata);
struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc);
struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				  u64 virt_addr, int access_flags,
				  struct ib_udata *udata);
struct ib_mw *mlx5_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
			       struct ib_udata *udata);
int mlx5_ib_dealloc_mw(struct ib_mw *mw);
int mlx5_ib_update_mtt(struct mlx5_ib_mr *mr, u64 start_page_index,
		       int npages, int zap);
int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
			  u64 length, u64 virt_addr, int access_flags,
			  struct ib_pd *pd, struct ib_udata *udata);
int mlx5_ib_dereg_mr(struct ib_mr *ibmr);
struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd,
			       enum ib_mr_type mr_type,
			       u32 max_num_sg);
int mlx5_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
		      unsigned int *sg_offset);
int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
			const struct ib_wc *in_wc, const struct ib_grh *in_grh,
			const struct ib_mad_hdr *in, size_t in_mad_size,
			struct ib_mad_hdr *out, size_t *out_mad_size,
			u16 *out_mad_pkey_index);
struct ib_xrcd *mlx5_ib_alloc_xrcd(struct ib_device *ibdev,
				   struct ib_ucontext *context,
				   struct ib_udata *udata);
int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd);
int mlx5_ib_get_buf_offset(u64 addr, int page_shift, u32 *offset);
int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, u8 port);
int mlx5_query_mad_ifc_smp_attr_node_info(struct ib_device *ibdev,
					  struct ib_smp *out_mad);
int mlx5_query_mad_ifc_system_image_guid(struct ib_device *ibdev,
					 __be64 *sys_image_guid);
int mlx5_query_mad_ifc_max_pkeys(struct ib_device *ibdev,
				 u16 *max_pkeys);
int mlx5_query_mad_ifc_vendor_id(struct ib_device *ibdev,
				 u32 *vendor_id);
int mlx5_query_mad_ifc_node_desc(struct mlx5_ib_dev *dev, char *node_desc);
int mlx5_query_mad_ifc_node_guid(struct mlx5_ib_dev *dev, __be64 *node_guid);
int mlx5_query_mad_ifc_pkey(struct ib_device *ibdev, u8 port, u16 index,
			    u16 *pkey);
int mlx5_query_mad_ifc_gids(struct ib_device *ibdev, u8 port, int index,
			    union ib_gid *gid);
int mlx5_query_mad_ifc_port(struct ib_device *ibdev, u8 port,
			    struct ib_port_attr *props);
int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
		       struct ib_port_attr *props);
int mlx5_ib_init_fmr(struct mlx5_ib_dev *dev);
void mlx5_ib_cleanup_fmr(struct mlx5_ib_dev *dev);
void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr,
			unsigned long max_page_shift,
			int *count, int *shift,
			int *ncont, int *order);
void __mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
			    int page_shift, size_t offset, size_t num_pages,
			    __be64 *pas, int access_flags);
void mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
			  int page_shift, __be64 *pas, int access_flags);
void mlx5_ib_copy_pas(u64 *old, u64 *new, int step, int num);
int mlx5_ib_get_cqe_size(struct mlx5_ib_dev *dev, struct ib_cq *ibcq);
int mlx5_mr_cache_init(struct mlx5_ib_dev *dev);
int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev);
int mlx5_mr_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift);
int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
			    struct ib_mr_status *mr_status);
struct ib_wq *mlx5_ib_create_wq(struct ib_pd *pd,
				struct ib_wq_init_attr *init_attr,
				struct ib_udata *udata);
int mlx5_ib_destroy_wq(struct ib_wq *wq);
int mlx5_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
		      u32 wq_attr_mask, struct ib_udata *udata);
struct ib_rwq_ind_table *mlx5_ib_create_rwq_ind_table(struct ib_device *device,
						      struct ib_rwq_ind_table_init_attr *init_attr,
						      struct ib_udata *udata);
int mlx5_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table);

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
extern struct workqueue_struct *mlx5_ib_page_fault_wq;

void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev);
void mlx5_ib_mr_pfault_handler(struct mlx5_ib_qp *qp,
			       struct mlx5_ib_pfault *pfault);
void mlx5_ib_odp_create_qp(struct mlx5_ib_qp *qp);
int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev);
void mlx5_ib_odp_remove_one(struct mlx5_ib_dev *ibdev);
int __init mlx5_ib_odp_init(void);
void mlx5_ib_odp_cleanup(void);
void mlx5_ib_qp_disable_pagefaults(struct mlx5_ib_qp *qp);
void mlx5_ib_qp_enable_pagefaults(struct mlx5_ib_qp *qp);
void mlx5_ib_invalidate_range(struct ib_umem *umem, unsigned long start,
			      unsigned long end);
#else /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
static inline void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev)
{
	return;
}

static inline void mlx5_ib_odp_create_qp(struct mlx5_ib_qp *qp) {}
static inline int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev) { return 0; }
static inline void mlx5_ib_odp_remove_one(struct mlx5_ib_dev *ibdev) {}
static inline int mlx5_ib_odp_init(void) { return 0; }
static inline void mlx5_ib_odp_cleanup(void) {}
static inline void mlx5_ib_qp_disable_pagefaults(struct mlx5_ib_qp *qp) {}
static inline void mlx5_ib_qp_enable_pagefaults(struct mlx5_ib_qp *qp) {}

#endif /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */

int mlx5_ib_get_vf_config(struct ib_device *device, int vf,
			  u8 port, struct ifla_vf_info *info);
int mlx5_ib_set_vf_link_state(struct ib_device *device, int vf,
			      u8 port, int state);
int mlx5_ib_get_vf_stats(struct ib_device *device, int vf,
			 u8 port, struct ifla_vf_stats *stats);
int mlx5_ib_set_vf_guid(struct ib_device *device, int vf, u8 port,
			u64 guid, int type);

__be16 mlx5_get_roce_udp_sport(struct mlx5_ib_dev *dev, u8 port_num,
			       int index);

/* GSI QP helper functions */
struct ib_qp *mlx5_ib_gsi_create_qp(struct ib_pd *pd,
				    struct ib_qp_init_attr *init_attr);
int mlx5_ib_gsi_destroy_qp(struct ib_qp *qp);
int mlx5_ib_gsi_modify_qp(struct ib_qp *qp, struct ib_qp_attr *attr,
			  int attr_mask);
int mlx5_ib_gsi_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
			 int qp_attr_mask,
			 struct ib_qp_init_attr *qp_init_attr);
int mlx5_ib_gsi_post_send(struct ib_qp *qp, struct ib_send_wr *wr,
			  struct ib_send_wr **bad_wr);
int mlx5_ib_gsi_post_recv(struct ib_qp *qp, struct ib_recv_wr *wr,
			  struct ib_recv_wr **bad_wr);
void mlx5_ib_gsi_pkey_change(struct mlx5_ib_gsi_qp *gsi);

int mlx5_ib_generate_wc(struct ib_cq *ibcq, struct ib_wc *wc);

static inline void init_query_mad(struct ib_smp *mad)
{
	mad->base_version  = 1;
	mad->mgmt_class    = IB_MGMT_CLASS_SUBN_LID_ROUTED;
	mad->class_version = 1;
	mad->method	   = IB_MGMT_METHOD_GET;
}

static inline u8 convert_access(int acc)
{
	return (acc & IB_ACCESS_REMOTE_ATOMIC ? MLX5_PERM_ATOMIC       : 0) |
	       (acc & IB_ACCESS_REMOTE_WRITE  ? MLX5_PERM_REMOTE_WRITE : 0) |
	       (acc & IB_ACCESS_REMOTE_READ   ? MLX5_PERM_REMOTE_READ  : 0) |
	       (acc & IB_ACCESS_LOCAL_WRITE   ? MLX5_PERM_LOCAL_WRITE  : 0) |
	       MLX5_PERM_LOCAL_READ;
}
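/*
 * Example (a sketch derived from the function above): an MR registered with
 * IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ converts to
 * MLX5_PERM_LOCAL_WRITE | MLX5_PERM_REMOTE_READ | MLX5_PERM_LOCAL_READ.
 * Note that local read permission is always granted.
 */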

static inline int is_qp1(enum ib_qp_type qp_type)
{
	return qp_type == MLX5_IB_QPT_HW_GSI;
}

#define MLX5_MAX_UMR_SHIFT 16
#define MLX5_MAX_UMR_PAGES (1 << MLX5_MAX_UMR_SHIFT)

static inline u32 check_cq_create_flags(u32 flags)
{
	/*
	 * Returns a non-zero value for unsupported CQ create flags,
	 * otherwise returns zero.
	 */
	return (flags & ~(IB_CQ_FLAGS_IGNORE_OVERRUN |
			  IB_CQ_FLAGS_TIMESTAMP_COMPLETION));
}
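/*
 * A sketch of the intended call site in a CQ creation path (the specific
 * error code is an assumption, not copied from the .c file):
 *
 *	if (check_cq_create_flags(attr->flags))
 *		return ERR_PTR(-EINVAL);
 */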

static inline int verify_assign_uidx(u8 cqe_version, u32 cmd_uidx,
				     u32 *user_index)
{
	if (cqe_version) {
		if ((cmd_uidx == MLX5_IB_DEFAULT_UIDX) ||
		    (cmd_uidx & ~MLX5_USER_ASSIGNED_UIDX_MASK))
			return -EINVAL;
		*user_index = cmd_uidx;
	} else {
		*user_index = MLX5_IB_DEFAULT_UIDX;
	}

	return 0;
}
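/*
 * Example of the contract (a sketch derived from the function above): with
 * cqe_version == 1, a cmd_uidx of MLX5_IB_DEFAULT_UIDX (0xffffff), or one
 * with bits set outside MLX5_USER_ASSIGNED_UIDX_MASK, is rejected with
 * -EINVAL; with cqe_version == 0 the user value is ignored and the default
 * index is assigned.
 */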

static inline int get_qp_user_index(struct mlx5_ib_ucontext *ucontext,
				    struct mlx5_ib_create_qp *ucmd,
				    int inlen,
				    u32 *user_index)
{
	u8 cqe_version = ucontext->cqe_version;

	if (field_avail(struct mlx5_ib_create_qp, uidx, inlen) &&
	    !cqe_version && (ucmd->uidx == MLX5_IB_DEFAULT_UIDX))
		return 0;

	if (!!(field_avail(struct mlx5_ib_create_qp, uidx, inlen) !=
	       !!cqe_version))
		return -EINVAL;

	return verify_assign_uidx(cqe_version, ucmd->uidx, user_index);
}

static inline int get_srq_user_index(struct mlx5_ib_ucontext *ucontext,
				     struct mlx5_ib_create_srq *ucmd,
				     int inlen,
				     u32 *user_index)
{
	u8 cqe_version = ucontext->cqe_version;

	if (field_avail(struct mlx5_ib_create_srq, uidx, inlen) &&
	    !cqe_version && (ucmd->uidx == MLX5_IB_DEFAULT_UIDX))
		return 0;

	if (!!(field_avail(struct mlx5_ib_create_srq, uidx, inlen) !=
	       !!cqe_version))
		return -EINVAL;

	return verify_assign_uidx(cqe_version, ucmd->uidx, user_index);
}
#endif /* MLX5_IB_H */