blob: 0e49d5b30a4c5a96bae0d00a390da514cf1dce63 [file] [log] [blame]
Eli Cohene126ba92013-07-07 17:25:49 +03001/*
Saeed Mahameed6cf0a152015-04-02 17:07:30 +03002 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
Eli Cohene126ba92013-07-07 17:25:49 +03003 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#ifndef MLX5_IB_USER_H
34#define MLX5_IB_USER_H
35
36#include <linux/types.h>
37
Haggai Abramovskycfb5e082016-01-14 19:12:57 +020038#include "mlx5_ib.h"
39
/* Flags accepted in mlx5_ib_create_qp.flags */
enum {
	MLX5_QP_FLAG_SIGNATURE = 1 << 0,	/* enable signature (T10-DIF style) handover — assumption, TODO confirm in mlx5_ib.h */
	MLX5_QP_FLAG_SCATTER_CQE = 1 << 1,	/* scatter small payloads directly into the CQE */
};

/* Flags accepted in mlx5_ib_create_srq.flags */
enum {
	MLX5_SRQ_FLAG_SIGNATURE = 1 << 0,
};

/* Flags accepted in mlx5_ib_create_wq.flags */
enum {
	MLX5_WQ_FLAG_SIGNATURE = 1 << 0,
};
52
Eli Cohene126ba92013-07-07 17:25:49 +030053
/* Increment this value if any changes that break userspace ABI
 * compatibility are made. Userspace (libmlx5) compares this against the
 * version it was built for at context allocation time.
 */
#define MLX5_IB_UVERBS_ABI_VERSION 1
58
59/* Make sure that all structs defined in this file remain laid out so
60 * that they pack the same way on 32-bit and 64-bit architectures (to
61 * avoid incompatibility between 32-bit userspace and 64-bit kernels).
62 * In particular do not use pointer types -- pass pointers in __u64
63 * instead.
64 */
65
/* Original (v1) alloc_ucontext request: how many UAR slots ("uuars")
 * userspace wants, and how many of them should be low-latency (dedicated,
 * uncontended blue-flame registers) — assumption from field names, TODO
 * confirm against the allocation code in main.c.
 */
struct mlx5_ib_alloc_ucontext_req {
	__u32 total_num_uuars;
	__u32 num_low_latency_uuars;
};
70
/* Extended (v2) alloc_ucontext request. The first two fields mirror the
 * v1 layout so the kernel can parse either; the extras negotiate optional
 * features. Reserved fields must be zero so they can gain meaning later
 * without breaking existing binaries.
 */
struct mlx5_ib_alloc_ucontext_req_v2 {
	__u32 total_num_uuars;
	__u32 num_low_latency_uuars;
	__u32 flags;
	__u32 comp_mask;	/* which optional response fields userspace understands */
	__u8  max_cqe_version;	/* highest CQE format version userspace supports */
	__u8  reserved0;
	__u16 reserved1;
	__u32 reserved2;
};
81
/* Bits for mlx5_ib_alloc_ucontext_resp.comp_mask: which optional response
 * fields the kernel actually filled in.
 */
enum mlx5_ib_alloc_ucontext_resp_mask {
	MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_CORE_CLOCK_OFFSET = 1UL << 0,	/* hca_core_clock_offset is valid */
};
85
/* Bits for mlx5_ib_alloc_ucontext_resp.cmds_supp_uhw: which uverbs
 * commands accept the mlx5 vendor-specific "user hardware" (uhw) blob.
 */
enum mlx5_user_cmds_supp_uhw {
	MLX5_USER_CMDS_SUPP_UHW_QUERY_DEVICE = 1 << 0,
};
89
/* alloc_ucontext response: device limits and per-context resources handed
 * back to userspace. response_length lets old userspace consume a shorter
 * (prefix) struct; comp_mask flags which trailing optional fields are valid.
 */
struct mlx5_ib_alloc_ucontext_resp {
	__u32 qp_tab_size;
	__u32 bf_reg_size;		/* blue-flame register size in bytes */
	__u32 tot_uuars;		/* UAR slots actually granted */
	__u32 cache_line_size;
	__u16 max_sq_desc_sz;
	__u16 max_rq_desc_sz;
	__u32 max_send_wqebb;
	__u32 max_recv_wr;
	__u32 max_srq_recv_wr;
	__u16 num_ports;
	__u16 reserved1;
	__u32 comp_mask;		/* enum mlx5_ib_alloc_ucontext_resp_mask */
	__u32 response_length;		/* bytes of this struct the kernel filled */
	__u8  cqe_version;		/* negotiated CQE format version */
	__u8  cmds_supp_uhw;		/* enum mlx5_user_cmds_supp_uhw */
	__u16 reserved2;
	__u64 hca_core_clock_offset;	/* valid iff CORE_CLOCK_OFFSET bit set in comp_mask */
};
109
/* alloc_pd response: the hardware protection-domain number. */
struct mlx5_ib_alloc_pd_resp {
	__u32 pdn;
};
113
/* TSO capabilities reported through the extended query_device response. */
struct mlx5_ib_tso_caps {
	__u32 max_tso; /* Maximum tso payload size in bytes */

	/* Corresponding bit will be set if qp type from
	 * 'enum ib_qp_type' is supported, e.g.
	 * supported_qpts |= 1 << IB_QPT_UD
	 */
	__u32 supported_qpts;
};
123
/* RSS capabilities reported through the extended query_device response. */
struct mlx5_ib_rss_caps {
	__u64 rx_hash_fields_mask; /* enum mlx5_rx_hash_fields */
	__u8 rx_hash_function; /* enum mlx5_rx_hash_function_flags */
	__u8 reserved[7];	/* pad to 8-byte alignment for the __u64 above */
};
129
/* Vendor-specific tail of the extended query_device response.
 * response_length tells userspace how much of the struct the kernel
 * filled, so new capability blocks can be appended over time.
 */
struct mlx5_ib_query_device_resp {
	__u32 comp_mask;
	__u32 response_length;
	struct mlx5_ib_tso_caps tso_caps;
	struct mlx5_ib_rss_caps rss_caps;
};
136
/* create_cq request: userspace-allocated CQ buffer and doorbell record
 * addresses (passed as __u64, never pointers — see layout note above).
 */
struct mlx5_ib_create_cq {
	__u64 buf_addr;
	__u64 db_addr;
	__u32 cqe_size;
	__u32 reserved; /* explicit padding (optional on i386) */
};
143
/* create_cq response: the hardware CQ number. */
struct mlx5_ib_create_cq_resp {
	__u32 cqn;
	__u32 reserved;
};
148
/* resize_cq request: address of the new, userspace-allocated CQ buffer. */
struct mlx5_ib_resize_cq {
	__u64 buf_addr;
	__u16 cqe_size;
	__u16 reserved0;
	__u32 reserved1;
};
155
/* create_srq request. uidx is only meaningful when the context negotiated
 * a non-zero cqe_version — see get_srq_user_index() below.
 */
struct mlx5_ib_create_srq {
	__u64 buf_addr;
	__u64 db_addr;
	__u32 flags;		/* MLX5_SRQ_FLAG_* */
	__u32 reserved0; /* explicit padding (optional on i386) */
	__u32 uidx;
	__u32 reserved1;
};
164
/* create_srq response: the hardware SRQ number. */
struct mlx5_ib_create_srq_resp {
	__u32 srqn;
	__u32 reserved;
};
169
/* create_qp request: userspace-allocated work-queue buffer geometry and
 * doorbell address. uidx is only meaningful when the context negotiated a
 * non-zero cqe_version — see get_qp_user_index() below.
 */
struct mlx5_ib_create_qp {
	__u64 buf_addr;
	__u64 db_addr;
	__u32 sq_wqe_count;
	__u32 rq_wqe_count;
	__u32 rq_wqe_shift;	/* log2 of the RQ WQE stride */
	__u32 flags;		/* MLX5_QP_FLAG_* */
	__u32 uidx;
	__u32 reserved0;
	__u64 sq_buf_addr;	/* separate SQ buffer; presumably used when SQ and RQ buffers are split — TODO confirm (raw packet QP) */
};
181
/* RX Hash function flags */
enum mlx5_rx_hash_function_flags {
	MLX5_RX_HASH_FUNC_TOEPLITZ = 1 << 0,
};
186
/*
 * RX Hash flags: these flags select which fields of an incoming packet
 * participate in the RX hash calculation. Each flag represents one packet
 * field; when the flag is set, that field is included in the hash.
 * Note: *IPV4 and *IPV6 flags can't be enabled together on the same QP
 * and *TCP and *UDP flags can't be enabled together on the same QP.
 */
enum mlx5_rx_hash_fields {
	MLX5_RX_HASH_SRC_IPV4 = 1 << 0,
	MLX5_RX_HASH_DST_IPV4 = 1 << 1,
	MLX5_RX_HASH_SRC_IPV6 = 1 << 2,
	MLX5_RX_HASH_DST_IPV6 = 1 << 3,
	MLX5_RX_HASH_SRC_PORT_TCP = 1 << 4,
	MLX5_RX_HASH_DST_PORT_TCP = 1 << 5,
	MLX5_RX_HASH_SRC_PORT_UDP = 1 << 6,
	MLX5_RX_HASH_DST_PORT_UDP = 1 << 7
};
205
/* create_qp request variant for an RSS (receive-side-scaling) QP. */
struct mlx5_ib_create_qp_rss {
	__u64 rx_hash_fields_mask; /* enum mlx5_rx_hash_fields */
	__u8 rx_hash_function; /* enum mlx5_rx_hash_function_flags */
	__u8 rx_key_len; /* valid only for Toeplitz */
	__u8 reserved[6];
	__u8 rx_hash_key[128]; /* valid only for Toeplitz */
	__u32 comp_mask;
	__u32 reserved1;
};
215
/* create_qp response: which UAR slot the QP's doorbell maps to. */
struct mlx5_ib_create_qp_resp {
	__u32 uuar_index;
};
Haggai Abramovskycfb5e082016-01-14 19:12:57 +0200219
/* alloc_mw (memory window) request. */
struct mlx5_ib_alloc_mw {
	__u32 comp_mask;
	__u8  num_klms;	/* number of KLM translation entries — assumption from name, TODO confirm */
	__u8  reserved1;
	__u16 reserved2;
};
226
/* create_wq (receive work queue) request: buffer/doorbell addresses and
 * RQ geometry, analogous to the RQ half of mlx5_ib_create_qp.
 */
struct mlx5_ib_create_wq {
	__u64 buf_addr;
	__u64 db_addr;
	__u32 rq_wqe_count;
	__u32 rq_wqe_shift;
	__u32 user_index;
	__u32 flags;		/* MLX5_WQ_FLAG_* */
	__u32 comp_mask;
	__u32 reserved;
};
237
/* create_wq response; extensible via response_length. */
struct mlx5_ib_create_wq_resp {
	__u32 response_length;
	__u32 reserved;
};
242
/* create RWQ indirection-table response; extensible via response_length. */
struct mlx5_ib_create_rwq_ind_tbl_resp {
	__u32 response_length;
	__u32 reserved;
};
247
/* modify_wq request; currently carries no modifiable vendor fields. */
struct mlx5_ib_modify_wq {
	__u32 comp_mask;
	__u32 reserved;
};
252
Haggai Abramovskycfb5e082016-01-14 19:12:57 +0200253static inline int get_qp_user_index(struct mlx5_ib_ucontext *ucontext,
254 struct mlx5_ib_create_qp *ucmd,
255 int inlen,
256 u32 *user_index)
257{
258 u8 cqe_version = ucontext->cqe_version;
259
260 if (field_avail(struct mlx5_ib_create_qp, uidx, inlen) &&
261 !cqe_version && (ucmd->uidx == MLX5_IB_DEFAULT_UIDX))
262 return 0;
263
264 if (!!(field_avail(struct mlx5_ib_create_qp, uidx, inlen) !=
265 !!cqe_version))
266 return -EINVAL;
267
268 return verify_assign_uidx(cqe_version, ucmd->uidx, user_index);
269}
270
271static inline int get_srq_user_index(struct mlx5_ib_ucontext *ucontext,
272 struct mlx5_ib_create_srq *ucmd,
273 int inlen,
274 u32 *user_index)
275{
276 u8 cqe_version = ucontext->cqe_version;
277
278 if (field_avail(struct mlx5_ib_create_srq, uidx, inlen) &&
279 !cqe_version && (ucmd->uidx == MLX5_IB_DEFAULT_UIDX))
280 return 0;
281
282 if (!!(field_avail(struct mlx5_ib_create_srq, uidx, inlen) !=
283 !!cqe_version))
284 return -EINVAL;
285
286 return verify_assign_uidx(cqe_version, ucmd->uidx, user_index);
287}
Eli Cohene126ba92013-07-07 17:25:49 +0300288#endif /* MLX5_IB_USER_H */