blob: 6a47ba85c54c1109f32db61d700d81a23790474e [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * Copyright (c) 2004 Mellanox Technologies Ltd. All rights reserved.
3 * Copyright (c) 2004 Infinicon Corporation. All rights reserved.
4 * Copyright (c) 2004 Intel Corporation. All rights reserved.
5 * Copyright (c) 2004 Topspin Corporation. All rights reserved.
6 * Copyright (c) 2004 Voltaire Corporation. All rights reserved.
Roland Dreier2a1d9b72005-08-10 23:03:10 -07007 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
Roland Dreierf7c6a7b2007-03-04 16:15:11 -08008 * Copyright (c) 2005, 2006, 2007 Cisco Systems. All rights reserved.
Linus Torvalds1da177e2005-04-16 15:20:36 -07009 *
10 * This software is available to you under a choice of one of two
11 * licenses. You may choose to be licensed under the terms of the GNU
12 * General Public License (GPL) Version 2, available from the file
13 * COPYING in the main directory of this source tree, or the
14 * OpenIB.org BSD license below:
15 *
16 * Redistribution and use in source and binary forms, with or
17 * without modification, are permitted provided that the following
18 * conditions are met:
19 *
20 * - Redistributions of source code must retain the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer.
23 *
24 * - Redistributions in binary form must reproduce the above
25 * copyright notice, this list of conditions and the following
26 * disclaimer in the documentation and/or other materials
27 * provided with the distribution.
28 *
29 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
30 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
31 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
32 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
33 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
34 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
35 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
36 * SOFTWARE.
Linus Torvalds1da177e2005-04-16 15:20:36 -070037 */
38
39#if !defined(IB_VERBS_H)
40#define IB_VERBS_H
41
42#include <linux/types.h>
43#include <linux/device.h>
Ralph Campbell9b513092006-12-12 14:27:41 -080044#include <linux/dma-mapping.h>
Michael S. Tsirkin459d6e22007-02-04 14:11:55 -080045#include <linux/kref.h>
Dotan Barakbfb3ea12007-07-31 16:49:15 +030046#include <linux/list.h>
47#include <linux/rwsem.h>
Tejun Heof0626712010-10-19 15:24:36 +000048#include <linux/workqueue.h>
Christoph Hellwig14d3a3b2015-12-11 11:53:03 -080049#include <linux/irq_poll.h>
Matan Barakdd5f03b2013-12-12 18:03:11 +020050#include <uapi/linux/if_ether.h>
Somnath Koturc865f242015-12-23 14:56:51 +020051#include <net/ipv6.h>
52#include <net/ip.h>
Matan Barak301a7212015-12-15 20:30:10 +020053#include <linux/string.h>
54#include <linux/slab.h>
Vishwanathapura, Niranjana2fc77572017-04-12 20:29:20 -070055#include <linux/netdevice.h>
Parav Pandit01b67112018-11-16 03:50:57 +020056#include <linux/refcount.h>
Eli Cohen50174a72016-03-11 22:58:38 +020057#include <linux/if_link.h>
Arun Sharma600634972011-07-26 16:09:06 -070058#include <linux/atomic.h>
Haggai Eran882214e2014-12-11 17:04:18 +020059#include <linux/mmu_notifier.h>
Linus Torvalds7c0f6ba2016-12-24 11:46:01 -080060#include <linux/uaccess.h>
Parav Pandit43579b52017-01-10 00:02:14 +000061#include <linux/cgroup_rdma.h>
Leon Romanovskyf6316032019-03-28 15:12:58 +020062#include <linux/irqflags.h>
63#include <linux/preempt.h>
Yamin Friedmanda662972019-07-08 13:59:03 +030064#include <linux/dim.h>
Nicolas Dichtelea6819e2017-03-27 14:20:14 +020065#include <uapi/rdma/ib_user_verbs.h>
Mark Zhang413d3342019-07-02 13:02:34 +030066#include <rdma/rdma_counter.h>
Leon Romanovsky02d88832018-01-28 11:17:20 +020067#include <rdma/restrack.h>
Max Gurtovoy36b1e472019-06-11 18:52:37 +030068#include <rdma/signature.h>
Matan Barak0ede73b2018-03-19 15:02:34 +020069#include <uapi/rdma/rdma_user_ioctl.h>
Matan Barak2eb9bea2018-03-28 09:27:45 +030070#include <uapi/rdma/ib_user_ioctl_verbs.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070071
Leon Romanovsky9abb0d12017-06-27 16:49:53 +030072#define IB_FW_VERSION_NAME_MAX ETHTOOL_FWVERS_LEN
73
Jason Gunthorpeb5231b02018-09-16 20:48:04 +030074struct ib_umem_odp;
75
Tejun Heof0626712010-10-19 15:24:36 +000076extern struct workqueue_struct *ib_wq;
Christoph Hellwig14d3a3b2015-12-11 11:53:03 -080077extern struct workqueue_struct *ib_comp_wq;
Jack Morgensteinf7948092018-08-27 08:35:55 +030078extern struct workqueue_struct *ib_comp_unbound_wq;
Tejun Heof0626712010-10-19 15:24:36 +000079
Gal Pressman923abb92019-05-01 13:48:13 +030080__printf(3, 4) __cold
81void ibdev_printk(const char *level, const struct ib_device *ibdev,
82 const char *format, ...);
83__printf(2, 3) __cold
84void ibdev_emerg(const struct ib_device *ibdev, const char *format, ...);
85__printf(2, 3) __cold
86void ibdev_alert(const struct ib_device *ibdev, const char *format, ...);
87__printf(2, 3) __cold
88void ibdev_crit(const struct ib_device *ibdev, const char *format, ...);
89__printf(2, 3) __cold
90void ibdev_err(const struct ib_device *ibdev, const char *format, ...);
91__printf(2, 3) __cold
92void ibdev_warn(const struct ib_device *ibdev, const char *format, ...);
93__printf(2, 3) __cold
94void ibdev_notice(const struct ib_device *ibdev, const char *format, ...);
95__printf(2, 3) __cold
96void ibdev_info(const struct ib_device *ibdev, const char *format, ...);
97
98#if defined(CONFIG_DYNAMIC_DEBUG)
99#define ibdev_dbg(__dev, format, args...) \
100 dynamic_ibdev_dbg(__dev, format, ##args)
Gal Pressman923abb92019-05-01 13:48:13 +0300101#else
102__printf(2, 3) __cold
103static inline
104void ibdev_dbg(const struct ib_device *ibdev, const char *format, ...) {}
105#endif
106
Gal Pressman05bb4112019-08-01 20:14:46 +0300107#define ibdev_level_ratelimited(ibdev_level, ibdev, fmt, ...) \
108do { \
109 static DEFINE_RATELIMIT_STATE(_rs, \
110 DEFAULT_RATELIMIT_INTERVAL, \
111 DEFAULT_RATELIMIT_BURST); \
112 if (__ratelimit(&_rs)) \
113 ibdev_level(ibdev, fmt, ##__VA_ARGS__); \
114} while (0)
115
116#define ibdev_emerg_ratelimited(ibdev, fmt, ...) \
117 ibdev_level_ratelimited(ibdev_emerg, ibdev, fmt, ##__VA_ARGS__)
118#define ibdev_alert_ratelimited(ibdev, fmt, ...) \
119 ibdev_level_ratelimited(ibdev_alert, ibdev, fmt, ##__VA_ARGS__)
120#define ibdev_crit_ratelimited(ibdev, fmt, ...) \
121 ibdev_level_ratelimited(ibdev_crit, ibdev, fmt, ##__VA_ARGS__)
122#define ibdev_err_ratelimited(ibdev, fmt, ...) \
123 ibdev_level_ratelimited(ibdev_err, ibdev, fmt, ##__VA_ARGS__)
124#define ibdev_warn_ratelimited(ibdev, fmt, ...) \
125 ibdev_level_ratelimited(ibdev_warn, ibdev, fmt, ##__VA_ARGS__)
126#define ibdev_notice_ratelimited(ibdev, fmt, ...) \
127 ibdev_level_ratelimited(ibdev_notice, ibdev, fmt, ##__VA_ARGS__)
128#define ibdev_info_ratelimited(ibdev, fmt, ...) \
129 ibdev_level_ratelimited(ibdev_info, ibdev, fmt, ##__VA_ARGS__)
130
131#if defined(CONFIG_DYNAMIC_DEBUG)
132/* descriptor check is first to prevent flooding with "callbacks suppressed" */
133#define ibdev_dbg_ratelimited(ibdev, fmt, ...) \
134do { \
135 static DEFINE_RATELIMIT_STATE(_rs, \
136 DEFAULT_RATELIMIT_INTERVAL, \
137 DEFAULT_RATELIMIT_BURST); \
138 DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \
139 if (DYNAMIC_DEBUG_BRANCH(descriptor) && __ratelimit(&_rs)) \
140 __dynamic_ibdev_dbg(&descriptor, ibdev, fmt, \
141 ##__VA_ARGS__); \
142} while (0)
143#else
144__printf(2, 3) __cold
145static inline
146void ibdev_dbg_ratelimited(const struct ib_device *ibdev, const char *format, ...) {}
147#endif
148
Linus Torvalds1da177e2005-04-16 15:20:36 -0700149union ib_gid {
150 u8 raw[16];
151 struct {
Sean Hefty97f52eb2005-08-13 21:05:57 -0700152 __be64 subnet_prefix;
153 __be64 interface_id;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700154 } global;
155};
156
Moni Shouae26be1b2015-07-30 18:33:29 +0300157extern union ib_gid zgid;
158
Matan Barakb39ffa12015-12-23 14:56:47 +0200159enum ib_gid_type {
160 /* If link layer is Ethernet, this is RoCE V1 */
161 IB_GID_TYPE_IB = 0,
162 IB_GID_TYPE_ROCE = 0,
Matan Barak7766a992015-12-23 14:56:50 +0200163 IB_GID_TYPE_ROCE_UDP_ENCAP = 1,
Matan Barakb39ffa12015-12-23 14:56:47 +0200164 IB_GID_TYPE_SIZE
165};
166
Moni Shoua7ead4bc2016-01-14 17:50:38 +0200167#define ROCE_V2_UDP_DPORT 4791
Matan Barak03db3a22015-07-30 18:33:26 +0300168struct ib_gid_attr {
Parav Pandit943bd982019-05-02 10:48:07 +0300169 struct net_device __rcu *ndev;
Parav Pandit598ff6b2018-04-01 15:08:21 +0300170 struct ib_device *device;
Parav Panditb150c382018-06-05 08:40:15 +0300171 union ib_gid gid;
Parav Pandit598ff6b2018-04-01 15:08:21 +0300172 enum ib_gid_type gid_type;
173 u16 index;
174 u8 port_num;
Matan Barak03db3a22015-07-30 18:33:26 +0300175};
176
Eli Cohena0c1b2a2016-03-11 22:58:37 +0200177enum {
178 /* set the local administered indication */
179 IB_SA_WELL_KNOWN_GUID = BIT_ULL(57) | 2,
180};
181
Tom Tucker07ebafb2006-08-03 16:02:42 -0500182enum rdma_transport_type {
183 RDMA_TRANSPORT_IB,
Upinder Malhi \(umalhi\)180771a2013-09-10 03:36:59 +0000184 RDMA_TRANSPORT_IWARP,
Upinder Malhi248567f2014-01-09 14:48:19 -0800185 RDMA_TRANSPORT_USNIC,
Gal Pressmanf95be3d2019-05-05 20:59:21 +0300186 RDMA_TRANSPORT_USNIC_UDP,
187 RDMA_TRANSPORT_UNSPECIFIED,
Tom Tucker07ebafb2006-08-03 16:02:42 -0500188};
189
Michael Wang6b90a6d2015-05-05 14:50:18 +0200190enum rdma_protocol_type {
191 RDMA_PROTOCOL_IB,
192 RDMA_PROTOCOL_IBOE,
193 RDMA_PROTOCOL_IWARP,
194 RDMA_PROTOCOL_USNIC_UDP
195};
196
Roland Dreier8385fd82014-06-04 10:00:16 -0700197__attribute_const__ enum rdma_transport_type
Jason Gunthorpe5d60c112019-06-13 21:38:17 -0300198rdma_node_get_transport(unsigned int node_type);
Tom Tucker07ebafb2006-08-03 16:02:42 -0500199
Somnath Koturc865f242015-12-23 14:56:51 +0200200enum rdma_network_type {
201 RDMA_NETWORK_IB,
202 RDMA_NETWORK_ROCE_V1 = RDMA_NETWORK_IB,
203 RDMA_NETWORK_IPV4,
204 RDMA_NETWORK_IPV6
205};
206
207static inline enum ib_gid_type ib_network_to_gid_type(enum rdma_network_type network_type)
208{
209 if (network_type == RDMA_NETWORK_IPV4 ||
210 network_type == RDMA_NETWORK_IPV6)
211 return IB_GID_TYPE_ROCE_UDP_ENCAP;
212
213 /* IB_GID_TYPE_IB same as RDMA_NETWORK_ROCE_V1 */
214 return IB_GID_TYPE_IB;
215}
216
Parav Pandit47ec3862018-06-13 10:22:06 +0300217static inline enum rdma_network_type
218rdma_gid_attr_network_type(const struct ib_gid_attr *attr)
Somnath Koturc865f242015-12-23 14:56:51 +0200219{
Parav Pandit47ec3862018-06-13 10:22:06 +0300220 if (attr->gid_type == IB_GID_TYPE_IB)
Somnath Koturc865f242015-12-23 14:56:51 +0200221 return RDMA_NETWORK_IB;
222
Parav Pandit47ec3862018-06-13 10:22:06 +0300223 if (ipv6_addr_v4mapped((struct in6_addr *)&attr->gid))
Somnath Koturc865f242015-12-23 14:56:51 +0200224 return RDMA_NETWORK_IPV4;
225 else
226 return RDMA_NETWORK_IPV6;
227}
228
Eli Cohena3f5ada2010-09-27 17:51:10 -0700229enum rdma_link_layer {
230 IB_LINK_LAYER_UNSPECIFIED,
231 IB_LINK_LAYER_INFINIBAND,
232 IB_LINK_LAYER_ETHERNET,
233};
234
Linus Torvalds1da177e2005-04-16 15:20:36 -0700235enum ib_device_cap_flags {
Leon Romanovsky7ca0bc52015-12-20 12:16:09 +0200236 IB_DEVICE_RESIZE_MAX_WR = (1 << 0),
237 IB_DEVICE_BAD_PKEY_CNTR = (1 << 1),
238 IB_DEVICE_BAD_QKEY_CNTR = (1 << 2),
239 IB_DEVICE_RAW_MULTI = (1 << 3),
240 IB_DEVICE_AUTO_PATH_MIG = (1 << 4),
241 IB_DEVICE_CHANGE_PHY_PORT = (1 << 5),
242 IB_DEVICE_UD_AV_PORT_ENFORCE = (1 << 6),
243 IB_DEVICE_CURR_QP_STATE_MOD = (1 << 7),
244 IB_DEVICE_SHUTDOWN_PORT = (1 << 8),
Leon Romanovsky78b57f92017-08-17 15:50:37 +0300245 /* Not in use, former INIT_TYPE = (1 << 9),*/
Leon Romanovsky7ca0bc52015-12-20 12:16:09 +0200246 IB_DEVICE_PORT_ACTIVE_EVENT = (1 << 10),
247 IB_DEVICE_SYS_IMAGE_GUID = (1 << 11),
248 IB_DEVICE_RC_RNR_NAK_GEN = (1 << 12),
249 IB_DEVICE_SRQ_RESIZE = (1 << 13),
250 IB_DEVICE_N_NOTIFY_CQ = (1 << 14),
Christoph Hellwigb1adc712015-12-23 19:12:45 +0100251
252 /*
253 * This device supports a per-device lkey or stag that can be
254 * used without performing a memory registration for the local
255 * memory. Note that ULPs should never check this flag, but
256 * instead of use the local_dma_lkey flag in the ib_pd structure,
257 * which will always contain a usable lkey.
258 */
Leon Romanovsky7ca0bc52015-12-20 12:16:09 +0200259 IB_DEVICE_LOCAL_DMA_LKEY = (1 << 15),
Leon Romanovsky78b57f92017-08-17 15:50:37 +0300260 /* Reserved, old SEND_W_INV = (1 << 16),*/
Leon Romanovsky7ca0bc52015-12-20 12:16:09 +0200261 IB_DEVICE_MEM_WINDOW = (1 << 17),
Eli Cohene0605d92008-01-30 18:30:57 +0200262 /*
263 * Devices should set IB_DEVICE_UD_IP_SUM if they support
264 * insertion of UDP and TCP checksum on outgoing UD IPoIB
265 * messages and can verify the validity of checksum for
266 * incoming messages. Setting this flag implies that the
267 * IPoIB driver may set NETIF_F_IP_CSUM for datagram mode.
268 */
Leon Romanovsky7ca0bc52015-12-20 12:16:09 +0200269 IB_DEVICE_UD_IP_CSUM = (1 << 18),
270 IB_DEVICE_UD_TSO = (1 << 19),
271 IB_DEVICE_XRC = (1 << 20),
Christoph Hellwigb1adc712015-12-23 19:12:45 +0100272
273 /*
274 * This device supports the IB "base memory management extension",
275 * which includes support for fast registrations (IB_WR_REG_MR,
276 * IB_WR_LOCAL_INV and IB_WR_SEND_WITH_INV verbs). This flag should
277 * also be set by any iWarp device which must support FRs to comply
278 * to the iWarp verbs spec. iWarp devices also support the
279 * IB_WR_RDMA_READ_WITH_INV verb for RDMA READs that invalidate the
280 * stag.
281 */
Leon Romanovsky7ca0bc52015-12-20 12:16:09 +0200282 IB_DEVICE_MEM_MGT_EXTENSIONS = (1 << 21),
283 IB_DEVICE_BLOCK_MULTICAST_LOOPBACK = (1 << 22),
284 IB_DEVICE_MEM_WINDOW_TYPE_2A = (1 << 23),
285 IB_DEVICE_MEM_WINDOW_TYPE_2B = (1 << 24),
286 IB_DEVICE_RC_IP_CSUM = (1 << 25),
Noa Osherovichebaaee22017-01-18 15:39:54 +0200287 /* Deprecated. Please use IB_RAW_PACKET_CAP_IP_CSUM. */
Leon Romanovsky7ca0bc52015-12-20 12:16:09 +0200288 IB_DEVICE_RAW_IP_CSUM = (1 << 26),
Leon Romanovsky8a06ce52015-12-20 12:16:10 +0200289 /*
290 * Devices should set IB_DEVICE_CROSS_CHANNEL if they
291 * support execution of WQEs that involve synchronization
292 * of I/O operations with single completion queue managed
293 * by hardware.
294 */
Leon Romanovsky78b57f92017-08-17 15:50:37 +0300295 IB_DEVICE_CROSS_CHANNEL = (1 << 27),
Leon Romanovsky7ca0bc52015-12-20 12:16:09 +0200296 IB_DEVICE_MANAGED_FLOW_STEERING = (1 << 29),
Israel Rukshinc0a6cbb2019-06-11 18:52:50 +0300297 IB_DEVICE_INTEGRITY_HANDOVER = (1 << 30),
Max Gurtovoy47355b32016-06-06 19:34:39 +0300298 IB_DEVICE_ON_DEMAND_PAGING = (1ULL << 31),
Sagi Grimbergf5aa9152016-02-29 19:07:32 +0200299 IB_DEVICE_SG_GAPS_REG = (1ULL << 32),
Max Gurtovoyc7e162a2016-06-06 19:34:40 +0300300 IB_DEVICE_VIRTUAL_FUNCTION = (1ULL << 33),
Noa Osherovichebaaee22017-01-18 15:39:54 +0200301 /* Deprecated. Please use IB_RAW_PACKET_CAP_SCATTER_FCS. */
Max Gurtovoyc7e162a2016-06-06 19:34:40 +0300302 IB_DEVICE_RAW_SCATTER_FCS = (1ULL << 34),
Vishwanathapura, Niranjana62e45942017-04-12 20:29:21 -0700303 IB_DEVICE_RDMA_NETDEV_OPA_VNIC = (1ULL << 35),
Noa Osheroviche1d2e882017-10-29 13:59:44 +0200304 /* The device supports padding incoming writes to cacheline. */
305 IB_DEVICE_PCI_WRITE_END_PADDING = (1ULL << 36),
Steve Wise3856ec42019-02-15 11:03:53 -0800306 IB_DEVICE_ALLOW_USER_UNREG = (1ULL << 37),
Sagi Grimberg1b01d332014-02-23 14:19:05 +0200307};
308
Linus Torvalds1da177e2005-04-16 15:20:36 -0700309enum ib_atomic_cap {
310 IB_ATOMIC_NONE,
311 IB_ATOMIC_HCA,
312 IB_ATOMIC_GLOB
313};
314
Sagi Grimberg860f10a2014-12-11 17:04:16 +0200315enum ib_odp_general_cap_bits {
Artemy Kovalyov25bf14d2017-01-18 16:58:06 +0200316 IB_ODP_SUPPORT = 1 << 0,
317 IB_ODP_SUPPORT_IMPLICIT = 1 << 1,
Sagi Grimberg860f10a2014-12-11 17:04:16 +0200318};
319
320enum ib_odp_transport_cap_bits {
321 IB_ODP_SUPPORT_SEND = 1 << 0,
322 IB_ODP_SUPPORT_RECV = 1 << 1,
323 IB_ODP_SUPPORT_WRITE = 1 << 2,
324 IB_ODP_SUPPORT_READ = 1 << 3,
325 IB_ODP_SUPPORT_ATOMIC = 1 << 4,
Moni Shouada823342019-01-22 08:48:41 +0200326 IB_ODP_SUPPORT_SRQ_RECV = 1 << 5,
Sagi Grimberg860f10a2014-12-11 17:04:16 +0200327};
328
329struct ib_odp_caps {
330 uint64_t general_caps;
331 struct {
332 uint32_t rc_odp_caps;
333 uint32_t uc_odp_caps;
334 uint32_t ud_odp_caps;
Moni Shoua52a72e22019-01-22 08:48:42 +0200335 uint32_t xrc_odp_caps;
Sagi Grimberg860f10a2014-12-11 17:04:16 +0200336 } per_transport_caps;
337};
338
Yishai Hadasccf20562016-08-28 11:28:43 +0300339struct ib_rss_caps {
340 /* Corresponding bit will be set if qp type from
341 * 'enum ib_qp_type' is supported, e.g.
342 * supported_qpts |= 1 << IB_QPT_UD
343 */
344 u32 supported_qpts;
345 u32 max_rwq_indirection_tables;
346 u32 max_rwq_indirection_table_size;
347};
348
Artemy Kovalyov6938fc1e2017-08-17 15:52:03 +0300349enum ib_tm_cap_flags {
Danit Goldberg89705e92019-07-05 19:21:57 +0300350 /* Support tag matching with rendezvous offload for RC transport */
351 IB_TM_CAP_RNDV_RC = 1 << 0,
Artemy Kovalyov6938fc1e2017-08-17 15:52:03 +0300352};
353
Leon Romanovsky78b1beb2017-09-24 21:46:29 +0300354struct ib_tm_caps {
Artemy Kovalyov6938fc1e2017-08-17 15:52:03 +0300355 /* Max size of RNDV header */
356 u32 max_rndv_hdr_size;
357 /* Max number of entries in tag matching list */
358 u32 max_num_tags;
359 /* From enum ib_tm_cap_flags */
360 u32 flags;
361 /* Max number of outstanding list operations */
362 u32 max_ops;
363 /* Max number of SGE in tag matching entry */
364 u32 max_sge;
365};
366
Matan Barakbcf4c1e2015-06-11 16:35:20 +0300367struct ib_cq_init_attr {
368 unsigned int cqe;
369 int comp_vector;
370 u32 flags;
371};
372
Yonatan Cohen869ddcf2017-11-13 10:51:13 +0200373enum ib_cq_attr_mask {
374 IB_CQ_MODERATE = 1 << 0,
375};
376
Yonatan Cohen18bd9072017-11-13 10:51:16 +0200377struct ib_cq_caps {
378 u16 max_cq_moderation_count;
379 u16 max_cq_moderation_period;
380};
381
Ariel Levkovichbe934cc2018-04-05 18:53:25 +0300382struct ib_dm_mr_attr {
383 u64 length;
384 u64 offset;
385 u32 access_flags;
386};
387
Ariel Levkovichbee76d72018-04-05 18:53:24 +0300388struct ib_dm_alloc_attr {
389 u64 length;
390 u32 alignment;
391 u32 flags;
392};
393
Linus Torvalds1da177e2005-04-16 15:20:36 -0700394struct ib_device_attr {
395 u64 fw_ver;
Sean Hefty97f52eb2005-08-13 21:05:57 -0700396 __be64 sys_image_guid;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700397 u64 max_mr_size;
398 u64 page_size_cap;
399 u32 vendor_id;
400 u32 vendor_part_id;
401 u32 hw_ver;
402 int max_qp;
403 int max_qp_wr;
Leon Romanovskyfb532d62016-02-23 10:25:25 +0200404 u64 device_cap_flags;
Steve Wise33023fb2018-06-18 08:05:26 -0700405 int max_send_sge;
406 int max_recv_sge;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700407 int max_sge_rd;
408 int max_cq;
409 int max_cqe;
410 int max_mr;
411 int max_pd;
412 int max_qp_rd_atom;
413 int max_ee_rd_atom;
414 int max_res_rd_atom;
415 int max_qp_init_rd_atom;
416 int max_ee_init_rd_atom;
417 enum ib_atomic_cap atomic_cap;
Vladimir Sokolovsky5e80ba82010-04-14 17:23:01 +0300418 enum ib_atomic_cap masked_atomic_cap;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700419 int max_ee;
420 int max_rdd;
421 int max_mw;
422 int max_raw_ipv6_qp;
423 int max_raw_ethy_qp;
424 int max_mcast_grp;
425 int max_mcast_qp_attach;
426 int max_total_mcast_qp_attach;
427 int max_ah;
428 int max_fmr;
429 int max_map_per_fmr;
430 int max_srq;
431 int max_srq_wr;
432 int max_srq_sge;
Steve Wise00f7ec32008-07-14 23:48:45 -0700433 unsigned int max_fast_reg_page_list_len;
Max Gurtovoy62e3c372019-06-11 18:52:43 +0300434 unsigned int max_pi_fast_reg_page_list_len;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700435 u16 max_pkeys;
436 u8 local_ca_ack_delay;
Sagi Grimberg1b01d332014-02-23 14:19:05 +0200437 int sig_prot_cap;
438 int sig_guard_cap;
Sagi Grimberg860f10a2014-12-11 17:04:16 +0200439 struct ib_odp_caps odp_caps;
Matan Barak24306dc2015-06-11 16:35:24 +0300440 uint64_t timestamp_mask;
441 uint64_t hca_core_clock; /* in KHZ */
Yishai Hadasccf20562016-08-28 11:28:43 +0300442 struct ib_rss_caps rss_caps;
443 u32 max_wq_type_rq;
Noa Osherovichebaaee22017-01-18 15:39:54 +0200444 u32 raw_packet_caps; /* Use ib_raw_packet_caps enum */
Leon Romanovsky78b1beb2017-09-24 21:46:29 +0300445 struct ib_tm_caps tm_caps;
Yonatan Cohen18bd9072017-11-13 10:51:16 +0200446 struct ib_cq_caps cq_caps;
Ariel Levkovich1d8eeb92018-04-05 18:53:23 +0300447 u64 max_dm_size;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700448};
449
450enum ib_mtu {
451 IB_MTU_256 = 1,
452 IB_MTU_512 = 2,
453 IB_MTU_1024 = 3,
454 IB_MTU_2048 = 4,
455 IB_MTU_4096 = 5
456};
457
458static inline int ib_mtu_enum_to_int(enum ib_mtu mtu)
459{
460 switch (mtu) {
461 case IB_MTU_256: return 256;
462 case IB_MTU_512: return 512;
463 case IB_MTU_1024: return 1024;
464 case IB_MTU_2048: return 2048;
465 case IB_MTU_4096: return 4096;
466 default: return -1;
467 }
468}
469
Amrani, Ramd3f4aad2016-12-26 08:40:57 +0200470static inline enum ib_mtu ib_mtu_int_to_enum(int mtu)
471{
472 if (mtu >= 4096)
473 return IB_MTU_4096;
474 else if (mtu >= 2048)
475 return IB_MTU_2048;
476 else if (mtu >= 1024)
477 return IB_MTU_1024;
478 else if (mtu >= 512)
479 return IB_MTU_512;
480 else
481 return IB_MTU_256;
482}
483
Linus Torvalds1da177e2005-04-16 15:20:36 -0700484enum ib_port_state {
485 IB_PORT_NOP = 0,
486 IB_PORT_DOWN = 1,
487 IB_PORT_INIT = 2,
488 IB_PORT_ARMED = 3,
489 IB_PORT_ACTIVE = 4,
490 IB_PORT_ACTIVE_DEFER = 5
491};
492
Kamal Heib72a77202019-08-07 13:31:35 +0300493enum ib_port_phys_state {
494 IB_PORT_PHYS_STATE_SLEEP = 1,
495 IB_PORT_PHYS_STATE_POLLING = 2,
496 IB_PORT_PHYS_STATE_DISABLED = 3,
497 IB_PORT_PHYS_STATE_PORT_CONFIGURATION_TRAINING = 4,
498 IB_PORT_PHYS_STATE_LINK_UP = 5,
499 IB_PORT_PHYS_STATE_LINK_ERROR_RECOVERY = 6,
500 IB_PORT_PHYS_STATE_PHY_TEST = 7,
501};
502
Linus Torvalds1da177e2005-04-16 15:20:36 -0700503enum ib_port_width {
504 IB_WIDTH_1X = 1,
Michael Guralnikdbabf682018-12-09 11:49:49 +0200505 IB_WIDTH_2X = 16,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700506 IB_WIDTH_4X = 2,
507 IB_WIDTH_8X = 4,
508 IB_WIDTH_12X = 8
509};
510
511static inline int ib_width_enum_to_int(enum ib_port_width width)
512{
513 switch (width) {
514 case IB_WIDTH_1X: return 1;
Michael Guralnikdbabf682018-12-09 11:49:49 +0200515 case IB_WIDTH_2X: return 2;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700516 case IB_WIDTH_4X: return 4;
517 case IB_WIDTH_8X: return 8;
518 case IB_WIDTH_12X: return 12;
519 default: return -1;
520 }
521}
522
Or Gerlitz2e966912012-02-28 18:49:50 +0200523enum ib_port_speed {
524 IB_SPEED_SDR = 1,
525 IB_SPEED_DDR = 2,
526 IB_SPEED_QDR = 4,
527 IB_SPEED_FDR10 = 8,
528 IB_SPEED_FDR = 16,
Noa Osherovich12113a32017-04-20 20:53:31 +0300529 IB_SPEED_EDR = 32,
530 IB_SPEED_HDR = 64
Or Gerlitz2e966912012-02-28 18:49:50 +0200531};
532
Christoph Lameterb40f4752016-05-16 12:49:33 -0500533/**
534 * struct rdma_hw_stats
Mark Bloche9451302018-03-27 15:51:05 +0300535 * @lock - Mutex to protect parallel write access to lifespan and values
536 * of counters, which are 64bits and not guaranteeed to be written
537 * atomicaly on 32bits systems.
Christoph Lameterb40f4752016-05-16 12:49:33 -0500538 * @timestamp - Used by the core code to track when the last update was
539 * @lifespan - Used by the core code to determine how old the counters
540 * should be before being updated again. Stored in jiffies, defaults
541 * to 10 milliseconds, drivers can override the default be specifying
542 * their own value during their allocation routine.
543 * @name - Array of pointers to static names used for the counters in
544 * directory.
545 * @num_counters - How many hardware counters there are. If name is
546 * shorter than this number, a kernel oops will result. Driver authors
547 * are encouraged to leave BUILD_BUG_ON(ARRAY_SIZE(@name) < num_counters)
548 * in their code to prevent this.
549 * @value - Array of u64 counters that are accessed by the sysfs code and
550 * filled in by the drivers get_stats routine
551 */
552struct rdma_hw_stats {
Mark Bloche9451302018-03-27 15:51:05 +0300553 struct mutex lock; /* Protect lifespan and values[] */
Christoph Lameterb40f4752016-05-16 12:49:33 -0500554 unsigned long timestamp;
555 unsigned long lifespan;
556 const char * const *names;
557 int num_counters;
558 u64 value[];
Steve Wise7f624d02008-07-14 23:48:48 -0700559};
560
Christoph Lameterb40f4752016-05-16 12:49:33 -0500561#define RDMA_HW_STATS_DEFAULT_LIFESPAN 10
562/**
563 * rdma_alloc_hw_stats_struct - Helper function to allocate dynamic struct
564 * for drivers.
565 * @names - Array of static const char *
566 * @num_counters - How many elements in array
567 * @lifespan - How many milliseconds between updates
568 */
569static inline struct rdma_hw_stats *rdma_alloc_hw_stats_struct(
570 const char * const *names, int num_counters,
571 unsigned long lifespan)
572{
573 struct rdma_hw_stats *stats;
Steve Wise7f624d02008-07-14 23:48:48 -0700574
Christoph Lameterb40f4752016-05-16 12:49:33 -0500575 stats = kzalloc(sizeof(*stats) + num_counters * sizeof(u64),
576 GFP_KERNEL);
577 if (!stats)
578 return NULL;
579 stats->names = names;
580 stats->num_counters = num_counters;
581 stats->lifespan = msecs_to_jiffies(lifespan);
Steve Wise7f624d02008-07-14 23:48:48 -0700582
Christoph Lameterb40f4752016-05-16 12:49:33 -0500583 return stats;
584}
585
Steve Wise7f624d02008-07-14 23:48:48 -0700586
Ira Weinyf9b22e32015-05-13 20:02:59 -0400587/* Define bits for the various functionality this port needs to be supported by
588 * the core.
589 */
590/* Management 0x00000FFF */
591#define RDMA_CORE_CAP_IB_MAD 0x00000001
592#define RDMA_CORE_CAP_IB_SMI 0x00000002
593#define RDMA_CORE_CAP_IB_CM 0x00000004
594#define RDMA_CORE_CAP_IW_CM 0x00000008
595#define RDMA_CORE_CAP_IB_SA 0x00000010
Ira Weiny65995fe2015-06-06 14:38:32 -0400596#define RDMA_CORE_CAP_OPA_MAD 0x00000020
Ira Weinyf9b22e32015-05-13 20:02:59 -0400597
598/* Address format 0x000FF000 */
599#define RDMA_CORE_CAP_AF_IB 0x00001000
600#define RDMA_CORE_CAP_ETH_AH 0x00002000
Dasaratharaman Chandramouli94d595c2017-03-20 19:38:09 -0400601#define RDMA_CORE_CAP_OPA_AH 0x00004000
Artemy Kovalyovb02289b2018-07-04 15:57:50 +0300602#define RDMA_CORE_CAP_IB_GRH_REQUIRED 0x00008000
Ira Weinyf9b22e32015-05-13 20:02:59 -0400603
604/* Protocol 0xFFF00000 */
605#define RDMA_CORE_CAP_PROT_IB 0x00100000
606#define RDMA_CORE_CAP_PROT_ROCE 0x00200000
607#define RDMA_CORE_CAP_PROT_IWARP 0x00400000
Matan Barak7766a992015-12-23 14:56:50 +0200608#define RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP 0x00800000
Or Gerlitzaa773bd2017-01-24 13:02:35 +0200609#define RDMA_CORE_CAP_PROT_RAW_PACKET 0x01000000
Or Gerlitzce1e0552017-01-24 13:02:38 +0200610#define RDMA_CORE_CAP_PROT_USNIC 0x02000000
Ira Weinyf9b22e32015-05-13 20:02:59 -0400611
Artemy Kovalyovb02289b2018-07-04 15:57:50 +0300612#define RDMA_CORE_PORT_IB_GRH_REQUIRED (RDMA_CORE_CAP_IB_GRH_REQUIRED \
613 | RDMA_CORE_CAP_PROT_ROCE \
614 | RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP)
615
Ira Weinyf9b22e32015-05-13 20:02:59 -0400616#define RDMA_CORE_PORT_IBA_IB (RDMA_CORE_CAP_PROT_IB \
617 | RDMA_CORE_CAP_IB_MAD \
618 | RDMA_CORE_CAP_IB_SMI \
619 | RDMA_CORE_CAP_IB_CM \
620 | RDMA_CORE_CAP_IB_SA \
621 | RDMA_CORE_CAP_AF_IB)
622#define RDMA_CORE_PORT_IBA_ROCE (RDMA_CORE_CAP_PROT_ROCE \
623 | RDMA_CORE_CAP_IB_MAD \
624 | RDMA_CORE_CAP_IB_CM \
Ira Weinyf9b22e32015-05-13 20:02:59 -0400625 | RDMA_CORE_CAP_AF_IB \
626 | RDMA_CORE_CAP_ETH_AH)
Matan Barak7766a992015-12-23 14:56:50 +0200627#define RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP \
628 (RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP \
629 | RDMA_CORE_CAP_IB_MAD \
630 | RDMA_CORE_CAP_IB_CM \
631 | RDMA_CORE_CAP_AF_IB \
632 | RDMA_CORE_CAP_ETH_AH)
Ira Weinyf9b22e32015-05-13 20:02:59 -0400633#define RDMA_CORE_PORT_IWARP (RDMA_CORE_CAP_PROT_IWARP \
634 | RDMA_CORE_CAP_IW_CM)
Ira Weiny65995fe2015-06-06 14:38:32 -0400635#define RDMA_CORE_PORT_INTEL_OPA (RDMA_CORE_PORT_IBA_IB \
636 | RDMA_CORE_CAP_OPA_MAD)
Ira Weinyf9b22e32015-05-13 20:02:59 -0400637
Or Gerlitzaa773bd2017-01-24 13:02:35 +0200638#define RDMA_CORE_PORT_RAW_PACKET (RDMA_CORE_CAP_PROT_RAW_PACKET)
639
Or Gerlitzce1e0552017-01-24 13:02:38 +0200640#define RDMA_CORE_PORT_USNIC (RDMA_CORE_CAP_PROT_USNIC)
641
Linus Torvalds1da177e2005-04-16 15:20:36 -0700642struct ib_port_attr {
Eli Cohenfad61ad2016-03-11 22:58:36 +0200643 u64 subnet_prefix;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700644 enum ib_port_state state;
645 enum ib_mtu max_mtu;
646 enum ib_mtu active_mtu;
647 int gid_tbl_len;
Jason Gunthorpe2f944c02018-07-04 15:57:48 +0300648 unsigned int ip_gids:1;
649 /* This is the value from PortInfo CapabilityMask, defined by IBA */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700650 u32 port_cap_flags;
651 u32 max_msg_sz;
652 u32 bad_pkey_cntr;
653 u32 qkey_viol_cntr;
654 u16 pkey_tbl_len;
Dasaratharaman Chandramoulidb585402017-06-08 13:37:48 -0400655 u32 sm_lid;
Dasaratharaman Chandramouli582faf32017-06-08 13:37:47 -0400656 u32 lid;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700657 u8 lmc;
658 u8 max_vl_num;
659 u8 sm_sl;
660 u8 subnet_timeout;
661 u8 init_type_reply;
662 u8 active_width;
663 u8 active_speed;
664 u8 phys_state;
Michael Guralnik1e8f43b2018-12-09 11:49:48 +0200665 u16 port_cap_flags2;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700666};
667
668enum ib_device_modify_flags {
Roland Dreierc5bcbbb2006-02-02 09:47:14 -0800669 IB_DEVICE_MODIFY_SYS_IMAGE_GUID = 1 << 0,
670 IB_DEVICE_MODIFY_NODE_DESC = 1 << 1
Linus Torvalds1da177e2005-04-16 15:20:36 -0700671};
672
Yuval Shaiabd99fde2016-08-25 10:57:07 -0700673#define IB_DEVICE_NODE_DESC_MAX 64
674
Linus Torvalds1da177e2005-04-16 15:20:36 -0700675struct ib_device_modify {
676 u64 sys_image_guid;
Yuval Shaiabd99fde2016-08-25 10:57:07 -0700677 char node_desc[IB_DEVICE_NODE_DESC_MAX];
Linus Torvalds1da177e2005-04-16 15:20:36 -0700678};
679
680enum ib_port_modify_flags {
681 IB_PORT_SHUTDOWN = 1,
682 IB_PORT_INIT_TYPE = (1<<2),
Vishwanathapura, Niranjanacb493662017-06-01 17:04:02 -0700683 IB_PORT_RESET_QKEY_CNTR = (1<<3),
684 IB_PORT_OPA_MASK_CHG = (1<<4)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700685};
686
687struct ib_port_modify {
688 u32 set_port_cap_mask;
689 u32 clr_port_cap_mask;
690 u8 init_type;
691};
692
693enum ib_event_type {
694 IB_EVENT_CQ_ERR,
695 IB_EVENT_QP_FATAL,
696 IB_EVENT_QP_REQ_ERR,
697 IB_EVENT_QP_ACCESS_ERR,
698 IB_EVENT_COMM_EST,
699 IB_EVENT_SQ_DRAINED,
700 IB_EVENT_PATH_MIG,
701 IB_EVENT_PATH_MIG_ERR,
702 IB_EVENT_DEVICE_FATAL,
703 IB_EVENT_PORT_ACTIVE,
704 IB_EVENT_PORT_ERR,
705 IB_EVENT_LID_CHANGE,
706 IB_EVENT_PKEY_CHANGE,
Roland Dreierd41fcc62005-08-18 12:23:08 -0700707 IB_EVENT_SM_CHANGE,
708 IB_EVENT_SRQ_ERR,
709 IB_EVENT_SRQ_LIMIT_REACHED,
Leonid Arsh63942c92006-06-17 20:37:35 -0700710 IB_EVENT_QP_LAST_WQE_REACHED,
Or Gerlitz761d90e2011-06-15 14:39:29 +0000711 IB_EVENT_CLIENT_REREGISTER,
712 IB_EVENT_GID_CHANGE,
Yishai Hadasf213c052016-05-23 15:20:49 +0300713 IB_EVENT_WQ_FATAL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700714};
715
Bart Van Asschedb7489e2015-08-03 10:01:52 -0700716const char *__attribute_const__ ib_event_msg(enum ib_event_type event);
Sagi Grimberg2b1b5b62015-05-18 13:40:28 +0300717
Linus Torvalds1da177e2005-04-16 15:20:36 -0700718struct ib_event {
719 struct ib_device *device;
720 union {
721 struct ib_cq *cq;
722 struct ib_qp *qp;
Roland Dreierd41fcc62005-08-18 12:23:08 -0700723 struct ib_srq *srq;
Yishai Hadasf213c052016-05-23 15:20:49 +0300724 struct ib_wq *wq;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700725 u8 port_num;
726 } element;
727 enum ib_event_type event;
728};
729
730struct ib_event_handler {
731 struct ib_device *device;
732 void (*handler)(struct ib_event_handler *, struct ib_event *);
733 struct list_head list;
734};
735
736#define INIT_IB_EVENT_HANDLER(_ptr, _device, _handler) \
737 do { \
738 (_ptr)->device = _device; \
739 (_ptr)->handler = _handler; \
740 INIT_LIST_HEAD(&(_ptr)->list); \
741 } while (0)
742
743struct ib_global_route {
Jason Gunthorpe8d9ec9a2018-06-13 10:22:03 +0300744 const struct ib_gid_attr *sgid_attr;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700745 union ib_gid dgid;
746 u32 flow_label;
747 u8 sgid_index;
748 u8 hop_limit;
749 u8 traffic_class;
750};
751
Hal Rosenstock513789e2005-07-27 11:45:34 -0700752struct ib_grh {
Sean Hefty97f52eb2005-08-13 21:05:57 -0700753 __be32 version_tclass_flow;
754 __be16 paylen;
Hal Rosenstock513789e2005-07-27 11:45:34 -0700755 u8 next_hdr;
756 u8 hop_limit;
757 union ib_gid sgid;
758 union ib_gid dgid;
759};
760
Somnath Koturc865f242015-12-23 14:56:51 +0200761union rdma_network_hdr {
762 struct ib_grh ibgrh;
763 struct {
764 /* The IB spec states that if it's IPv4, the header
765 * is located in the last 20 bytes of the header.
766 */
767 u8 reserved[20];
768 struct iphdr roce4grh;
769 };
770};
771
Don Hiatt7dafbab2017-05-12 09:19:55 -0700772#define IB_QPN_MASK 0xFFFFFF
773
Linus Torvalds1da177e2005-04-16 15:20:36 -0700774enum {
775 IB_MULTICAST_QPN = 0xffffff
776};
777
Harvey Harrisonf3a7c662009-02-14 22:58:35 -0800778#define IB_LID_PERMISSIVE cpu_to_be16(0xFFFF)
Dennis Dalessandrob4e64392016-01-06 10:04:31 -0800779#define IB_MULTICAST_LID_BASE cpu_to_be16(0xC000)
Sean Hefty97f52eb2005-08-13 21:05:57 -0700780
Linus Torvalds1da177e2005-04-16 15:20:36 -0700781enum ib_ah_flags {
782 IB_AH_GRH = 1
783};
784
Jack Morgensteinbf6a9e32006-04-10 09:43:47 -0700785enum ib_rate {
786 IB_RATE_PORT_CURRENT = 0,
787 IB_RATE_2_5_GBPS = 2,
788 IB_RATE_5_GBPS = 5,
789 IB_RATE_10_GBPS = 3,
790 IB_RATE_20_GBPS = 6,
791 IB_RATE_30_GBPS = 4,
792 IB_RATE_40_GBPS = 7,
793 IB_RATE_60_GBPS = 8,
794 IB_RATE_80_GBPS = 9,
Marcel Apfelbaum71eeba12011-10-05 14:21:47 +0300795 IB_RATE_120_GBPS = 10,
796 IB_RATE_14_GBPS = 11,
797 IB_RATE_56_GBPS = 12,
798 IB_RATE_112_GBPS = 13,
799 IB_RATE_168_GBPS = 14,
800 IB_RATE_25_GBPS = 15,
801 IB_RATE_100_GBPS = 16,
802 IB_RATE_200_GBPS = 17,
Michael Guralnika5a5d192018-12-09 11:49:50 +0200803 IB_RATE_300_GBPS = 18,
804 IB_RATE_28_GBPS = 19,
805 IB_RATE_50_GBPS = 20,
806 IB_RATE_400_GBPS = 21,
807 IB_RATE_600_GBPS = 22,
Jack Morgensteinbf6a9e32006-04-10 09:43:47 -0700808};
809
810/**
811 * ib_rate_to_mult - Convert the IB rate enum to a multiple of the
812 * base rate of 2.5 Gbit/sec. For example, IB_RATE_5_GBPS will be
813 * converted to 2, since 5 Gbit/sec is 2 * 2.5 Gbit/sec.
814 * @rate: rate to convert.
815 */
Roland Dreier8385fd82014-06-04 10:00:16 -0700816__attribute_const__ int ib_rate_to_mult(enum ib_rate rate);
Jack Morgensteinbf6a9e32006-04-10 09:43:47 -0700817
818/**
Marcel Apfelbaum71eeba12011-10-05 14:21:47 +0300819 * ib_rate_to_mbps - Convert the IB rate enum to Mbps.
820 * For example, IB_RATE_2_5_GBPS will be converted to 2500.
821 * @rate: rate to convert.
822 */
Roland Dreier8385fd82014-06-04 10:00:16 -0700823__attribute_const__ int ib_rate_to_mbps(enum ib_rate rate);
Marcel Apfelbaum71eeba12011-10-05 14:21:47 +0300824
Sagi Grimberg17cd3a22014-02-23 14:19:04 +0200825
826/**
Sagi Grimberg9bee1782015-07-30 10:32:35 +0300827 * enum ib_mr_type - memory region type
828 * @IB_MR_TYPE_MEM_REG: memory region that is used for
829 * normal registration
Sagi Grimbergf5aa9152016-02-29 19:07:32 +0200830 * @IB_MR_TYPE_SG_GAPS: memory region that is capable to
831 * register any arbitrary sg lists (without
832 * the normal mr constraints - see
833 * ib_map_mr_sg)
Max Gurtovoya0bc0992019-06-11 18:52:38 +0300834 * @IB_MR_TYPE_DM: memory region that is used for device
835 * memory registration
836 * @IB_MR_TYPE_USER: memory region that is used for the user-space
837 * application
838 * @IB_MR_TYPE_DMA: memory region that is used for DMA operations
839 * without address translations (VA=PA)
Israel Rukshin26bc7ea2019-06-11 18:52:39 +0300840 * @IB_MR_TYPE_INTEGRITY: memory region that is used for
841 * data integrity operations
Sagi Grimberg17cd3a22014-02-23 14:19:04 +0200842 */
Sagi Grimberg9bee1782015-07-30 10:32:35 +0300843enum ib_mr_type {
844 IB_MR_TYPE_MEM_REG,
Sagi Grimbergf5aa9152016-02-29 19:07:32 +0200845 IB_MR_TYPE_SG_GAPS,
Max Gurtovoya0bc0992019-06-11 18:52:38 +0300846 IB_MR_TYPE_DM,
847 IB_MR_TYPE_USER,
848 IB_MR_TYPE_DMA,
Israel Rukshin26bc7ea2019-06-11 18:52:39 +0300849 IB_MR_TYPE_INTEGRITY,
Sagi Grimberg17cd3a22014-02-23 14:19:04 +0200850};
851
Sagi Grimberg1b01d332014-02-23 14:19:05 +0200852enum ib_mr_status_check {
853 IB_MR_CHECK_SIG_STATUS = 1,
854};
855
856/**
857 * struct ib_mr_status - Memory region status container
858 *
859 * @fail_status: Bitmask of MR checks status. For each
860 * failed check a corresponding status bit is set.
861 * @sig_err: Additional info for IB_MR_CEHCK_SIG_STATUS
862 * failure.
863 */
864struct ib_mr_status {
865 u32 fail_status;
866 struct ib_sig_err sig_err;
867};
868
Marcel Apfelbaum71eeba12011-10-05 14:21:47 +0300869/**
Jack Morgensteinbf6a9e32006-04-10 09:43:47 -0700870 * mult_to_ib_rate - Convert a multiple of 2.5 Gbit/sec to an IB rate
871 * enum.
872 * @mult: multiple to convert.
873 */
Roland Dreier8385fd82014-06-04 10:00:16 -0700874__attribute_const__ enum ib_rate mult_to_ib_rate(int mult);
Jack Morgensteinbf6a9e32006-04-10 09:43:47 -0700875
Dasaratharaman Chandramouli44c58482017-04-29 14:41:29 -0400876enum rdma_ah_attr_type {
Don Hiatt87daac62018-02-01 10:57:03 -0800877 RDMA_AH_ATTR_TYPE_UNDEFINED,
Dasaratharaman Chandramouli44c58482017-04-29 14:41:29 -0400878 RDMA_AH_ATTR_TYPE_IB,
879 RDMA_AH_ATTR_TYPE_ROCE,
Dasaratharaman Chandramouli64b46462017-04-29 14:41:30 -0400880 RDMA_AH_ATTR_TYPE_OPA,
Dasaratharaman Chandramouli44c58482017-04-29 14:41:29 -0400881};
882
883struct ib_ah_attr {
884 u16 dlid;
885 u8 src_path_bits;
886};
887
888struct roce_ah_attr {
889 u8 dmac[ETH_ALEN];
890};
891
Dasaratharaman Chandramouli64b46462017-04-29 14:41:30 -0400892struct opa_ah_attr {
893 u32 dlid;
894 u8 src_path_bits;
Don Hiattd98bb7f2017-08-04 13:54:16 -0700895 bool make_grd;
Dasaratharaman Chandramouli64b46462017-04-29 14:41:30 -0400896};
897
Dasaratharaman Chandramouli90898852017-04-29 14:41:18 -0400898struct rdma_ah_attr {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700899 struct ib_global_route grh;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700900 u8 sl;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700901 u8 static_rate;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700902 u8 port_num;
Dasaratharaman Chandramouli44c58482017-04-29 14:41:29 -0400903 u8 ah_flags;
904 enum rdma_ah_attr_type type;
905 union {
906 struct ib_ah_attr ib;
907 struct roce_ah_attr roce;
Dasaratharaman Chandramouli64b46462017-04-29 14:41:30 -0400908 struct opa_ah_attr opa;
Dasaratharaman Chandramouli44c58482017-04-29 14:41:29 -0400909 };
Linus Torvalds1da177e2005-04-16 15:20:36 -0700910};
911
912enum ib_wc_status {
913 IB_WC_SUCCESS,
914 IB_WC_LOC_LEN_ERR,
915 IB_WC_LOC_QP_OP_ERR,
916 IB_WC_LOC_EEC_OP_ERR,
917 IB_WC_LOC_PROT_ERR,
918 IB_WC_WR_FLUSH_ERR,
919 IB_WC_MW_BIND_ERR,
920 IB_WC_BAD_RESP_ERR,
921 IB_WC_LOC_ACCESS_ERR,
922 IB_WC_REM_INV_REQ_ERR,
923 IB_WC_REM_ACCESS_ERR,
924 IB_WC_REM_OP_ERR,
925 IB_WC_RETRY_EXC_ERR,
926 IB_WC_RNR_RETRY_EXC_ERR,
927 IB_WC_LOC_RDD_VIOL_ERR,
928 IB_WC_REM_INV_RD_REQ_ERR,
929 IB_WC_REM_ABORT_ERR,
930 IB_WC_INV_EECN_ERR,
931 IB_WC_INV_EEC_STATE_ERR,
932 IB_WC_FATAL_ERR,
933 IB_WC_RESP_TIMEOUT_ERR,
934 IB_WC_GENERAL_ERR
935};
936
Bart Van Asschedb7489e2015-08-03 10:01:52 -0700937const char *__attribute_const__ ib_wc_status_msg(enum ib_wc_status status);
Sagi Grimberg2b1b5b62015-05-18 13:40:28 +0300938
Linus Torvalds1da177e2005-04-16 15:20:36 -0700939enum ib_wc_opcode {
940 IB_WC_SEND,
941 IB_WC_RDMA_WRITE,
942 IB_WC_RDMA_READ,
943 IB_WC_COMP_SWAP,
944 IB_WC_FETCH_ADD,
Eli Cohenc93570f2008-04-16 21:09:27 -0700945 IB_WC_LSO,
Steve Wise00f7ec32008-07-14 23:48:45 -0700946 IB_WC_LOCAL_INV,
Sagi Grimberg4c67e2b2015-10-13 19:11:24 +0300947 IB_WC_REG_MR,
Vladimir Sokolovsky5e80ba82010-04-14 17:23:01 +0300948 IB_WC_MASKED_COMP_SWAP,
949 IB_WC_MASKED_FETCH_ADD,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700950/*
951 * Set value of IB_WC_RECV so consumers can test if a completion is a
952 * receive by testing (opcode & IB_WC_RECV).
953 */
954 IB_WC_RECV = 1 << 7,
955 IB_WC_RECV_RDMA_WITH_IMM
956};
957
958enum ib_wc_flags {
959 IB_WC_GRH = 1,
Steve Wise00f7ec32008-07-14 23:48:45 -0700960 IB_WC_WITH_IMM = (1<<1),
961 IB_WC_WITH_INVALIDATE = (1<<2),
Or Gerlitzd927d502012-01-11 19:03:51 +0200962 IB_WC_IP_CSUM_OK = (1<<3),
Matan Barakdd5f03b2013-12-12 18:03:11 +0200963 IB_WC_WITH_SMAC = (1<<4),
964 IB_WC_WITH_VLAN = (1<<5),
Somnath Koturc865f242015-12-23 14:56:51 +0200965 IB_WC_WITH_NETWORK_HDR_TYPE = (1<<6),
Linus Torvalds1da177e2005-04-16 15:20:36 -0700966};
967
968struct ib_wc {
Christoph Hellwig14d3a3b2015-12-11 11:53:03 -0800969 union {
970 u64 wr_id;
971 struct ib_cqe *wr_cqe;
972 };
Linus Torvalds1da177e2005-04-16 15:20:36 -0700973 enum ib_wc_status status;
974 enum ib_wc_opcode opcode;
975 u32 vendor_err;
976 u32 byte_len;
Michael S. Tsirkin062dbb62006-12-31 21:09:42 +0200977 struct ib_qp *qp;
Steve Wise00f7ec32008-07-14 23:48:45 -0700978 union {
979 __be32 imm_data;
980 u32 invalidate_rkey;
981 } ex;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700982 u32 src_qp;
Bodong Wangcd2a6e72018-01-12 07:58:41 +0200983 u32 slid;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700984 int wc_flags;
985 u16 pkey_index;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700986 u8 sl;
987 u8 dlid_path_bits;
988 u8 port_num; /* valid only for DR SMPs on switches */
Matan Barakdd5f03b2013-12-12 18:03:11 +0200989 u8 smac[ETH_ALEN];
990 u16 vlan_id;
Somnath Koturc865f242015-12-23 14:56:51 +0200991 u8 network_hdr_type;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700992};
993
Roland Dreiered23a722007-05-06 21:02:48 -0700994enum ib_cq_notify_flags {
995 IB_CQ_SOLICITED = 1 << 0,
996 IB_CQ_NEXT_COMP = 1 << 1,
997 IB_CQ_SOLICITED_MASK = IB_CQ_SOLICITED | IB_CQ_NEXT_COMP,
998 IB_CQ_REPORT_MISSED_EVENTS = 1 << 2,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700999};
1000
Sean Hefty96104ed2011-05-23 16:31:36 -07001001enum ib_srq_type {
Sean Hefty418d5132011-05-23 19:42:29 -07001002 IB_SRQT_BASIC,
Artemy Kovalyov9c2c8492017-08-17 15:52:05 +03001003 IB_SRQT_XRC,
1004 IB_SRQT_TM,
Sean Hefty96104ed2011-05-23 16:31:36 -07001005};
1006
Artemy Kovalyov1a56ff62017-08-17 15:52:04 +03001007static inline bool ib_srq_has_cq(enum ib_srq_type srq_type)
1008{
Artemy Kovalyov9c2c8492017-08-17 15:52:05 +03001009 return srq_type == IB_SRQT_XRC ||
1010 srq_type == IB_SRQT_TM;
Artemy Kovalyov1a56ff62017-08-17 15:52:04 +03001011}
1012
Roland Dreierd41fcc62005-08-18 12:23:08 -07001013enum ib_srq_attr_mask {
1014 IB_SRQ_MAX_WR = 1 << 0,
1015 IB_SRQ_LIMIT = 1 << 1,
1016};
1017
1018struct ib_srq_attr {
1019 u32 max_wr;
1020 u32 max_sge;
1021 u32 srq_limit;
1022};
1023
1024struct ib_srq_init_attr {
1025 void (*event_handler)(struct ib_event *, void *);
1026 void *srq_context;
1027 struct ib_srq_attr attr;
Sean Hefty96104ed2011-05-23 16:31:36 -07001028 enum ib_srq_type srq_type;
Sean Hefty418d5132011-05-23 19:42:29 -07001029
Artemy Kovalyov1a56ff62017-08-17 15:52:04 +03001030 struct {
1031 struct ib_cq *cq;
1032 union {
1033 struct {
1034 struct ib_xrcd *xrcd;
1035 } xrc;
Artemy Kovalyov9c2c8492017-08-17 15:52:05 +03001036
1037 struct {
1038 u32 max_num_tags;
1039 } tag_matching;
Artemy Kovalyov1a56ff62017-08-17 15:52:04 +03001040 };
Sean Hefty418d5132011-05-23 19:42:29 -07001041 } ext;
Roland Dreierd41fcc62005-08-18 12:23:08 -07001042};
1043
Linus Torvalds1da177e2005-04-16 15:20:36 -07001044struct ib_qp_cap {
1045 u32 max_send_wr;
1046 u32 max_recv_wr;
1047 u32 max_send_sge;
1048 u32 max_recv_sge;
1049 u32 max_inline_data;
Christoph Hellwiga060b562016-05-03 18:01:09 +02001050
1051 /*
1052 * Maximum number of rdma_rw_ctx structures in flight at a time.
1053 * ib_create_qp() will calculate the right amount of neededed WRs
1054 * and MRs based on this.
1055 */
1056 u32 max_rdma_ctxs;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001057};
1058
1059enum ib_sig_type {
1060 IB_SIGNAL_ALL_WR,
1061 IB_SIGNAL_REQ_WR
1062};
1063
1064enum ib_qp_type {
1065 /*
1066 * IB_QPT_SMI and IB_QPT_GSI have to be the first two entries
1067 * here (and in that order) since the MAD layer uses them as
1068 * indices into a 2-entry table.
1069 */
1070 IB_QPT_SMI,
1071 IB_QPT_GSI,
1072
1073 IB_QPT_RC,
1074 IB_QPT_UC,
1075 IB_QPT_UD,
1076 IB_QPT_RAW_IPV6,
Sean Heftyb42b63c2011-05-23 19:59:25 -07001077 IB_QPT_RAW_ETHERTYPE,
Or Gerlitzc938a612012-03-01 12:17:51 +02001078 IB_QPT_RAW_PACKET = 8,
Sean Heftyb42b63c2011-05-23 19:59:25 -07001079 IB_QPT_XRC_INI = 9,
1080 IB_QPT_XRC_TGT,
Jack Morgenstein0134f162013-07-07 17:25:52 +03001081 IB_QPT_MAX,
Moni Shoua8011c1e2018-01-02 16:19:30 +02001082 IB_QPT_DRIVER = 0xFF,
Jack Morgenstein0134f162013-07-07 17:25:52 +03001083 /* Reserve a range for qp types internal to the low level driver.
1084 * These qp types will not be visible at the IB core layer, so the
1085 * IB_QPT_MAX usages should not be affected in the core layer
1086 */
1087 IB_QPT_RESERVED1 = 0x1000,
1088 IB_QPT_RESERVED2,
1089 IB_QPT_RESERVED3,
1090 IB_QPT_RESERVED4,
1091 IB_QPT_RESERVED5,
1092 IB_QPT_RESERVED6,
1093 IB_QPT_RESERVED7,
1094 IB_QPT_RESERVED8,
1095 IB_QPT_RESERVED9,
1096 IB_QPT_RESERVED10,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001097};
1098
Eli Cohenb846f252008-04-16 21:09:27 -07001099enum ib_qp_create_flags {
Ron Livne47ee1b92008-07-14 23:48:48 -07001100 IB_QP_CREATE_IPOIB_UD_LSO = 1 << 0,
1101 IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK = 1 << 1,
Leon Romanovsky8a06ce52015-12-20 12:16:10 +02001102 IB_QP_CREATE_CROSS_CHANNEL = 1 << 2,
1103 IB_QP_CREATE_MANAGED_SEND = 1 << 3,
1104 IB_QP_CREATE_MANAGED_RECV = 1 << 4,
Matan Barak90f1d1b2013-11-07 15:25:12 +02001105 IB_QP_CREATE_NETIF_QP = 1 << 5,
Israel Rukshinc0a6cbb2019-06-11 18:52:50 +03001106 IB_QP_CREATE_INTEGRITY_EN = 1 << 6,
Leon Romanovsky7855f582017-05-23 14:38:16 +03001107 /* FREE = 1 << 7, */
Majd Dibbinyb531b902016-04-17 17:19:36 +03001108 IB_QP_CREATE_SCATTER_FCS = 1 << 8,
Noa Osherovich9c2b2702017-01-18 15:39:56 +02001109 IB_QP_CREATE_CVLAN_STRIPPING = 1 << 9,
Yishai Hadas02984cc2017-06-08 16:15:06 +03001110 IB_QP_CREATE_SOURCE_QPN = 1 << 10,
Noa Osheroviche1d2e882017-10-29 13:59:44 +02001111 IB_QP_CREATE_PCI_WRITE_END_PADDING = 1 << 11,
Jack Morgensteind2b57062012-08-03 08:40:37 +00001112 /* reserve bits 26-31 for low level drivers' internal use */
1113 IB_QP_CREATE_RESERVED_START = 1 << 26,
1114 IB_QP_CREATE_RESERVED_END = 1 << 31,
Eli Cohenb846f252008-04-16 21:09:27 -07001115};
1116
Yishai Hadas73c40c62013-08-01 18:49:53 +03001117/*
1118 * Note: users may not call ib_close_qp or ib_destroy_qp from the event_handler
1119 * callback to destroy the passed in QP.
1120 */
1121
Linus Torvalds1da177e2005-04-16 15:20:36 -07001122struct ib_qp_init_attr {
Chuck Levereb93c82e2018-09-04 11:45:20 -04001123 /* Consumer's event_handler callback must not block */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001124 void (*event_handler)(struct ib_event *, void *);
Chuck Levereb93c82e2018-09-04 11:45:20 -04001125
Linus Torvalds1da177e2005-04-16 15:20:36 -07001126 void *qp_context;
1127 struct ib_cq *send_cq;
1128 struct ib_cq *recv_cq;
1129 struct ib_srq *srq;
Sean Heftyb42b63c2011-05-23 19:59:25 -07001130 struct ib_xrcd *xrcd; /* XRC TGT QPs only */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001131 struct ib_qp_cap cap;
1132 enum ib_sig_type sq_sig_type;
1133 enum ib_qp_type qp_type;
Nathan Chancellorb56511c2018-09-24 12:57:16 -07001134 u32 create_flags;
Christoph Hellwiga060b562016-05-03 18:01:09 +02001135
1136 /*
1137 * Only needed for special QP types, or when using the RW API.
1138 */
1139 u8 port_num;
Yishai Hadasa9017e22016-05-23 15:20:54 +03001140 struct ib_rwq_ind_table *rwq_ind_tbl;
Yishai Hadas02984cc2017-06-08 16:15:06 +03001141 u32 source_qpn;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001142};
1143
Sean Hefty0e0ec7e2011-08-08 15:31:51 -07001144struct ib_qp_open_attr {
1145 void (*event_handler)(struct ib_event *, void *);
1146 void *qp_context;
1147 u32 qp_num;
1148 enum ib_qp_type qp_type;
1149};
1150
Linus Torvalds1da177e2005-04-16 15:20:36 -07001151enum ib_rnr_timeout {
1152 IB_RNR_TIMER_655_36 = 0,
1153 IB_RNR_TIMER_000_01 = 1,
1154 IB_RNR_TIMER_000_02 = 2,
1155 IB_RNR_TIMER_000_03 = 3,
1156 IB_RNR_TIMER_000_04 = 4,
1157 IB_RNR_TIMER_000_06 = 5,
1158 IB_RNR_TIMER_000_08 = 6,
1159 IB_RNR_TIMER_000_12 = 7,
1160 IB_RNR_TIMER_000_16 = 8,
1161 IB_RNR_TIMER_000_24 = 9,
1162 IB_RNR_TIMER_000_32 = 10,
1163 IB_RNR_TIMER_000_48 = 11,
1164 IB_RNR_TIMER_000_64 = 12,
1165 IB_RNR_TIMER_000_96 = 13,
1166 IB_RNR_TIMER_001_28 = 14,
1167 IB_RNR_TIMER_001_92 = 15,
1168 IB_RNR_TIMER_002_56 = 16,
1169 IB_RNR_TIMER_003_84 = 17,
1170 IB_RNR_TIMER_005_12 = 18,
1171 IB_RNR_TIMER_007_68 = 19,
1172 IB_RNR_TIMER_010_24 = 20,
1173 IB_RNR_TIMER_015_36 = 21,
1174 IB_RNR_TIMER_020_48 = 22,
1175 IB_RNR_TIMER_030_72 = 23,
1176 IB_RNR_TIMER_040_96 = 24,
1177 IB_RNR_TIMER_061_44 = 25,
1178 IB_RNR_TIMER_081_92 = 26,
1179 IB_RNR_TIMER_122_88 = 27,
1180 IB_RNR_TIMER_163_84 = 28,
1181 IB_RNR_TIMER_245_76 = 29,
1182 IB_RNR_TIMER_327_68 = 30,
1183 IB_RNR_TIMER_491_52 = 31
1184};
1185
1186enum ib_qp_attr_mask {
1187 IB_QP_STATE = 1,
1188 IB_QP_CUR_STATE = (1<<1),
1189 IB_QP_EN_SQD_ASYNC_NOTIFY = (1<<2),
1190 IB_QP_ACCESS_FLAGS = (1<<3),
1191 IB_QP_PKEY_INDEX = (1<<4),
1192 IB_QP_PORT = (1<<5),
1193 IB_QP_QKEY = (1<<6),
1194 IB_QP_AV = (1<<7),
1195 IB_QP_PATH_MTU = (1<<8),
1196 IB_QP_TIMEOUT = (1<<9),
1197 IB_QP_RETRY_CNT = (1<<10),
1198 IB_QP_RNR_RETRY = (1<<11),
1199 IB_QP_RQ_PSN = (1<<12),
1200 IB_QP_MAX_QP_RD_ATOMIC = (1<<13),
1201 IB_QP_ALT_PATH = (1<<14),
1202 IB_QP_MIN_RNR_TIMER = (1<<15),
1203 IB_QP_SQ_PSN = (1<<16),
1204 IB_QP_MAX_DEST_RD_ATOMIC = (1<<17),
1205 IB_QP_PATH_MIG_STATE = (1<<18),
1206 IB_QP_CAP = (1<<19),
Matan Barakdd5f03b2013-12-12 18:03:11 +02001207 IB_QP_DEST_QPN = (1<<20),
Matan Barakaa744cc2015-10-15 18:38:53 +03001208 IB_QP_RESERVED1 = (1<<21),
1209 IB_QP_RESERVED2 = (1<<22),
1210 IB_QP_RESERVED3 = (1<<23),
1211 IB_QP_RESERVED4 = (1<<24),
Bodong Wang528e5a12016-12-01 13:43:14 +02001212 IB_QP_RATE_LIMIT = (1<<25),
Linus Torvalds1da177e2005-04-16 15:20:36 -07001213};
1214
1215enum ib_qp_state {
1216 IB_QPS_RESET,
1217 IB_QPS_INIT,
1218 IB_QPS_RTR,
1219 IB_QPS_RTS,
1220 IB_QPS_SQD,
1221 IB_QPS_SQE,
1222 IB_QPS_ERR
1223};
1224
1225enum ib_mig_state {
1226 IB_MIG_MIGRATED,
1227 IB_MIG_REARM,
1228 IB_MIG_ARMED
1229};
1230
Shani Michaeli7083e422013-02-06 16:19:12 +00001231enum ib_mw_type {
1232 IB_MW_TYPE_1 = 1,
1233 IB_MW_TYPE_2 = 2
1234};
1235
Linus Torvalds1da177e2005-04-16 15:20:36 -07001236struct ib_qp_attr {
1237 enum ib_qp_state qp_state;
1238 enum ib_qp_state cur_qp_state;
1239 enum ib_mtu path_mtu;
1240 enum ib_mig_state path_mig_state;
1241 u32 qkey;
1242 u32 rq_psn;
1243 u32 sq_psn;
1244 u32 dest_qp_num;
1245 int qp_access_flags;
1246 struct ib_qp_cap cap;
Dasaratharaman Chandramouli90898852017-04-29 14:41:18 -04001247 struct rdma_ah_attr ah_attr;
1248 struct rdma_ah_attr alt_ah_attr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001249 u16 pkey_index;
1250 u16 alt_pkey_index;
1251 u8 en_sqd_async_notify;
1252 u8 sq_draining;
1253 u8 max_rd_atomic;
1254 u8 max_dest_rd_atomic;
1255 u8 min_rnr_timer;
1256 u8 port_num;
1257 u8 timeout;
1258 u8 retry_cnt;
1259 u8 rnr_retry;
1260 u8 alt_port_num;
1261 u8 alt_timeout;
Bodong Wang528e5a12016-12-01 13:43:14 +02001262 u32 rate_limit;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001263};
1264
1265enum ib_wr_opcode {
Jason Gunthorpe9a597392018-08-14 15:33:02 -07001266 /* These are shared with userspace */
1267 IB_WR_RDMA_WRITE = IB_UVERBS_WR_RDMA_WRITE,
1268 IB_WR_RDMA_WRITE_WITH_IMM = IB_UVERBS_WR_RDMA_WRITE_WITH_IMM,
1269 IB_WR_SEND = IB_UVERBS_WR_SEND,
1270 IB_WR_SEND_WITH_IMM = IB_UVERBS_WR_SEND_WITH_IMM,
1271 IB_WR_RDMA_READ = IB_UVERBS_WR_RDMA_READ,
1272 IB_WR_ATOMIC_CMP_AND_SWP = IB_UVERBS_WR_ATOMIC_CMP_AND_SWP,
1273 IB_WR_ATOMIC_FETCH_AND_ADD = IB_UVERBS_WR_ATOMIC_FETCH_AND_ADD,
1274 IB_WR_LSO = IB_UVERBS_WR_TSO,
1275 IB_WR_SEND_WITH_INV = IB_UVERBS_WR_SEND_WITH_INV,
1276 IB_WR_RDMA_READ_WITH_INV = IB_UVERBS_WR_RDMA_READ_WITH_INV,
1277 IB_WR_LOCAL_INV = IB_UVERBS_WR_LOCAL_INV,
1278 IB_WR_MASKED_ATOMIC_CMP_AND_SWP =
1279 IB_UVERBS_WR_MASKED_ATOMIC_CMP_AND_SWP,
1280 IB_WR_MASKED_ATOMIC_FETCH_AND_ADD =
1281 IB_UVERBS_WR_MASKED_ATOMIC_FETCH_AND_ADD,
1282
1283 /* These are kernel only and can not be issued by userspace */
1284 IB_WR_REG_MR = 0x20,
Max Gurtovoy38ca87c2019-06-11 18:52:46 +03001285 IB_WR_REG_MR_INTEGRITY,
Jason Gunthorpe9a597392018-08-14 15:33:02 -07001286
Jack Morgenstein0134f162013-07-07 17:25:52 +03001287 /* reserve values for low level drivers' internal use.
1288 * These values will not be used at all in the ib core layer.
1289 */
1290 IB_WR_RESERVED1 = 0xf0,
1291 IB_WR_RESERVED2,
1292 IB_WR_RESERVED3,
1293 IB_WR_RESERVED4,
1294 IB_WR_RESERVED5,
1295 IB_WR_RESERVED6,
1296 IB_WR_RESERVED7,
1297 IB_WR_RESERVED8,
1298 IB_WR_RESERVED9,
1299 IB_WR_RESERVED10,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001300};
1301
1302enum ib_send_flags {
1303 IB_SEND_FENCE = 1,
1304 IB_SEND_SIGNALED = (1<<1),
1305 IB_SEND_SOLICITED = (1<<2),
Eli Cohene0605d92008-01-30 18:30:57 +02001306 IB_SEND_INLINE = (1<<3),
Jack Morgenstein0134f162013-07-07 17:25:52 +03001307 IB_SEND_IP_CSUM = (1<<4),
1308
1309 /* reserve bits 26-31 for low level drivers' internal use */
1310 IB_SEND_RESERVED_START = (1 << 26),
1311 IB_SEND_RESERVED_END = (1 << 31),
Linus Torvalds1da177e2005-04-16 15:20:36 -07001312};
1313
1314struct ib_sge {
1315 u64 addr;
1316 u32 length;
1317 u32 lkey;
1318};
1319
Christoph Hellwig14d3a3b2015-12-11 11:53:03 -08001320struct ib_cqe {
1321 void (*done)(struct ib_cq *cq, struct ib_wc *wc);
1322};
1323
Linus Torvalds1da177e2005-04-16 15:20:36 -07001324struct ib_send_wr {
1325 struct ib_send_wr *next;
Christoph Hellwig14d3a3b2015-12-11 11:53:03 -08001326 union {
1327 u64 wr_id;
1328 struct ib_cqe *wr_cqe;
1329 };
Linus Torvalds1da177e2005-04-16 15:20:36 -07001330 struct ib_sge *sg_list;
1331 int num_sge;
1332 enum ib_wr_opcode opcode;
1333 int send_flags;
Roland Dreier0f39cf32008-04-16 21:09:32 -07001334 union {
1335 __be32 imm_data;
1336 u32 invalidate_rkey;
1337 } ex;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001338};
1339
Christoph Hellwige622f2f2015-10-08 09:16:33 +01001340struct ib_rdma_wr {
1341 struct ib_send_wr wr;
1342 u64 remote_addr;
1343 u32 rkey;
1344};
1345
Bart Van Asschef696bf62018-07-18 09:25:14 -07001346static inline const struct ib_rdma_wr *rdma_wr(const struct ib_send_wr *wr)
Christoph Hellwige622f2f2015-10-08 09:16:33 +01001347{
1348 return container_of(wr, struct ib_rdma_wr, wr);
1349}
1350
1351struct ib_atomic_wr {
1352 struct ib_send_wr wr;
1353 u64 remote_addr;
1354 u64 compare_add;
1355 u64 swap;
1356 u64 compare_add_mask;
1357 u64 swap_mask;
1358 u32 rkey;
1359};
1360
Bart Van Asschef696bf62018-07-18 09:25:14 -07001361static inline const struct ib_atomic_wr *atomic_wr(const struct ib_send_wr *wr)
Christoph Hellwige622f2f2015-10-08 09:16:33 +01001362{
1363 return container_of(wr, struct ib_atomic_wr, wr);
1364}
1365
1366struct ib_ud_wr {
1367 struct ib_send_wr wr;
1368 struct ib_ah *ah;
1369 void *header;
1370 int hlen;
1371 int mss;
1372 u32 remote_qpn;
1373 u32 remote_qkey;
1374 u16 pkey_index; /* valid for GSI only */
1375 u8 port_num; /* valid for DR SMPs on switch only */
1376};
1377
Bart Van Asschef696bf62018-07-18 09:25:14 -07001378static inline const struct ib_ud_wr *ud_wr(const struct ib_send_wr *wr)
Christoph Hellwige622f2f2015-10-08 09:16:33 +01001379{
1380 return container_of(wr, struct ib_ud_wr, wr);
1381}
1382
Sagi Grimberg4c67e2b2015-10-13 19:11:24 +03001383struct ib_reg_wr {
1384 struct ib_send_wr wr;
1385 struct ib_mr *mr;
1386 u32 key;
1387 int access;
1388};
1389
Bart Van Asschef696bf62018-07-18 09:25:14 -07001390static inline const struct ib_reg_wr *reg_wr(const struct ib_send_wr *wr)
Sagi Grimberg4c67e2b2015-10-13 19:11:24 +03001391{
1392 return container_of(wr, struct ib_reg_wr, wr);
1393}
1394
Linus Torvalds1da177e2005-04-16 15:20:36 -07001395struct ib_recv_wr {
1396 struct ib_recv_wr *next;
Christoph Hellwig14d3a3b2015-12-11 11:53:03 -08001397 union {
1398 u64 wr_id;
1399 struct ib_cqe *wr_cqe;
1400 };
Linus Torvalds1da177e2005-04-16 15:20:36 -07001401 struct ib_sge *sg_list;
1402 int num_sge;
1403};
1404
1405enum ib_access_flags {
Jason Gunthorpe4fca0372018-07-11 16:20:44 -06001406 IB_ACCESS_LOCAL_WRITE = IB_UVERBS_ACCESS_LOCAL_WRITE,
1407 IB_ACCESS_REMOTE_WRITE = IB_UVERBS_ACCESS_REMOTE_WRITE,
1408 IB_ACCESS_REMOTE_READ = IB_UVERBS_ACCESS_REMOTE_READ,
1409 IB_ACCESS_REMOTE_ATOMIC = IB_UVERBS_ACCESS_REMOTE_ATOMIC,
1410 IB_ACCESS_MW_BIND = IB_UVERBS_ACCESS_MW_BIND,
1411 IB_ZERO_BASED = IB_UVERBS_ACCESS_ZERO_BASED,
1412 IB_ACCESS_ON_DEMAND = IB_UVERBS_ACCESS_ON_DEMAND,
1413 IB_ACCESS_HUGETLB = IB_UVERBS_ACCESS_HUGETLB,
1414
1415 IB_ACCESS_SUPPORTED = ((IB_ACCESS_HUGETLB << 1) - 1)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001416};
1417
Christoph Hellwigb7d3e0a2015-12-23 19:12:47 +01001418/*
1419 * XXX: these are apparently used for ->rereg_user_mr, no idea why they
1420 * are hidden here instead of a uapi header!
1421 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001422enum ib_mr_rereg_flags {
1423 IB_MR_REREG_TRANS = 1,
1424 IB_MR_REREG_PD = (1<<1),
Matan Barak7e6edb92014-07-31 11:01:28 +03001425 IB_MR_REREG_ACCESS = (1<<2),
1426 IB_MR_REREG_SUPPORTED = ((IB_MR_REREG_ACCESS << 1) - 1)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001427};
1428
Linus Torvalds1da177e2005-04-16 15:20:36 -07001429struct ib_fmr_attr {
1430 int max_pages;
1431 int max_maps;
Or Gerlitzd36f34a2006-02-02 10:43:45 -08001432 u8 page_shift;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001433};
1434
Haggai Eran882214e2014-12-11 17:04:18 +02001435struct ib_umem;
1436
Matan Barak38321252017-04-04 13:31:42 +03001437enum rdma_remove_reason {
Yishai Hadas1c774832018-06-20 17:11:39 +03001438 /*
1439 * Userspace requested uobject deletion or initial try
1440 * to remove uobject via cleanup. Call could fail
1441 */
Matan Barak38321252017-04-04 13:31:42 +03001442 RDMA_REMOVE_DESTROY,
1443 /* Context deletion. This call should delete the actual object itself */
1444 RDMA_REMOVE_CLOSE,
1445 /* Driver is being hot-unplugged. This call should delete the actual object itself */
1446 RDMA_REMOVE_DRIVER_REMOVE,
Jason Gunthorpe87ad80a2018-07-25 21:40:12 -06001447 /* uobj is being cleaned-up before being committed */
1448 RDMA_REMOVE_ABORT,
Matan Barak38321252017-04-04 13:31:42 +03001449};
1450
Parav Pandit43579b52017-01-10 00:02:14 +00001451struct ib_rdmacg_object {
1452#ifdef CONFIG_CGROUP_RDMA
1453 struct rdma_cgroup *cg; /* owner rdma cgroup */
1454#endif
1455};
1456
Roland Dreiere2773c02005-07-07 17:57:10 -07001457struct ib_ucontext {
1458 struct ib_device *device;
Matan Barak771addf2017-04-04 13:31:41 +03001459 struct ib_uverbs_file *ufile;
Jason Gunthorpee9517472018-07-10 20:55:19 -06001460 /*
1461 * 'closing' can be read by the driver only during a destroy callback,
1462 * it is set when we are closing the file descriptor and indicates
1463 * that mm_sem may be locked.
1464 */
Leon Romanovsky6ceb6332018-09-03 20:18:03 +03001465 bool closing;
Shachar Raindel8ada2c12014-12-11 17:04:17 +02001466
Yishai Hadas1c774832018-06-20 17:11:39 +03001467 bool cleanup_retryable;
Matan Barak38321252017-04-04 13:31:42 +03001468
Parav Pandit43579b52017-01-10 00:02:14 +00001469 struct ib_rdmacg_object cg_obj;
Leon Romanovsky60615212018-11-28 13:16:43 +02001470 /*
1471 * Implementation details of the RDMA core, don't use in drivers:
1472 */
1473 struct rdma_restrack_entry res;
Roland Dreiere2773c02005-07-07 17:57:10 -07001474};
1475
1476struct ib_uobject {
1477 u64 user_handle; /* handle given to us by userspace */
Jason Gunthorpe6a5e9c82018-07-04 11:32:07 +03001478 /* ufile & ucontext owning this object */
1479 struct ib_uverbs_file *ufile;
1480 /* FIXME, save memory: ufile->context == context */
Roland Dreiere2773c02005-07-07 17:57:10 -07001481 struct ib_ucontext *context; /* associated user context */
Roland Dreier9ead1902006-06-17 20:44:49 -07001482 void *object; /* containing object */
Roland Dreiere2773c02005-07-07 17:57:10 -07001483 struct list_head list; /* link to context's list */
Parav Pandit43579b52017-01-10 00:02:14 +00001484 struct ib_rdmacg_object cg_obj; /* rdmacg object */
Roland Dreierb3d636b2008-04-16 21:01:06 -07001485 int id; /* index into kernel idr */
Roland Dreier9ead1902006-06-17 20:44:49 -07001486 struct kref ref;
Matan Barak38321252017-04-04 13:31:42 +03001487 atomic_t usecnt; /* protects exclusive access */
Mike Marciniszynd144da82015-11-02 12:13:25 -05001488 struct rcu_head rcu; /* kfree_rcu() overhead */
Matan Barak38321252017-04-04 13:31:42 +03001489
Jason Gunthorpe6b0d08f2018-08-09 20:14:37 -06001490 const struct uverbs_api_object *uapi_object;
Roland Dreiere2773c02005-07-07 17:57:10 -07001491};
1492
Roland Dreiere2773c02005-07-07 17:57:10 -07001493struct ib_udata {
Yann Droneaud309243e2013-12-11 23:01:44 +01001494 const void __user *inbuf;
Roland Dreiere2773c02005-07-07 17:57:10 -07001495 void __user *outbuf;
1496 size_t inlen;
1497 size_t outlen;
1498};
1499
Linus Torvalds1da177e2005-04-16 15:20:36 -07001500struct ib_pd {
Jason Gunthorpe96249d72015-08-05 14:14:45 -06001501 u32 local_dma_lkey;
Christoph Hellwiged082d32016-09-05 12:56:17 +02001502 u32 flags;
Roland Dreiere2773c02005-07-07 17:57:10 -07001503 struct ib_device *device;
1504 struct ib_uobject *uobject;
1505 atomic_t usecnt; /* count all resources */
Christoph Hellwig50d46332016-09-05 12:56:16 +02001506
Christoph Hellwiged082d32016-09-05 12:56:17 +02001507 u32 unsafe_global_rkey;
1508
Christoph Hellwig50d46332016-09-05 12:56:16 +02001509 /*
1510 * Implementation details of the RDMA core, don't use in drivers:
1511 */
1512 struct ib_mr *__internal_mr;
Leon Romanovsky02d88832018-01-28 11:17:20 +02001513 struct rdma_restrack_entry res;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001514};
1515
Sean Hefty59991f92011-05-23 17:52:46 -07001516struct ib_xrcd {
1517 struct ib_device *device;
Sean Heftyd3d72d92011-05-26 23:06:44 -07001518 atomic_t usecnt; /* count all exposed resources */
Sean Hefty53d0bd12011-05-24 08:33:46 -07001519 struct inode *inode;
Sean Heftyd3d72d92011-05-26 23:06:44 -07001520
1521 struct mutex tgt_qp_mutex;
1522 struct list_head tgt_qp_list;
Sean Hefty59991f92011-05-23 17:52:46 -07001523};
1524
Linus Torvalds1da177e2005-04-16 15:20:36 -07001525struct ib_ah {
1526 struct ib_device *device;
1527 struct ib_pd *pd;
Roland Dreiere2773c02005-07-07 17:57:10 -07001528 struct ib_uobject *uobject;
Jason Gunthorpe1a1f4602018-06-13 10:22:08 +03001529 const struct ib_gid_attr *sgid_attr;
Dasaratharaman Chandramouli44c58482017-04-29 14:41:29 -04001530 enum rdma_ah_attr_type type;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001531};
1532
1533typedef void (*ib_comp_handler)(struct ib_cq *cq, void *cq_context);
1534
Christoph Hellwig14d3a3b2015-12-11 11:53:03 -08001535enum ib_poll_context {
Jack Morgensteinf7948092018-08-27 08:35:55 +03001536 IB_POLL_DIRECT, /* caller context, no hw completions */
1537 IB_POLL_SOFTIRQ, /* poll from softirq context */
1538 IB_POLL_WORKQUEUE, /* poll from workqueue */
1539 IB_POLL_UNBOUND_WORKQUEUE, /* poll from unbound workqueue */
Christoph Hellwig14d3a3b2015-12-11 11:53:03 -08001540};
1541
Linus Torvalds1da177e2005-04-16 15:20:36 -07001542struct ib_cq {
Roland Dreiere2773c02005-07-07 17:57:10 -07001543 struct ib_device *device;
1544 struct ib_uobject *uobject;
1545 ib_comp_handler comp_handler;
1546 void (*event_handler)(struct ib_event *, void *);
Dotan Barak4deccd62008-07-14 23:48:44 -07001547 void *cq_context;
Roland Dreiere2773c02005-07-07 17:57:10 -07001548 int cqe;
1549 atomic_t usecnt; /* count number of work queues */
Christoph Hellwig14d3a3b2015-12-11 11:53:03 -08001550 enum ib_poll_context poll_ctx;
1551 struct ib_wc *wc;
1552 union {
1553 struct irq_poll iop;
1554 struct work_struct work;
1555 };
Jack Morgensteinf7948092018-08-27 08:35:55 +03001556 struct workqueue_struct *comp_wq;
Yamin Friedmanda662972019-07-08 13:59:03 +03001557 struct dim *dim;
Leon Romanovsky02d88832018-01-28 11:17:20 +02001558 /*
1559 * Implementation details of the RDMA core, don't use in drivers:
1560 */
1561 struct rdma_restrack_entry res;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001562};
1563
1564struct ib_srq {
Roland Dreierd41fcc62005-08-18 12:23:08 -07001565 struct ib_device *device;
1566 struct ib_pd *pd;
1567 struct ib_uobject *uobject;
1568 void (*event_handler)(struct ib_event *, void *);
1569 void *srq_context;
Sean Hefty96104ed2011-05-23 16:31:36 -07001570 enum ib_srq_type srq_type;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001571 atomic_t usecnt;
Sean Hefty418d5132011-05-23 19:42:29 -07001572
Artemy Kovalyov1a56ff62017-08-17 15:52:04 +03001573 struct {
1574 struct ib_cq *cq;
1575 union {
1576 struct {
1577 struct ib_xrcd *xrcd;
1578 u32 srq_num;
1579 } xrc;
1580 };
Sean Hefty418d5132011-05-23 19:42:29 -07001581 } ext;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001582};
1583
Noa Osherovichebaaee22017-01-18 15:39:54 +02001584enum ib_raw_packet_caps {
1585 /* Strip cvlan from incoming packet and report it in the matching work
1586 * completion is supported.
1587 */
1588 IB_RAW_PACKET_CAP_CVLAN_STRIPPING = (1 << 0),
1589 /* Scatter FCS field of an incoming packet to host memory is supported.
1590 */
1591 IB_RAW_PACKET_CAP_SCATTER_FCS = (1 << 1),
1592 /* Checksum offloads are supported (for both send and receive). */
1593 IB_RAW_PACKET_CAP_IP_CSUM = (1 << 2),
Maor Gottlieb7d9336d2017-05-30 10:29:10 +03001594 /* When a packet is received for an RQ with no receive WQEs, the
1595 * packet processing is delayed.
1596 */
1597 IB_RAW_PACKET_CAP_DELAY_DROP = (1 << 3),
Noa Osherovichebaaee22017-01-18 15:39:54 +02001598};
1599
Yishai Hadas5fd251c2016-05-23 15:20:48 +03001600enum ib_wq_type {
1601 IB_WQT_RQ
1602};
1603
1604enum ib_wq_state {
1605 IB_WQS_RESET,
1606 IB_WQS_RDY,
1607 IB_WQS_ERR
1608};
1609
1610struct ib_wq {
1611 struct ib_device *device;
1612 struct ib_uobject *uobject;
1613 void *wq_context;
1614 void (*event_handler)(struct ib_event *, void *);
1615 struct ib_pd *pd;
1616 struct ib_cq *cq;
1617 u32 wq_num;
1618 enum ib_wq_state state;
1619 enum ib_wq_type wq_type;
1620 atomic_t usecnt;
1621};
1622
Noa Osherovich10bac722017-01-18 15:39:55 +02001623enum ib_wq_flags {
1624 IB_WQ_FLAGS_CVLAN_STRIPPING = 1 << 0,
Noa Osherovich27b0df12017-01-18 15:39:57 +02001625 IB_WQ_FLAGS_SCATTER_FCS = 1 << 1,
Maor Gottlieb7d9336d2017-05-30 10:29:10 +03001626 IB_WQ_FLAGS_DELAY_DROP = 1 << 2,
Noa Osheroviche1d2e882017-10-29 13:59:44 +02001627 IB_WQ_FLAGS_PCI_WRITE_END_PADDING = 1 << 3,
Noa Osherovich10bac722017-01-18 15:39:55 +02001628};
1629
Yishai Hadas5fd251c2016-05-23 15:20:48 +03001630struct ib_wq_init_attr {
1631 void *wq_context;
1632 enum ib_wq_type wq_type;
1633 u32 max_wr;
1634 u32 max_sge;
1635 struct ib_cq *cq;
1636 void (*event_handler)(struct ib_event *, void *);
Noa Osherovich10bac722017-01-18 15:39:55 +02001637 u32 create_flags; /* Use enum ib_wq_flags */
Yishai Hadas5fd251c2016-05-23 15:20:48 +03001638};
1639
1640enum ib_wq_attr_mask {
Noa Osherovich10bac722017-01-18 15:39:55 +02001641 IB_WQ_STATE = 1 << 0,
1642 IB_WQ_CUR_STATE = 1 << 1,
1643 IB_WQ_FLAGS = 1 << 2,
Yishai Hadas5fd251c2016-05-23 15:20:48 +03001644};
1645
1646struct ib_wq_attr {
1647 enum ib_wq_state wq_state;
1648 enum ib_wq_state curr_wq_state;
Noa Osherovich10bac722017-01-18 15:39:55 +02001649 u32 flags; /* Use enum ib_wq_flags */
1650 u32 flags_mask; /* Use enum ib_wq_flags */
Yishai Hadas5fd251c2016-05-23 15:20:48 +03001651};
1652
Yishai Hadas6d397862016-05-23 15:20:51 +03001653struct ib_rwq_ind_table {
1654 struct ib_device *device;
1655 struct ib_uobject *uobject;
1656 atomic_t usecnt;
1657 u32 ind_tbl_num;
1658 u32 log_ind_tbl_size;
1659 struct ib_wq **ind_tbl;
1660};
1661
1662struct ib_rwq_ind_table_init_attr {
1663 u32 log_ind_tbl_size;
1664 /* Each entry is a pointer to Receive Work Queue */
1665 struct ib_wq **ind_tbl;
1666};
1667
Daniel Jurgensd291f1a2017-05-19 15:48:52 +03001668enum port_pkey_state {
1669 IB_PORT_PKEY_NOT_VALID = 0,
1670 IB_PORT_PKEY_VALID = 1,
1671 IB_PORT_PKEY_LISTED = 2,
1672};
1673
1674struct ib_qp_security;
1675
1676struct ib_port_pkey {
1677 enum port_pkey_state state;
1678 u16 pkey_index;
1679 u8 port_num;
1680 struct list_head qp_list;
1681 struct list_head to_error_list;
1682 struct ib_qp_security *sec;
1683};
1684
1685struct ib_ports_pkeys {
1686 struct ib_port_pkey main;
1687 struct ib_port_pkey alt;
1688};
1689
1690struct ib_qp_security {
1691 struct ib_qp *qp;
1692 struct ib_device *dev;
1693 /* Hold this mutex when changing port and pkey settings. */
1694 struct mutex mutex;
1695 struct ib_ports_pkeys *ports_pkeys;
1696 /* A list of all open shared QP handles. Required to enforce security
1697 * properly for all users of a shared QP.
1698 */
1699 struct list_head shared_qp_list;
1700 void *security;
1701 bool destroying;
1702 atomic_t error_list_count;
1703 struct completion error_complete;
1704 int error_comps_pending;
1705};
1706
Bart Van Assche632bc3f2016-07-21 13:03:30 -07001707/*
1708 * @max_write_sge: Maximum SGE elements per RDMA WRITE request.
1709 * @max_read_sge: Maximum SGE elements per RDMA READ request.
1710 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001711struct ib_qp {
1712 struct ib_device *device;
1713 struct ib_pd *pd;
1714 struct ib_cq *send_cq;
1715 struct ib_cq *recv_cq;
Christoph Hellwigfffb0382016-05-03 18:01:07 +02001716 spinlock_t mr_lock;
1717 int mrs_used;
Christoph Hellwiga060b562016-05-03 18:01:09 +02001718 struct list_head rdma_mrs;
Christoph Hellwig0e353e32016-05-03 18:01:12 +02001719 struct list_head sig_mrs;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001720 struct ib_srq *srq;
Sean Heftyb42b63c2011-05-23 19:59:25 -07001721 struct ib_xrcd *xrcd; /* XRC TGT QPs only */
Sean Heftyd3d72d92011-05-26 23:06:44 -07001722 struct list_head xrcd_list;
Christoph Hellwigfffb0382016-05-03 18:01:07 +02001723
Hadar Hen Zion319a4412013-08-07 14:01:59 +03001724 /* count times opened, mcast attaches, flow attaches */
1725 atomic_t usecnt;
Sean Hefty0e0ec7e2011-08-08 15:31:51 -07001726 struct list_head open_list;
1727 struct ib_qp *real_qp;
Roland Dreiere2773c02005-07-07 17:57:10 -07001728 struct ib_uobject *uobject;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001729 void (*event_handler)(struct ib_event *, void *);
1730 void *qp_context;
Jason Gunthorpe1a1f4602018-06-13 10:22:08 +03001731 /* sgid_attrs associated with the AV's */
1732 const struct ib_gid_attr *av_sgid_attr;
1733 const struct ib_gid_attr *alt_path_sgid_attr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001734 u32 qp_num;
Bart Van Assche632bc3f2016-07-21 13:03:30 -07001735 u32 max_write_sge;
1736 u32 max_read_sge;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001737 enum ib_qp_type qp_type;
Yishai Hadasa9017e22016-05-23 15:20:54 +03001738 struct ib_rwq_ind_table *rwq_ind_tbl;
Daniel Jurgensd291f1a2017-05-19 15:48:52 +03001739 struct ib_qp_security *qp_sec;
Noa Osherovich498ca3c2017-08-23 08:35:40 +03001740 u8 port;
Leon Romanovsky02d88832018-01-28 11:17:20 +02001741
Max Gurtovoy185eddc2019-06-11 18:52:51 +03001742 bool integrity_en;
Leon Romanovsky02d88832018-01-28 11:17:20 +02001743 /*
1744 * Implementation details of the RDMA core, don't use in drivers:
1745 */
1746 struct rdma_restrack_entry res;
Mark Zhang99fa3312019-07-02 13:02:35 +03001747
1748 /* The counter the qp is bind to */
1749 struct rdma_counter *counter;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001750};
1751
Ariel Levkovichbee76d72018-04-05 18:53:24 +03001752struct ib_dm {
1753 struct ib_device *device;
1754 u32 length;
1755 u32 flags;
1756 struct ib_uobject *uobject;
1757 atomic_t usecnt;
1758};
1759
Linus Torvalds1da177e2005-04-16 15:20:36 -07001760struct ib_mr {
Roland Dreiere2773c02005-07-07 17:57:10 -07001761 struct ib_device *device;
1762 struct ib_pd *pd;
Roland Dreiere2773c02005-07-07 17:57:10 -07001763 u32 lkey;
1764 u32 rkey;
Sagi Grimberg4c67e2b2015-10-13 19:11:24 +03001765 u64 iova;
Parav Panditedd31552017-09-24 21:46:31 +03001766 u64 length;
Sagi Grimberg4c67e2b2015-10-13 19:11:24 +03001767 unsigned int page_size;
Max Gurtovoya0bc0992019-06-11 18:52:38 +03001768 enum ib_mr_type type;
Steve Wised4a85c32016-05-03 18:01:08 +02001769 bool need_inval;
Christoph Hellwigfffb0382016-05-03 18:01:07 +02001770 union {
1771 struct ib_uobject *uobject; /* user */
1772 struct list_head qp_entry; /* FR */
1773 };
Steve Wisefccec5b2018-03-01 13:58:13 -08001774
Ariel Levkovichbe934cc2018-04-05 18:53:25 +03001775 struct ib_dm *dm;
Max Gurtovoy7c717d32019-06-11 18:52:41 +03001776 struct ib_sig_attrs *sig_attrs; /* only for IB_MR_TYPE_INTEGRITY MRs */
Steve Wisefccec5b2018-03-01 13:58:13 -08001777 /*
1778 * Implementation details of the RDMA core, don't use in drivers:
1779 */
1780 struct rdma_restrack_entry res;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001781};
1782
1783struct ib_mw {
1784 struct ib_device *device;
1785 struct ib_pd *pd;
Roland Dreiere2773c02005-07-07 17:57:10 -07001786 struct ib_uobject *uobject;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001787 u32 rkey;
Shani Michaeli7083e422013-02-06 16:19:12 +00001788 enum ib_mw_type type;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001789};
1790
1791struct ib_fmr {
1792 struct ib_device *device;
1793 struct ib_pd *pd;
1794 struct list_head list;
1795 u32 lkey;
1796 u32 rkey;
1797};
1798
Hadar Hen Zion319a4412013-08-07 14:01:59 +03001799/* Supported steering options */
1800enum ib_flow_attr_type {
1801 /* steering according to rule specifications */
1802 IB_FLOW_ATTR_NORMAL = 0x0,
1803 /* default unicast and multicast rule -
1804 * receive all Eth traffic which isn't steered to any QP
1805 */
1806 IB_FLOW_ATTR_ALL_DEFAULT = 0x1,
1807 /* default multicast rule -
1808 * receive all Eth multicast traffic which isn't steered to any QP
1809 */
1810 IB_FLOW_ATTR_MC_DEFAULT = 0x2,
1811 /* sniffer rule - receive all port traffic */
1812 IB_FLOW_ATTR_SNIFFER = 0x3
1813};
1814
1815/* Supported steering header types */
1816enum ib_flow_spec_type {
1817 /* L2 headers*/
Moses Reuben76bd23b2016-11-14 19:04:48 +02001818 IB_FLOW_SPEC_ETH = 0x20,
1819 IB_FLOW_SPEC_IB = 0x22,
Hadar Hen Zion319a4412013-08-07 14:01:59 +03001820 /* L3 header*/
Moses Reuben76bd23b2016-11-14 19:04:48 +02001821 IB_FLOW_SPEC_IPV4 = 0x30,
1822 IB_FLOW_SPEC_IPV6 = 0x31,
Matan Barak56ab0b32018-03-28 09:27:49 +03001823 IB_FLOW_SPEC_ESP = 0x34,
Hadar Hen Zion319a4412013-08-07 14:01:59 +03001824 /* L4 headers*/
Moses Reuben76bd23b2016-11-14 19:04:48 +02001825 IB_FLOW_SPEC_TCP = 0x40,
1826 IB_FLOW_SPEC_UDP = 0x41,
Moses Reuben0dbf3332016-11-14 19:04:47 +02001827 IB_FLOW_SPEC_VXLAN_TUNNEL = 0x50,
Ariel Levkovichd90e5e52018-05-13 14:33:30 +03001828 IB_FLOW_SPEC_GRE = 0x51,
Ariel Levkovichb04f0f02018-05-13 14:33:32 +03001829 IB_FLOW_SPEC_MPLS = 0x60,
Moses Reubenfbf46862016-11-14 19:04:51 +02001830 IB_FLOW_SPEC_INNER = 0x100,
Moses Reuben460d0192017-01-18 14:59:48 +02001831 /* Actions */
1832 IB_FLOW_SPEC_ACTION_TAG = 0x1000,
Slava Shwartsman483a3962017-04-03 13:13:51 +03001833 IB_FLOW_SPEC_ACTION_DROP = 0x1001,
Matan Barak9b828442018-03-28 09:27:46 +03001834 IB_FLOW_SPEC_ACTION_HANDLE = 0x1002,
Raed Salem7eea23a2018-05-31 16:43:36 +03001835 IB_FLOW_SPEC_ACTION_COUNT = 0x1003,
Hadar Hen Zion319a4412013-08-07 14:01:59 +03001836};
Matan Barak240ae002013-11-07 15:25:13 +02001837#define IB_FLOW_SPEC_LAYER_MASK 0xF0
Raed Salem7eea23a2018-05-31 16:43:36 +03001838#define IB_FLOW_SPEC_SUPPORT_LAYERS 10
Matan Barak22878db2013-09-01 18:39:52 +03001839
Hadar Hen Zion319a4412013-08-07 14:01:59 +03001840/* Flow steering rule priority is set according to it's domain.
1841 * Lower domain value means higher priority.
1842 */
1843enum ib_flow_domain {
1844 IB_FLOW_DOMAIN_USER,
1845 IB_FLOW_DOMAIN_ETHTOOL,
1846 IB_FLOW_DOMAIN_RFS,
1847 IB_FLOW_DOMAIN_NIC,
1848 IB_FLOW_DOMAIN_NUM /* Must be last */
1849};
1850
Marina Varshavera3100a72016-02-18 18:31:05 +02001851enum ib_flow_flags {
1852 IB_FLOW_ATTR_FLAGS_DONT_TRAP = 1UL << 1, /* Continue match, no steal */
Boris Pismenny21e82d32018-03-28 09:27:47 +03001853 IB_FLOW_ATTR_FLAGS_EGRESS = 1UL << 2, /* Egress flow */
1854 IB_FLOW_ATTR_FLAGS_RESERVED = 1UL << 3 /* Must be last */
Marina Varshavera3100a72016-02-18 18:31:05 +02001855};
1856
Hadar Hen Zion319a4412013-08-07 14:01:59 +03001857struct ib_flow_eth_filter {
1858 u8 dst_mac[6];
1859 u8 src_mac[6];
1860 __be16 ether_type;
1861 __be16 vlan_tag;
Maor Gottlieb15dfbd62016-08-30 16:58:32 +03001862 /* Must be last */
1863 u8 real_sz[0];
Hadar Hen Zion319a4412013-08-07 14:01:59 +03001864};
1865
1866struct ib_flow_spec_eth {
Moses Reubenfbf46862016-11-14 19:04:51 +02001867 u32 type;
Hadar Hen Zion319a4412013-08-07 14:01:59 +03001868 u16 size;
1869 struct ib_flow_eth_filter val;
1870 struct ib_flow_eth_filter mask;
1871};
1872
Matan Barak240ae002013-11-07 15:25:13 +02001873struct ib_flow_ib_filter {
1874 __be16 dlid;
1875 __u8 sl;
Maor Gottlieb15dfbd62016-08-30 16:58:32 +03001876 /* Must be last */
1877 u8 real_sz[0];
Matan Barak240ae002013-11-07 15:25:13 +02001878};
1879
1880struct ib_flow_spec_ib {
Moses Reubenfbf46862016-11-14 19:04:51 +02001881 u32 type;
Matan Barak240ae002013-11-07 15:25:13 +02001882 u16 size;
1883 struct ib_flow_ib_filter val;
1884 struct ib_flow_ib_filter mask;
1885};
1886
Maor Gottlieb989a3a82016-08-30 16:58:33 +03001887/* IPv4 header flags */
1888enum ib_ipv4_flags {
1889 IB_IPV4_DONT_FRAG = 0x2, /* Don't enable packet fragmentation */
1890 IB_IPV4_MORE_FRAG = 0X4 /* For All fragmented packets except the
1891 last have this flag set */
1892};
1893
Hadar Hen Zion319a4412013-08-07 14:01:59 +03001894struct ib_flow_ipv4_filter {
1895 __be32 src_ip;
1896 __be32 dst_ip;
Maor Gottlieb989a3a82016-08-30 16:58:33 +03001897 u8 proto;
1898 u8 tos;
1899 u8 ttl;
1900 u8 flags;
Maor Gottlieb15dfbd62016-08-30 16:58:32 +03001901 /* Must be last */
1902 u8 real_sz[0];
Hadar Hen Zion319a4412013-08-07 14:01:59 +03001903};
1904
1905struct ib_flow_spec_ipv4 {
Moses Reubenfbf46862016-11-14 19:04:51 +02001906 u32 type;
Hadar Hen Zion319a4412013-08-07 14:01:59 +03001907 u16 size;
1908 struct ib_flow_ipv4_filter val;
1909 struct ib_flow_ipv4_filter mask;
1910};
1911
Maor Gottlieb4c2aae72016-06-17 15:14:50 +03001912struct ib_flow_ipv6_filter {
1913 u8 src_ip[16];
1914 u8 dst_ip[16];
Maor Gottlieba72c6a22016-08-30 16:58:34 +03001915 __be32 flow_label;
1916 u8 next_hdr;
1917 u8 traffic_class;
1918 u8 hop_limit;
Maor Gottlieb15dfbd62016-08-30 16:58:32 +03001919 /* Must be last */
1920 u8 real_sz[0];
Maor Gottlieb4c2aae72016-06-17 15:14:50 +03001921};
1922
1923struct ib_flow_spec_ipv6 {
Moses Reubenfbf46862016-11-14 19:04:51 +02001924 u32 type;
Maor Gottlieb4c2aae72016-06-17 15:14:50 +03001925 u16 size;
1926 struct ib_flow_ipv6_filter val;
1927 struct ib_flow_ipv6_filter mask;
1928};
1929
Hadar Hen Zion319a4412013-08-07 14:01:59 +03001930struct ib_flow_tcp_udp_filter {
1931 __be16 dst_port;
1932 __be16 src_port;
Maor Gottlieb15dfbd62016-08-30 16:58:32 +03001933 /* Must be last */
1934 u8 real_sz[0];
Hadar Hen Zion319a4412013-08-07 14:01:59 +03001935};
1936
1937struct ib_flow_spec_tcp_udp {
Moses Reubenfbf46862016-11-14 19:04:51 +02001938 u32 type;
Hadar Hen Zion319a4412013-08-07 14:01:59 +03001939 u16 size;
1940 struct ib_flow_tcp_udp_filter val;
1941 struct ib_flow_tcp_udp_filter mask;
1942};
1943
Moses Reuben0dbf3332016-11-14 19:04:47 +02001944struct ib_flow_tunnel_filter {
1945 __be32 tunnel_id;
1946 u8 real_sz[0];
1947};
1948
1949/* ib_flow_spec_tunnel describes the Vxlan tunnel
1950 * the tunnel_id from val has the vni value
1951 */
1952struct ib_flow_spec_tunnel {
Moses Reubenfbf46862016-11-14 19:04:51 +02001953 u32 type;
Moses Reuben0dbf3332016-11-14 19:04:47 +02001954 u16 size;
1955 struct ib_flow_tunnel_filter val;
1956 struct ib_flow_tunnel_filter mask;
1957};
1958
Matan Barak56ab0b32018-03-28 09:27:49 +03001959struct ib_flow_esp_filter {
1960 __be32 spi;
1961 __be32 seq;
1962 /* Must be last */
1963 u8 real_sz[0];
1964};
1965
1966struct ib_flow_spec_esp {
1967 u32 type;
1968 u16 size;
1969 struct ib_flow_esp_filter val;
1970 struct ib_flow_esp_filter mask;
1971};
1972
Ariel Levkovichd90e5e52018-05-13 14:33:30 +03001973struct ib_flow_gre_filter {
1974 __be16 c_ks_res0_ver;
1975 __be16 protocol;
1976 __be32 key;
1977 /* Must be last */
1978 u8 real_sz[0];
1979};
1980
1981struct ib_flow_spec_gre {
1982 u32 type;
1983 u16 size;
1984 struct ib_flow_gre_filter val;
1985 struct ib_flow_gre_filter mask;
1986};
1987
Ariel Levkovichb04f0f02018-05-13 14:33:32 +03001988struct ib_flow_mpls_filter {
1989 __be32 tag;
1990 /* Must be last */
1991 u8 real_sz[0];
1992};
1993
1994struct ib_flow_spec_mpls {
1995 u32 type;
1996 u16 size;
1997 struct ib_flow_mpls_filter val;
1998 struct ib_flow_mpls_filter mask;
1999};
2000
Moses Reuben460d0192017-01-18 14:59:48 +02002001struct ib_flow_spec_action_tag {
2002 enum ib_flow_spec_type type;
2003 u16 size;
2004 u32 tag_id;
2005};
2006
Slava Shwartsman483a3962017-04-03 13:13:51 +03002007struct ib_flow_spec_action_drop {
2008 enum ib_flow_spec_type type;
2009 u16 size;
2010};
2011
Matan Barak9b828442018-03-28 09:27:46 +03002012struct ib_flow_spec_action_handle {
2013 enum ib_flow_spec_type type;
2014 u16 size;
2015 struct ib_flow_action *act;
2016};
2017
Raed Salem7eea23a2018-05-31 16:43:36 +03002018enum ib_counters_description {
2019 IB_COUNTER_PACKETS,
2020 IB_COUNTER_BYTES,
2021};
2022
2023struct ib_flow_spec_action_count {
2024 enum ib_flow_spec_type type;
2025 u16 size;
2026 struct ib_counters *counters;
2027};
2028
Hadar Hen Zion319a4412013-08-07 14:01:59 +03002029union ib_flow_spec {
2030 struct {
Moses Reubenfbf46862016-11-14 19:04:51 +02002031 u32 type;
Hadar Hen Zion319a4412013-08-07 14:01:59 +03002032 u16 size;
2033 };
2034 struct ib_flow_spec_eth eth;
Matan Barak240ae002013-11-07 15:25:13 +02002035 struct ib_flow_spec_ib ib;
Hadar Hen Zion319a4412013-08-07 14:01:59 +03002036 struct ib_flow_spec_ipv4 ipv4;
2037 struct ib_flow_spec_tcp_udp tcp_udp;
Maor Gottlieb4c2aae72016-06-17 15:14:50 +03002038 struct ib_flow_spec_ipv6 ipv6;
Moses Reuben0dbf3332016-11-14 19:04:47 +02002039 struct ib_flow_spec_tunnel tunnel;
Matan Barak56ab0b32018-03-28 09:27:49 +03002040 struct ib_flow_spec_esp esp;
Ariel Levkovichd90e5e52018-05-13 14:33:30 +03002041 struct ib_flow_spec_gre gre;
Ariel Levkovichb04f0f02018-05-13 14:33:32 +03002042 struct ib_flow_spec_mpls mpls;
Moses Reuben460d0192017-01-18 14:59:48 +02002043 struct ib_flow_spec_action_tag flow_tag;
Slava Shwartsman483a3962017-04-03 13:13:51 +03002044 struct ib_flow_spec_action_drop drop;
Matan Barak9b828442018-03-28 09:27:46 +03002045 struct ib_flow_spec_action_handle action;
Raed Salem7eea23a2018-05-31 16:43:36 +03002046 struct ib_flow_spec_action_count flow_count;
Hadar Hen Zion319a4412013-08-07 14:01:59 +03002047};
2048
2049struct ib_flow_attr {
2050 enum ib_flow_attr_type type;
2051 u16 size;
2052 u16 priority;
2053 u32 flags;
2054 u8 num_of_specs;
2055 u8 port;
Matthew Wilcox7654cb12018-06-07 07:57:16 -07002056 union ib_flow_spec flows[];
Hadar Hen Zion319a4412013-08-07 14:01:59 +03002057};
2058
2059struct ib_flow {
2060 struct ib_qp *qp;
Yishai Hadas6cd080a2018-07-23 15:25:08 +03002061 struct ib_device *device;
Hadar Hen Zion319a4412013-08-07 14:01:59 +03002062 struct ib_uobject *uobject;
2063};
2064
Matan Barak2eb9bea2018-03-28 09:27:45 +03002065enum ib_flow_action_type {
2066 IB_FLOW_ACTION_UNSPECIFIED,
2067 IB_FLOW_ACTION_ESP = 1,
2068};
2069
2070struct ib_flow_action_attrs_esp_keymats {
2071 enum ib_uverbs_flow_action_esp_keymat protocol;
2072 union {
2073 struct ib_uverbs_flow_action_esp_keymat_aes_gcm aes_gcm;
2074 } keymat;
2075};
2076
2077struct ib_flow_action_attrs_esp_replays {
2078 enum ib_uverbs_flow_action_esp_replay protocol;
2079 union {
2080 struct ib_uverbs_flow_action_esp_replay_bmp bmp;
2081 } replay;
2082};
2083
2084enum ib_flow_action_attrs_esp_flags {
2085 /* All user-space flags at the top: Use enum ib_uverbs_flow_action_esp_flags
2086 * This is done in order to share the same flags between user-space and
2087 * kernel and spare an unnecessary translation.
2088 */
2089
2090 /* Kernel flags */
2091 IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED = 1ULL << 32,
Matan Barak7d12f8d2018-03-28 09:27:48 +03002092 IB_FLOW_ACTION_ESP_FLAGS_MOD_ESP_ATTRS = 1ULL << 33,
Matan Barak2eb9bea2018-03-28 09:27:45 +03002093};
2094
2095struct ib_flow_spec_list {
2096 struct ib_flow_spec_list *next;
2097 union ib_flow_spec spec;
2098};
2099
2100struct ib_flow_action_attrs_esp {
2101 struct ib_flow_action_attrs_esp_keymats *keymat;
2102 struct ib_flow_action_attrs_esp_replays *replay;
2103 struct ib_flow_spec_list *encap;
2104 /* Used only if IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED is enabled.
2105 * Value of 0 is a valid value.
2106 */
2107 u32 esn;
2108 u32 spi;
2109 u32 seq;
2110 u32 tfc_pad;
2111 /* Use enum ib_flow_action_attrs_esp_flags */
2112 u64 flags;
2113 u64 hard_limit_pkts;
2114};
2115
2116struct ib_flow_action {
2117 struct ib_device *device;
2118 struct ib_uobject *uobject;
2119 enum ib_flow_action_type type;
2120 atomic_t usecnt;
2121};
2122
Ira Weiny4cd7c942015-06-06 14:38:31 -04002123struct ib_mad_hdr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002124struct ib_grh;
2125
2126enum ib_process_mad_flags {
2127 IB_MAD_IGNORE_MKEY = 1,
2128 IB_MAD_IGNORE_BKEY = 2,
2129 IB_MAD_IGNORE_ALL = IB_MAD_IGNORE_MKEY | IB_MAD_IGNORE_BKEY
2130};
2131
2132enum ib_mad_result {
2133 IB_MAD_RESULT_FAILURE = 0, /* (!SUCCESS is the important flag) */
2134 IB_MAD_RESULT_SUCCESS = 1 << 0, /* MAD was successfully processed */
2135 IB_MAD_RESULT_REPLY = 1 << 1, /* Reply packet needs to be sent */
2136 IB_MAD_RESULT_CONSUMED = 1 << 2 /* Packet consumed: stop processing */
2137};
2138
Jack Wang21d64542017-01-17 10:11:12 +01002139struct ib_port_cache {
Daniel Jurgens883c71f2017-05-19 15:48:51 +03002140 u64 subnet_prefix;
Jack Wang21d64542017-01-17 10:11:12 +01002141 struct ib_pkey_cache *pkey;
2142 struct ib_gid_table *gid;
2143 u8 lmc;
2144 enum ib_port_state port_state;
2145};
2146
Linus Torvalds1da177e2005-04-16 15:20:36 -07002147struct ib_cache {
2148 rwlock_t lock;
2149 struct ib_event_handler event_handler;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002150};
2151
Ira Weiny77386132015-05-13 20:02:58 -04002152struct ib_port_immutable {
2153 int pkey_tbl_len;
2154 int gid_tbl_len;
Ira Weinyf9b22e32015-05-13 20:02:59 -04002155 u32 core_cap_flags;
Ira Weiny337877a2015-06-06 14:38:29 -04002156 u32 max_mad_size;
Ira Weiny77386132015-05-13 20:02:58 -04002157};
2158
Jason Gunthorpe8ceb1352019-02-12 21:12:48 -07002159struct ib_port_data {
Jason Gunthorpe324e2272019-02-12 21:12:51 -07002160 struct ib_device *ib_dev;
2161
Jason Gunthorpe8ceb1352019-02-12 21:12:48 -07002162 struct ib_port_immutable immutable;
2163
2164 spinlock_t pkey_list_lock;
2165 struct list_head pkey_list;
Jason Gunthorpe8faea9f2019-02-12 21:12:49 -07002166
2167 struct ib_port_cache cache;
Jason Gunthorpec2261dd2019-02-12 21:12:50 -07002168
2169 spinlock_t netdev_lock;
Jason Gunthorpe324e2272019-02-12 21:12:51 -07002170 struct net_device __rcu *netdev;
2171 struct hlist_node ndev_hash_link;
Mark Zhang413d3342019-07-02 13:02:34 +03002172 struct rdma_port_counter port_counter;
Mark Zhang6e7be472019-07-02 13:02:46 +03002173 struct rdma_hw_stats *hw_stats;
Jason Gunthorpe8ceb1352019-02-12 21:12:48 -07002174};
2175
Vishwanathapura, Niranjana2fc77572017-04-12 20:29:20 -07002176/* rdma netdev type - specifies protocol type */
2177enum rdma_netdev_t {
Niranjana Vishwanathapuraf0ad83ac2017-04-10 11:22:25 +03002178 RDMA_NETDEV_OPA_VNIC,
2179 RDMA_NETDEV_IPOIB,
Vishwanathapura, Niranjana2fc77572017-04-12 20:29:20 -07002180};
2181
2182/**
2183 * struct rdma_netdev - rdma netdev
2184 * For cases where netstack interfacing is required.
2185 */
2186struct rdma_netdev {
2187 void *clnt_priv;
2188 struct ib_device *hca;
2189 u8 port_num;
2190
Jason Gunthorpe9f49a5b2018-07-29 11:34:56 +03002191 /*
2192 * cleanup function must be specified.
2193 * FIXME: This is only used for OPA_VNIC and that usage should be
2194 * removed too.
2195 */
Niranjana Vishwanathapura8e959602017-06-30 13:14:46 -07002196 void (*free_rdma_netdev)(struct net_device *netdev);
2197
Vishwanathapura, Niranjana2fc77572017-04-12 20:29:20 -07002198 /* control functions */
2199 void (*set_id)(struct net_device *netdev, int id);
Niranjana Vishwanathapuraf0ad83ac2017-04-10 11:22:25 +03002200 /* send packet */
2201 int (*send)(struct net_device *dev, struct sk_buff *skb,
2202 struct ib_ah *address, u32 dqpn);
2203 /* multicast */
2204 int (*attach_mcast)(struct net_device *dev, struct ib_device *hca,
2205 union ib_gid *gid, u16 mlid,
2206 int set_qkey, u32 qkey);
2207 int (*detach_mcast)(struct net_device *dev, struct ib_device *hca,
2208 union ib_gid *gid, u16 mlid);
Vishwanathapura, Niranjana2fc77572017-04-12 20:29:20 -07002209};
2210
Denis Drozdovf6a8a192018-08-14 14:08:51 +03002211struct rdma_netdev_alloc_params {
2212 size_t sizeof_priv;
2213 unsigned int txqs;
2214 unsigned int rxqs;
2215 void *param;
2216
2217 int (*initialize_rdma_netdev)(struct ib_device *device, u8 port_num,
2218 struct net_device *netdev, void *param);
2219};
2220
Raed Salemfa9b1802018-05-31 16:43:31 +03002221struct ib_counters {
2222 struct ib_device *device;
2223 struct ib_uobject *uobject;
2224 /* num of objects attached */
2225 atomic_t usecnt;
2226};
2227
Raed Salem51d7a532018-05-31 16:43:33 +03002228struct ib_counters_read_attr {
2229 u64 *counters_buff;
2230 u32 ncounters;
2231 u32 flags; /* use enum ib_read_counters_flags */
2232};
2233
Matan Barak2eb9bea2018-03-28 09:27:45 +03002234struct uverbs_attr_bundle;
Kamal Heibdd05cb82019-04-29 14:59:06 +03002235struct iw_cm_id;
2236struct iw_cm_conn_param;
Matan Barak2eb9bea2018-03-28 09:27:45 +03002237
Leon Romanovsky30471d42019-02-03 14:55:50 +02002238#define INIT_RDMA_OBJ_SIZE(ib_struct, drv_struct, member) \
2239 .size_##ib_struct = \
2240 (sizeof(struct drv_struct) + \
2241 BUILD_BUG_ON_ZERO(offsetof(struct drv_struct, member)) + \
2242 BUILD_BUG_ON_ZERO( \
2243 !__same_type(((struct drv_struct *)NULL)->member, \
2244 struct ib_struct)))
2245
Leon Romanovskyf6316032019-03-28 15:12:58 +02002246#define rdma_zalloc_drv_obj_gfp(ib_dev, ib_type, gfp) \
2247 ((struct ib_type *)kzalloc(ib_dev->ops.size_##ib_type, gfp))
2248
Leon Romanovsky30471d42019-02-03 14:55:50 +02002249#define rdma_zalloc_drv_obj(ib_dev, ib_type) \
Leon Romanovskyf6316032019-03-28 15:12:58 +02002250 rdma_zalloc_drv_obj_gfp(ib_dev, ib_type, GFP_KERNEL)
Leon Romanovsky30471d42019-02-03 14:55:50 +02002251
2252#define DECLARE_RDMA_OBJ_SIZE(ib_struct) size_t size_##ib_struct
2253
Kamal Heib521ed0d2018-12-10 21:09:30 +02002254/**
2255 * struct ib_device_ops - InfiniBand device operations
2256 * This structure defines all the InfiniBand device operations, providers will
2257 * need to define the supported operations, otherwise they will be set to null.
2258 */
2259struct ib_device_ops {
Jason Gunthorpe7a154142019-06-05 14:39:26 -03002260 struct module *owner;
Jason Gunthorpeb9560a42019-06-05 14:39:24 -03002261 enum rdma_driver_id driver_id;
Jason Gunthorpe72c6ec12019-06-05 14:39:25 -03002262 u32 uverbs_abi_ver;
Jason Gunthorpe8f71bb02019-06-13 21:38:19 -03002263 unsigned int uverbs_no_driver_id_binding:1;
Jason Gunthorpeb9560a42019-06-05 14:39:24 -03002264
Kamal Heib521ed0d2018-12-10 21:09:30 +02002265 int (*post_send)(struct ib_qp *qp, const struct ib_send_wr *send_wr,
2266 const struct ib_send_wr **bad_send_wr);
2267 int (*post_recv)(struct ib_qp *qp, const struct ib_recv_wr *recv_wr,
2268 const struct ib_recv_wr **bad_recv_wr);
2269 void (*drain_rq)(struct ib_qp *qp);
2270 void (*drain_sq)(struct ib_qp *qp);
2271 int (*poll_cq)(struct ib_cq *cq, int num_entries, struct ib_wc *wc);
2272 int (*peek_cq)(struct ib_cq *cq, int wc_cnt);
2273 int (*req_notify_cq)(struct ib_cq *cq, enum ib_cq_notify_flags flags);
2274 int (*req_ncomp_notif)(struct ib_cq *cq, int wc_cnt);
2275 int (*post_srq_recv)(struct ib_srq *srq,
2276 const struct ib_recv_wr *recv_wr,
2277 const struct ib_recv_wr **bad_recv_wr);
2278 int (*process_mad)(struct ib_device *device, int process_mad_flags,
2279 u8 port_num, const struct ib_wc *in_wc,
2280 const struct ib_grh *in_grh,
2281 const struct ib_mad_hdr *in_mad, size_t in_mad_size,
2282 struct ib_mad_hdr *out_mad, size_t *out_mad_size,
2283 u16 *out_mad_pkey_index);
2284 int (*query_device)(struct ib_device *device,
2285 struct ib_device_attr *device_attr,
2286 struct ib_udata *udata);
2287 int (*modify_device)(struct ib_device *device, int device_modify_mask,
2288 struct ib_device_modify *device_modify);
2289 void (*get_dev_fw_str)(struct ib_device *device, char *str);
2290 const struct cpumask *(*get_vector_affinity)(struct ib_device *ibdev,
2291 int comp_vector);
2292 int (*query_port)(struct ib_device *device, u8 port_num,
2293 struct ib_port_attr *port_attr);
2294 int (*modify_port)(struct ib_device *device, u8 port_num,
2295 int port_modify_mask,
2296 struct ib_port_modify *port_modify);
2297 /**
2298 * The following mandatory functions are used only at device
2299 * registration. Keep functions such as these at the end of this
2300 * structure to avoid cache line misses when accessing struct ib_device
2301 * in fast paths.
2302 */
2303 int (*get_port_immutable)(struct ib_device *device, u8 port_num,
2304 struct ib_port_immutable *immutable);
2305 enum rdma_link_layer (*get_link_layer)(struct ib_device *device,
2306 u8 port_num);
2307 /**
2308 * When calling get_netdev, the HW vendor's driver should return the
2309 * net device of device @device at port @port_num or NULL if such
2310 * a net device doesn't exist. The vendor driver should call dev_hold
2311 * on this net device. The HW vendor's device driver must guarantee
2312 * that this function returns NULL before the net device has finished
2313 * NETDEV_UNREGISTER state.
2314 */
2315 struct net_device *(*get_netdev)(struct ib_device *device, u8 port_num);
2316 /**
2317 * rdma netdev operation
2318 *
2319 * Driver implementing alloc_rdma_netdev or rdma_netdev_get_params
2320 * must return -EOPNOTSUPP if it doesn't support the specified type.
2321 */
2322 struct net_device *(*alloc_rdma_netdev)(
2323 struct ib_device *device, u8 port_num, enum rdma_netdev_t type,
2324 const char *name, unsigned char name_assign_type,
2325 void (*setup)(struct net_device *));
2326
2327 int (*rdma_netdev_get_params)(struct ib_device *device, u8 port_num,
2328 enum rdma_netdev_t type,
2329 struct rdma_netdev_alloc_params *params);
2330 /**
2331 * query_gid should be return GID value for @device, when @port_num
2332 * link layer is either IB or iWarp. It is no-op if @port_num port
2333 * is RoCE link layer.
2334 */
2335 int (*query_gid)(struct ib_device *device, u8 port_num, int index,
2336 union ib_gid *gid);
2337 /**
2338 * When calling add_gid, the HW vendor's driver should add the gid
2339 * of device of port at gid index available at @attr. Meta-info of
2340 * that gid (for example, the network device related to this gid) is
2341 * available at @attr. @context allows the HW vendor driver to store
2342 * extra information together with a GID entry. The HW vendor driver may
2343 * allocate memory to contain this information and store it in @context
2344 * when a new GID entry is written to. Params are consistent until the
2345 * next call of add_gid or delete_gid. The function should return 0 on
2346 * success or error otherwise. The function could be called
2347 * concurrently for different ports. This function is only called when
2348 * roce_gid_table is used.
2349 */
2350 int (*add_gid)(const struct ib_gid_attr *attr, void **context);
2351 /**
2352 * When calling del_gid, the HW vendor's driver should delete the
2353 * gid of device @device at gid index gid_index of port port_num
2354 * available in @attr.
2355 * Upon the deletion of a GID entry, the HW vendor must free any
2356 * allocated memory. The caller will clear @context afterwards.
2357 * This function is only called when roce_gid_table is used.
2358 */
2359 int (*del_gid)(const struct ib_gid_attr *attr, void **context);
2360 int (*query_pkey)(struct ib_device *device, u8 port_num, u16 index,
2361 u16 *pkey);
Leon Romanovskya2a074e2019-02-12 20:39:16 +02002362 int (*alloc_ucontext)(struct ib_ucontext *context,
2363 struct ib_udata *udata);
2364 void (*dealloc_ucontext)(struct ib_ucontext *context);
Kamal Heib521ed0d2018-12-10 21:09:30 +02002365 int (*mmap)(struct ib_ucontext *context, struct vm_area_struct *vma);
2366 void (*disassociate_ucontext)(struct ib_ucontext *ibcontext);
Shamir Rabinovitchff23dfa2019-03-31 19:10:07 +03002367 int (*alloc_pd)(struct ib_pd *pd, struct ib_udata *udata);
Shamir Rabinovitchc4367a22019-03-31 19:10:05 +03002368 void (*dealloc_pd)(struct ib_pd *pd, struct ib_udata *udata);
Leon Romanovskyd3456912019-04-03 16:42:42 +03002369 int (*create_ah)(struct ib_ah *ah, struct rdma_ah_attr *ah_attr,
2370 u32 flags, struct ib_udata *udata);
Kamal Heib521ed0d2018-12-10 21:09:30 +02002371 int (*modify_ah)(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
2372 int (*query_ah)(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
Leon Romanovskyd3456912019-04-03 16:42:42 +03002373 void (*destroy_ah)(struct ib_ah *ah, u32 flags);
Leon Romanovsky68e326d2019-04-03 16:42:43 +03002374 int (*create_srq)(struct ib_srq *srq,
2375 struct ib_srq_init_attr *srq_init_attr,
2376 struct ib_udata *udata);
Kamal Heib521ed0d2018-12-10 21:09:30 +02002377 int (*modify_srq)(struct ib_srq *srq, struct ib_srq_attr *srq_attr,
2378 enum ib_srq_attr_mask srq_attr_mask,
2379 struct ib_udata *udata);
2380 int (*query_srq)(struct ib_srq *srq, struct ib_srq_attr *srq_attr);
Leon Romanovsky68e326d2019-04-03 16:42:43 +03002381 void (*destroy_srq)(struct ib_srq *srq, struct ib_udata *udata);
Kamal Heib521ed0d2018-12-10 21:09:30 +02002382 struct ib_qp *(*create_qp)(struct ib_pd *pd,
2383 struct ib_qp_init_attr *qp_init_attr,
2384 struct ib_udata *udata);
2385 int (*modify_qp)(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
2386 int qp_attr_mask, struct ib_udata *udata);
2387 int (*query_qp)(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
2388 int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr);
Shamir Rabinovitchc4367a22019-03-31 19:10:05 +03002389 int (*destroy_qp)(struct ib_qp *qp, struct ib_udata *udata);
Leon Romanovskye39afe32019-05-28 14:37:29 +03002390 int (*create_cq)(struct ib_cq *cq, const struct ib_cq_init_attr *attr,
2391 struct ib_udata *udata);
Kamal Heib521ed0d2018-12-10 21:09:30 +02002392 int (*modify_cq)(struct ib_cq *cq, u16 cq_count, u16 cq_period);
Leon Romanovskya52c8e22019-05-28 14:37:28 +03002393 void (*destroy_cq)(struct ib_cq *cq, struct ib_udata *udata);
Kamal Heib521ed0d2018-12-10 21:09:30 +02002394 int (*resize_cq)(struct ib_cq *cq, int cqe, struct ib_udata *udata);
2395 struct ib_mr *(*get_dma_mr)(struct ib_pd *pd, int mr_access_flags);
2396 struct ib_mr *(*reg_user_mr)(struct ib_pd *pd, u64 start, u64 length,
2397 u64 virt_addr, int mr_access_flags,
2398 struct ib_udata *udata);
2399 int (*rereg_user_mr)(struct ib_mr *mr, int flags, u64 start, u64 length,
2400 u64 virt_addr, int mr_access_flags,
2401 struct ib_pd *pd, struct ib_udata *udata);
Shamir Rabinovitchc4367a22019-03-31 19:10:05 +03002402 int (*dereg_mr)(struct ib_mr *mr, struct ib_udata *udata);
Kamal Heib521ed0d2018-12-10 21:09:30 +02002403 struct ib_mr *(*alloc_mr)(struct ib_pd *pd, enum ib_mr_type mr_type,
Shamir Rabinovitchc4367a22019-03-31 19:10:05 +03002404 u32 max_num_sg, struct ib_udata *udata);
Israel Rukshin26bc7ea2019-06-11 18:52:39 +03002405 struct ib_mr *(*alloc_mr_integrity)(struct ib_pd *pd,
2406 u32 max_num_data_sg,
2407 u32 max_num_meta_sg);
Moni Shouaad8a4492018-12-11 13:37:52 +02002408 int (*advise_mr)(struct ib_pd *pd,
2409 enum ib_uverbs_advise_mr_advice advice, u32 flags,
2410 struct ib_sge *sg_list, u32 num_sge,
2411 struct uverbs_attr_bundle *attrs);
Kamal Heib521ed0d2018-12-10 21:09:30 +02002412 int (*map_mr_sg)(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
2413 unsigned int *sg_offset);
2414 int (*check_mr_status)(struct ib_mr *mr, u32 check_mask,
2415 struct ib_mr_status *mr_status);
2416 struct ib_mw *(*alloc_mw)(struct ib_pd *pd, enum ib_mw_type type,
2417 struct ib_udata *udata);
2418 int (*dealloc_mw)(struct ib_mw *mw);
2419 struct ib_fmr *(*alloc_fmr)(struct ib_pd *pd, int mr_access_flags,
2420 struct ib_fmr_attr *fmr_attr);
2421 int (*map_phys_fmr)(struct ib_fmr *fmr, u64 *page_list, int list_len,
2422 u64 iova);
2423 int (*unmap_fmr)(struct list_head *fmr_list);
2424 int (*dealloc_fmr)(struct ib_fmr *fmr);
Moni Shouace513462019-08-19 14:17:08 +03002425 void (*invalidate_range)(struct ib_umem_odp *umem_odp,
2426 unsigned long start, unsigned long end);
Kamal Heib521ed0d2018-12-10 21:09:30 +02002427 int (*attach_mcast)(struct ib_qp *qp, union ib_gid *gid, u16 lid);
2428 int (*detach_mcast)(struct ib_qp *qp, union ib_gid *gid, u16 lid);
2429 struct ib_xrcd *(*alloc_xrcd)(struct ib_device *device,
Kamal Heib521ed0d2018-12-10 21:09:30 +02002430 struct ib_udata *udata);
Shamir Rabinovitchc4367a22019-03-31 19:10:05 +03002431 int (*dealloc_xrcd)(struct ib_xrcd *xrcd, struct ib_udata *udata);
Kamal Heib521ed0d2018-12-10 21:09:30 +02002432 struct ib_flow *(*create_flow)(struct ib_qp *qp,
2433 struct ib_flow_attr *flow_attr,
2434 int domain, struct ib_udata *udata);
2435 int (*destroy_flow)(struct ib_flow *flow_id);
2436 struct ib_flow_action *(*create_flow_action_esp)(
2437 struct ib_device *device,
2438 const struct ib_flow_action_attrs_esp *attr,
2439 struct uverbs_attr_bundle *attrs);
2440 int (*destroy_flow_action)(struct ib_flow_action *action);
2441 int (*modify_flow_action_esp)(
2442 struct ib_flow_action *action,
2443 const struct ib_flow_action_attrs_esp *attr,
2444 struct uverbs_attr_bundle *attrs);
2445 int (*set_vf_link_state)(struct ib_device *device, int vf, u8 port,
2446 int state);
2447 int (*get_vf_config)(struct ib_device *device, int vf, u8 port,
2448 struct ifla_vf_info *ivf);
2449 int (*get_vf_stats)(struct ib_device *device, int vf, u8 port,
2450 struct ifla_vf_stats *stats);
2451 int (*set_vf_guid)(struct ib_device *device, int vf, u8 port, u64 guid,
2452 int type);
2453 struct ib_wq *(*create_wq)(struct ib_pd *pd,
2454 struct ib_wq_init_attr *init_attr,
2455 struct ib_udata *udata);
Leon Romanovskya49b1dc2019-06-12 15:27:41 +03002456 void (*destroy_wq)(struct ib_wq *wq, struct ib_udata *udata);
Kamal Heib521ed0d2018-12-10 21:09:30 +02002457 int (*modify_wq)(struct ib_wq *wq, struct ib_wq_attr *attr,
2458 u32 wq_attr_mask, struct ib_udata *udata);
2459 struct ib_rwq_ind_table *(*create_rwq_ind_table)(
2460 struct ib_device *device,
2461 struct ib_rwq_ind_table_init_attr *init_attr,
2462 struct ib_udata *udata);
2463 int (*destroy_rwq_ind_table)(struct ib_rwq_ind_table *wq_ind_table);
2464 struct ib_dm *(*alloc_dm)(struct ib_device *device,
2465 struct ib_ucontext *context,
2466 struct ib_dm_alloc_attr *attr,
2467 struct uverbs_attr_bundle *attrs);
Shamir Rabinovitchc4367a22019-03-31 19:10:05 +03002468 int (*dealloc_dm)(struct ib_dm *dm, struct uverbs_attr_bundle *attrs);
Kamal Heib521ed0d2018-12-10 21:09:30 +02002469 struct ib_mr *(*reg_dm_mr)(struct ib_pd *pd, struct ib_dm *dm,
2470 struct ib_dm_mr_attr *attr,
2471 struct uverbs_attr_bundle *attrs);
2472 struct ib_counters *(*create_counters)(
2473 struct ib_device *device, struct uverbs_attr_bundle *attrs);
2474 int (*destroy_counters)(struct ib_counters *counters);
2475 int (*read_counters)(struct ib_counters *counters,
2476 struct ib_counters_read_attr *counters_read_attr,
2477 struct uverbs_attr_bundle *attrs);
Max Gurtovoy2cdfcdd2019-06-11 18:52:40 +03002478 int (*map_mr_sg_pi)(struct ib_mr *mr, struct scatterlist *data_sg,
2479 int data_sg_nents, unsigned int *data_sg_offset,
2480 struct scatterlist *meta_sg, int meta_sg_nents,
2481 unsigned int *meta_sg_offset);
2482
Kamal Heib521ed0d2018-12-10 21:09:30 +02002483 /**
2484 * alloc_hw_stats - Allocate a struct rdma_hw_stats and fill in the
2485 * driver initialized data. The struct is kfree()'ed by the sysfs
2486 * core when the device is removed. A lifespan of -1 in the return
2487 * struct tells the core to set a default lifespan.
2488 */
2489 struct rdma_hw_stats *(*alloc_hw_stats)(struct ib_device *device,
2490 u8 port_num);
2491 /**
2492 * get_hw_stats - Fill in the counter value(s) in the stats struct.
2493 * @index - The index in the value array we wish to have updated, or
2494 * num_counters if we want all stats updated
2495 * Return codes -
2496 * < 0 - Error, no counters updated
2497 * index - Updated the single counter pointed to by index
2498 * num_counters - Updated all counters (will reset the timestamp
2499 * and prevent further calls for lifespan milliseconds)
2500 * Drivers are allowed to update all counters in leiu of just the
2501 * one given in index at their option
2502 */
2503 int (*get_hw_stats)(struct ib_device *device,
2504 struct rdma_hw_stats *stats, u8 port, int index);
Parav Panditea4baf72018-12-18 14:28:30 +02002505 /*
2506 * This function is called once for each port when a ib device is
2507 * registered.
2508 */
2509 int (*init_port)(struct ib_device *device, u8 port_num,
2510 struct kobject *port_sysfs);
Leon Romanovsky02da3752019-01-30 12:49:02 +02002511 /**
2512 * Allows rdma drivers to add their own restrack attributes.
2513 */
2514 int (*fill_res_entry)(struct sk_buff *msg,
2515 struct rdma_restrack_entry *entry);
Leon Romanovsky21a428a2019-02-03 14:55:51 +02002516
Jason Gunthorped0899892019-02-12 21:12:53 -07002517 /* Device lifecycle callbacks */
2518 /*
Jason Gunthorpeca223542019-02-12 21:12:56 -07002519 * Called after the device becomes registered, before clients are
2520 * attached
2521 */
2522 int (*enable_driver)(struct ib_device *dev);
2523 /*
Jason Gunthorped0899892019-02-12 21:12:53 -07002524 * This is called as part of ib_dealloc_device().
2525 */
2526 void (*dealloc_driver)(struct ib_device *dev);
2527
Kamal Heibdd05cb82019-04-29 14:59:06 +03002528 /* iWarp CM callbacks */
2529 void (*iw_add_ref)(struct ib_qp *qp);
2530 void (*iw_rem_ref)(struct ib_qp *qp);
2531 struct ib_qp *(*iw_get_qp)(struct ib_device *device, int qpn);
2532 int (*iw_connect)(struct iw_cm_id *cm_id,
2533 struct iw_cm_conn_param *conn_param);
2534 int (*iw_accept)(struct iw_cm_id *cm_id,
2535 struct iw_cm_conn_param *conn_param);
2536 int (*iw_reject)(struct iw_cm_id *cm_id, const void *pdata,
2537 u8 pdata_len);
2538 int (*iw_create_listen)(struct iw_cm_id *cm_id, int backlog);
2539 int (*iw_destroy_listen)(struct iw_cm_id *cm_id);
Mark Zhang99fa3312019-07-02 13:02:35 +03002540 /**
2541 * counter_bind_qp - Bind a QP to a counter.
2542 * @counter - The counter to be bound. If counter->id is zero then
2543 * the driver needs to allocate a new counter and set counter->id
2544 */
2545 int (*counter_bind_qp)(struct rdma_counter *counter, struct ib_qp *qp);
2546 /**
2547 * counter_unbind_qp - Unbind the qp from the dynamically-allocated
2548 * counter and bind it onto the default one
2549 */
2550 int (*counter_unbind_qp)(struct ib_qp *qp);
2551 /**
2552 * counter_dealloc -De-allocate the hw counter
2553 */
2554 int (*counter_dealloc)(struct rdma_counter *counter);
Mark Zhangc4ffee72019-07-02 13:02:40 +03002555 /**
2556 * counter_alloc_stats - Allocate a struct rdma_hw_stats and fill in
2557 * the driver initialized data.
2558 */
2559 struct rdma_hw_stats *(*counter_alloc_stats)(
2560 struct rdma_counter *counter);
2561 /**
2562 * counter_update_stats - Query the stats value of this counter
2563 */
2564 int (*counter_update_stats)(struct rdma_counter *counter);
Kamal Heibdd05cb82019-04-29 14:59:06 +03002565
Leon Romanovskyd3456912019-04-03 16:42:42 +03002566 DECLARE_RDMA_OBJ_SIZE(ib_ah);
Leon Romanovskye39afe32019-05-28 14:37:29 +03002567 DECLARE_RDMA_OBJ_SIZE(ib_cq);
Leon Romanovsky21a428a2019-02-03 14:55:51 +02002568 DECLARE_RDMA_OBJ_SIZE(ib_pd);
Leon Romanovsky68e326d2019-04-03 16:42:43 +03002569 DECLARE_RDMA_OBJ_SIZE(ib_srq);
Leon Romanovskya2a074e2019-02-12 20:39:16 +02002570 DECLARE_RDMA_OBJ_SIZE(ib_ucontext);
Kamal Heib521ed0d2018-12-10 21:09:30 +02002571};
2572
Parav Panditcebe5562019-02-26 13:56:11 +02002573struct ib_core_device {
2574 /* device must be the first element in structure until,
2575 * union of ib_core_device and device exists in ib_device.
2576 */
2577 struct device dev;
Parav Pandit4e0f7b92019-02-26 13:56:13 +02002578 possible_net_t rdma_net;
Parav Panditcebe5562019-02-26 13:56:11 +02002579 struct kobject *ports_kobj;
2580 struct list_head port_list;
2581 struct ib_device *owner; /* reach back to owner ib_device */
2582};
Leon Romanovsky41eda652019-02-18 22:25:47 +02002583
Parav Panditcebe5562019-02-26 13:56:11 +02002584struct rdma_restrack_root;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002585struct ib_device {
Bart Van Assche0957c292017-03-07 22:56:53 +00002586 /* Do not access @dma_device directly from ULP nor from HW drivers. */
2587 struct device *dma_device;
Kamal Heib3023a1e2018-12-10 21:09:48 +02002588 struct ib_device_ops ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002589 char name[IB_DEVICE_NAME_MAX];
Jason Gunthorpe324e2272019-02-12 21:12:51 -07002590 struct rcu_head rcu_head;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002591
2592 struct list_head event_handler_list;
2593 spinlock_t event_handler_lock;
2594
Jason Gunthorpe921eab12019-02-06 22:41:54 -07002595 struct rw_semaphore client_data_rwsem;
Jason Gunthorpe0df91bb2019-02-06 22:41:53 -07002596 struct xarray client_data;
Jason Gunthorped0899892019-02-12 21:12:53 -07002597 struct mutex unregistration_lock;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002598
2599 struct ib_cache cache;
Ira Weiny77386132015-05-13 20:02:58 -04002600 /**
Jason Gunthorpe8ceb1352019-02-12 21:12:48 -07002601 * port_data is indexed by port number
Ira Weiny77386132015-05-13 20:02:58 -04002602 */
Jason Gunthorpe8ceb1352019-02-12 21:12:48 -07002603 struct ib_port_data *port_data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002604
Michael S. Tsirkinf4fd0b22007-05-03 13:48:47 +03002605 int num_comp_vectors;
2606
Parav Panditcebe5562019-02-26 13:56:11 +02002607 union {
2608 struct device dev;
2609 struct ib_core_device coredev;
2610 };
2611
Parav Panditd4122f52018-10-11 22:31:53 +03002612 /* First group for device attributes,
2613 * Second group for driver provided attributes (optional).
2614 * It is NULL terminated array.
2615 */
2616 const struct attribute_group *groups[3];
Parav Panditadee9f32018-09-05 09:47:58 +03002617
Alexander Chiang17a55f72010-02-02 19:09:16 +00002618 u64 uverbs_cmd_mask;
Yann Droneaudf21519b2013-11-06 23:21:49 +01002619 u64 uverbs_ex_cmd_mask;
Roland Dreier274c0892005-09-29 14:17:48 -07002620
Yuval Shaiabd99fde2016-08-25 10:57:07 -07002621 char node_desc[IB_DEVICE_NODE_DESC_MAX];
Sean Heftycf311cd2006-01-10 07:39:34 -08002622 __be64 node_guid;
Steve Wise96f15c02008-07-14 23:48:53 -07002623 u32 local_dma_lkey;
Hal Rosenstock41390322015-06-29 09:57:00 -04002624 u16 is_switch:1;
Gal Pressman6780c4f2019-01-22 10:08:22 +02002625 /* Indicates kernel verbs support, should not be used in drivers */
2626 u16 kverbs_provider:1;
Yamin Friedmanda662972019-07-08 13:59:03 +03002627 /* CQ adaptive moderation (RDMA DIM) */
2628 u16 use_cq_dim:1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002629 u8 node_type;
2630 u8 phys_port_cnt;
Ira Weiny3e153a92015-12-18 10:59:44 +02002631 struct ib_device_attr attrs;
Christoph Lameterb40f4752016-05-16 12:49:33 -05002632 struct attribute_group *hw_stats_ag;
2633 struct rdma_hw_stats *hw_stats;
Ira Weiny77386132015-05-13 20:02:58 -04002634
Parav Pandit43579b52017-01-10 00:02:14 +00002635#ifdef CONFIG_CGROUP_RDMA
2636 struct rdmacg_device cg_device;
2637#endif
2638
Leon Romanovskyecc82c52017-06-18 14:39:59 +03002639 u32 index;
Leon Romanovsky41eda652019-02-18 22:25:47 +02002640 struct rdma_restrack_root *res;
Leon Romanovskyecc82c52017-06-18 14:39:59 +03002641
Jason Gunthorpe0cbf4322018-11-12 22:59:50 +02002642 const struct uapi_definition *driver_def;
Jason Gunthorped79af722019-01-10 14:02:24 -07002643
Parav Pandit01b67112018-11-16 03:50:57 +02002644 /*
Jason Gunthorped79af722019-01-10 14:02:24 -07002645 * Positive refcount indicates that the device is currently
2646 * registered and cannot be unregistered.
Parav Pandit01b67112018-11-16 03:50:57 +02002647 */
2648 refcount_t refcount;
2649 struct completion unreg_completion;
Jason Gunthorped0899892019-02-12 21:12:53 -07002650 struct work_struct unregistration_work;
Steve Wise3856ec42019-02-15 11:03:53 -08002651
2652 const struct rdma_link_ops *link_ops;
Parav Pandit4e0f7b92019-02-26 13:56:13 +02002653
2654 /* Protects compat_devs xarray modifications */
2655 struct mutex compat_devs_mutex;
2656 /* Maintains compat devices for each net namespace */
2657 struct xarray compat_devs;
Kamal Heibdd05cb82019-04-29 14:59:06 +03002658
2659 /* Used by iWarp CM */
2660 char iw_ifname[IFNAMSIZ];
2661 u32 iw_driver_flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002662};
2663
Jason Gunthorpe0e2d00e2019-06-13 21:38:18 -03002664struct ib_client_nl_info;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002665struct ib_client {
Jason Gunthorpee59178d2019-02-06 22:41:52 -07002666 const char *name;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002667 void (*add) (struct ib_device *);
Haggai Eran7c1eb452015-07-30 17:50:14 +03002668 void (*remove)(struct ib_device *, void *client_data);
Leon Romanovskydc1435c2019-05-17 15:43:10 +03002669 void (*rename)(struct ib_device *dev, void *client_data);
Jason Gunthorpe0e2d00e2019-06-13 21:38:18 -03002670 int (*get_nl_info)(struct ib_device *ibdev, void *client_data,
2671 struct ib_client_nl_info *res);
2672 int (*get_global_nl_info)(struct ib_client_nl_info *res);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002673
Yotam Kenneth9268f722015-07-30 17:50:15 +03002674 /* Returns the net_dev belonging to this ib_client and matching the
2675 * given parameters.
2676 * @dev: An RDMA device that the net_dev use for communication.
2677 * @port: A physical port number on the RDMA device.
2678 * @pkey: P_Key that the net_dev uses if applicable.
2679 * @gid: A GID that the net_dev uses to communicate.
2680 * @addr: An IP address the net_dev is configured with.
2681 * @client_data: The device's client data set by ib_set_client_data().
2682 *
2683 * An ib_client that implements a net_dev on top of RDMA devices
2684 * (such as IP over IB) should implement this callback, allowing the
2685 * rdma_cm module to find the right net_dev for a given request.
2686 *
2687 * The caller is responsible for calling dev_put on the returned
2688 * netdev. */
2689 struct net_device *(*get_net_dev_by_params)(
2690 struct ib_device *dev,
2691 u8 port,
2692 u16 pkey,
2693 const union ib_gid *gid,
2694 const struct sockaddr *addr,
2695 void *client_data);
Jason Gunthorpe621e55f2019-07-31 11:18:40 +03002696
2697 refcount_t uses;
2698 struct completion uses_zero;
Jason Gunthorpee59178d2019-02-06 22:41:52 -07002699 u32 client_id;
Gal Pressman6780c4f2019-01-22 10:08:22 +02002700
2701 /* kverbs are not required by the client */
2702 u8 no_kverbs_req:1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002703};
2704
Shiraz Saleema8082732019-05-06 08:53:33 -05002705/*
2706 * IB block DMA iterator
2707 *
2708 * Iterates the DMA-mapped SGL in contiguous memory blocks aligned
2709 * to a HW supported page size.
2710 */
2711struct ib_block_iter {
2712 /* internal states */
2713 struct scatterlist *__sg; /* sg holding the current aligned block */
2714 dma_addr_t __dma_addr; /* unaligned DMA address of this block */
2715 unsigned int __sg_nents; /* number of SG entries */
2716 unsigned int __sg_advance; /* number of bytes to advance in sg in next step */
2717 unsigned int __pg_bit; /* alignment of current block */
2718};
2719
Leon Romanovsky459cc692019-01-30 12:49:11 +02002720struct ib_device *_ib_alloc_device(size_t size);
2721#define ib_alloc_device(drv_struct, member) \
2722 container_of(_ib_alloc_device(sizeof(struct drv_struct) + \
2723 BUILD_BUG_ON_ZERO(offsetof( \
2724 struct drv_struct, member))), \
2725 struct drv_struct, member)
2726
Linus Torvalds1da177e2005-04-16 15:20:36 -07002727void ib_dealloc_device(struct ib_device *device);
2728
Leon Romanovsky9abb0d12017-06-27 16:49:53 +03002729void ib_get_device_fw_str(struct ib_device *device, char *str);
Ira Weiny5fa76c22016-06-15 02:21:56 -04002730
Parav Panditea4baf72018-12-18 14:28:30 +02002731int ib_register_device(struct ib_device *device, const char *name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002732void ib_unregister_device(struct ib_device *device);
Jason Gunthorped0899892019-02-12 21:12:53 -07002733void ib_unregister_driver(enum rdma_driver_id driver_id);
2734void ib_unregister_device_and_put(struct ib_device *device);
2735void ib_unregister_device_queued(struct ib_device *ib_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002736
2737int ib_register_client (struct ib_client *client);
2738void ib_unregister_client(struct ib_client *client);
2739
Shiraz Saleema8082732019-05-06 08:53:33 -05002740void __rdma_block_iter_start(struct ib_block_iter *biter,
2741 struct scatterlist *sglist,
2742 unsigned int nents,
2743 unsigned long pgsz);
2744bool __rdma_block_iter_next(struct ib_block_iter *biter);
2745
2746/**
2747 * rdma_block_iter_dma_address - get the aligned dma address of the current
2748 * block held by the block iterator.
2749 * @biter: block iterator holding the memory block
2750 */
2751static inline dma_addr_t
2752rdma_block_iter_dma_address(struct ib_block_iter *biter)
2753{
2754 return biter->__dma_addr & ~(BIT_ULL(biter->__pg_bit) - 1);
2755}
2756
2757/**
2758 * rdma_for_each_block - iterate over contiguous memory blocks of the sg list
2759 * @sglist: sglist to iterate over
2760 * @biter: block iterator holding the memory block
2761 * @nents: maximum number of sg entries to iterate over
2762 * @pgsz: best HW supported page size to use
2763 *
2764 * Callers may use rdma_block_iter_dma_address() to get each
2765 * blocks aligned DMA address.
2766 */
2767#define rdma_for_each_block(sglist, biter, nents, pgsz) \
2768 for (__rdma_block_iter_start(biter, sglist, nents, \
2769 pgsz); \
2770 __rdma_block_iter_next(biter);)
2771
Jason Gunthorpe0df91bb2019-02-06 22:41:53 -07002772/**
2773 * ib_get_client_data - Get IB client context
2774 * @device:Device to get context for
2775 * @client:Client to get context for
2776 *
2777 * ib_get_client_data() returns the client context data set with
2778 * ib_set_client_data(). This can only be called while the client is
2779 * registered to the device, once the ib_client remove() callback returns this
2780 * cannot be called.
2781 */
2782static inline void *ib_get_client_data(struct ib_device *device,
2783 struct ib_client *client)
2784{
2785 return xa_load(&device->client_data, client->client_id);
2786}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002787void ib_set_client_data(struct ib_device *device, struct ib_client *client,
2788 void *data);
Kamal Heib521ed0d2018-12-10 21:09:30 +02002789void ib_set_device_ops(struct ib_device *device,
2790 const struct ib_device_ops *ops);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002791
Jason Gunthorpe5f9794d2018-09-16 20:43:08 +03002792#if IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS)
2793int rdma_user_mmap_io(struct ib_ucontext *ucontext, struct vm_area_struct *vma,
2794 unsigned long pfn, unsigned long size, pgprot_t prot);
Jason Gunthorpe5f9794d2018-09-16 20:43:08 +03002795#else
2796static inline int rdma_user_mmap_io(struct ib_ucontext *ucontext,
2797 struct vm_area_struct *vma,
2798 unsigned long pfn, unsigned long size,
2799 pgprot_t prot)
2800{
2801 return -EINVAL;
2802}
Jason Gunthorpe5f9794d2018-09-16 20:43:08 +03002803#endif
2804
Roland Dreiere2773c02005-07-07 17:57:10 -07002805static inline int ib_copy_from_udata(void *dest, struct ib_udata *udata, size_t len)
2806{
2807 return copy_from_user(dest, udata->inbuf, len) ? -EFAULT : 0;
2808}
2809
2810static inline int ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len)
2811{
Yann Droneaud43c611652015-02-05 22:10:18 +01002812 return copy_to_user(udata->outbuf, src, len) ? -EFAULT : 0;
Roland Dreiere2773c02005-07-07 17:57:10 -07002813}
2814
Matan Barakc66db312018-03-19 15:02:36 +02002815static inline bool ib_is_buffer_cleared(const void __user *p,
2816 size_t len)
Matan Barak301a7212015-12-15 20:30:10 +02002817{
Markus Elfring92d27ae2016-08-22 18:23:24 +02002818 bool ret;
Matan Barak301a7212015-12-15 20:30:10 +02002819 u8 *buf;
2820
2821 if (len > USHRT_MAX)
2822 return false;
2823
Markus Elfring92d27ae2016-08-22 18:23:24 +02002824 buf = memdup_user(p, len);
2825 if (IS_ERR(buf))
Matan Barak301a7212015-12-15 20:30:10 +02002826 return false;
2827
Matan Barak301a7212015-12-15 20:30:10 +02002828 ret = !memchr_inv(buf, 0, len);
Matan Barak301a7212015-12-15 20:30:10 +02002829 kfree(buf);
2830 return ret;
2831}
2832
Matan Barakc66db312018-03-19 15:02:36 +02002833static inline bool ib_is_udata_cleared(struct ib_udata *udata,
2834 size_t offset,
2835 size_t len)
2836{
2837 return ib_is_buffer_cleared(udata->inbuf + offset, len);
2838}
2839
Roland Dreier8a518662006-02-13 12:48:12 -08002840/**
Yishai Hadas1c774832018-06-20 17:11:39 +03002841 * ib_is_destroy_retryable - Check whether the uobject destruction
2842 * is retryable.
2843 * @ret: The initial destruction return code
2844 * @why: remove reason
2845 * @uobj: The uobject that is destroyed
2846 *
2847 * This function is a helper function that IB layer and low-level drivers
2848 * can use to consider whether the destruction of the given uobject is
2849 * retry-able.
2850 * It checks the original return code, if it wasn't success the destruction
2851 * is retryable according to the ucontext state (i.e. cleanup_retryable) and
2852 * the remove reason. (i.e. why).
2853 * Must be called with the object locked for destroy.
2854 */
2855static inline bool ib_is_destroy_retryable(int ret, enum rdma_remove_reason why,
2856 struct ib_uobject *uobj)
2857{
2858 return ret && (why == RDMA_REMOVE_DESTROY ||
2859 uobj->context->cleanup_retryable);
2860}
2861
2862/**
2863 * ib_destroy_usecnt - Called during destruction to check the usecnt
2864 * @usecnt: The usecnt atomic
2865 * @why: remove reason
2866 * @uobj: The uobject that is destroyed
2867 *
2868 * Non-zero usecnts will block destruction unless destruction was triggered by
2869 * a ucontext cleanup.
2870 */
2871static inline int ib_destroy_usecnt(atomic_t *usecnt,
2872 enum rdma_remove_reason why,
2873 struct ib_uobject *uobj)
2874{
2875 if (atomic_read(usecnt) && ib_is_destroy_retryable(-EBUSY, why, uobj))
2876 return -EBUSY;
2877 return 0;
2878}
2879
2880/**
Roland Dreier8a518662006-02-13 12:48:12 -08002881 * ib_modify_qp_is_ok - Check that the supplied attribute mask
2882 * contains all required attributes and no attributes not allowed for
2883 * the given QP state transition.
2884 * @cur_state: Current QP state
2885 * @next_state: Next QP state
2886 * @type: QP type
2887 * @mask: Mask of supplied QP attributes
2888 *
2889 * This function is a helper function that a low-level driver's
2890 * modify_qp method can use to validate the consumer's input. It
2891 * checks that cur_state and next_state are valid QP states, that a
2892 * transition from cur_state to next_state is allowed by the IB spec,
2893 * and that the attribute mask supplied is allowed for the transition.
2894 */
Leon Romanovsky19b1f542018-03-11 13:51:35 +02002895bool ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
Kamal Heibd31131b2018-10-02 16:11:21 +03002896 enum ib_qp_type type, enum ib_qp_attr_mask mask);
Roland Dreier8a518662006-02-13 12:48:12 -08002897
Leon Romanovskydcc98812017-08-17 15:50:36 +03002898void ib_register_event_handler(struct ib_event_handler *event_handler);
2899void ib_unregister_event_handler(struct ib_event_handler *event_handler);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002900void ib_dispatch_event(struct ib_event *event);
2901
Linus Torvalds1da177e2005-04-16 15:20:36 -07002902int ib_query_port(struct ib_device *device,
2903 u8 port_num, struct ib_port_attr *port_attr);
2904
Eli Cohena3f5ada2010-09-27 17:51:10 -07002905enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device,
2906 u8 port_num);
2907
Ira Weiny0cf18d72015-05-13 20:02:55 -04002908/**
Hal Rosenstock41390322015-06-29 09:57:00 -04002909 * rdma_cap_ib_switch - Check if the device is IB switch
2910 * @device: Device to check
2911 *
2912 * Device driver is responsible for setting is_switch bit on
2913 * in ib_device structure at init time.
2914 *
2915 * Return: true if the device is IB switch.
2916 */
2917static inline bool rdma_cap_ib_switch(const struct ib_device *device)
2918{
2919 return device->is_switch;
2920}
2921
2922/**
Ira Weiny0cf18d72015-05-13 20:02:55 -04002923 * rdma_start_port - Return the first valid port number for the device
2924 * specified
2925 *
2926 * @device: Device to be checked
2927 *
2928 * Return start port number
2929 */
2930static inline u8 rdma_start_port(const struct ib_device *device)
2931{
Hal Rosenstock41390322015-06-29 09:57:00 -04002932 return rdma_cap_ib_switch(device) ? 0 : 1;
Ira Weiny0cf18d72015-05-13 20:02:55 -04002933}
2934
2935/**
Jason Gunthorpeea1075e2019-02-12 21:12:47 -07002936 * rdma_for_each_port - Iterate over all valid port numbers of the IB device
2937 * @device - The struct ib_device * to iterate over
2938 * @iter - The unsigned int to store the port number
2939 */
2940#define rdma_for_each_port(device, iter) \
2941 for (iter = rdma_start_port(device + BUILD_BUG_ON_ZERO(!__same_type( \
2942 unsigned int, iter))); \
2943 iter <= rdma_end_port(device); (iter)++)
2944
2945/**
Ira Weiny0cf18d72015-05-13 20:02:55 -04002946 * rdma_end_port - Return the last valid port number for the device
2947 * specified
2948 *
2949 * @device: Device to be checked
2950 *
2951 * Return last port number
2952 */
2953static inline u8 rdma_end_port(const struct ib_device *device)
2954{
Hal Rosenstock41390322015-06-29 09:57:00 -04002955 return rdma_cap_ib_switch(device) ? 0 : device->phys_port_cnt;
Ira Weiny0cf18d72015-05-13 20:02:55 -04002956}
2957
Yuval Shaia24dc8312017-01-25 18:41:37 +02002958static inline int rdma_is_port_valid(const struct ib_device *device,
2959 unsigned int port)
2960{
2961 return (port >= rdma_start_port(device) &&
2962 port <= rdma_end_port(device));
2963}
2964
Artemy Kovalyovb02289b2018-07-04 15:57:50 +03002965static inline bool rdma_is_grh_required(const struct ib_device *device,
2966 u8 port_num)
2967{
Jason Gunthorpe8ceb1352019-02-12 21:12:48 -07002968 return device->port_data[port_num].immutable.core_cap_flags &
2969 RDMA_CORE_PORT_IB_GRH_REQUIRED;
Artemy Kovalyovb02289b2018-07-04 15:57:50 +03002970}
2971
Ira Weiny5ede9282015-05-31 17:15:29 -04002972static inline bool rdma_protocol_ib(const struct ib_device *device, u8 port_num)
Michael Wangde66be92015-05-05 14:50:19 +02002973{
Jason Gunthorpe8ceb1352019-02-12 21:12:48 -07002974 return device->port_data[port_num].immutable.core_cap_flags &
2975 RDMA_CORE_CAP_PROT_IB;
Michael Wangde66be92015-05-05 14:50:19 +02002976}
2977
Ira Weiny5ede9282015-05-31 17:15:29 -04002978static inline bool rdma_protocol_roce(const struct ib_device *device, u8 port_num)
Michael Wangde66be92015-05-05 14:50:19 +02002979{
Jason Gunthorpe8ceb1352019-02-12 21:12:48 -07002980 return device->port_data[port_num].immutable.core_cap_flags &
2981 (RDMA_CORE_CAP_PROT_ROCE | RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP);
Matan Barak7766a992015-12-23 14:56:50 +02002982}
2983
2984static inline bool rdma_protocol_roce_udp_encap(const struct ib_device *device, u8 port_num)
2985{
Jason Gunthorpe8ceb1352019-02-12 21:12:48 -07002986 return device->port_data[port_num].immutable.core_cap_flags &
2987 RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP;
Matan Barak7766a992015-12-23 14:56:50 +02002988}
2989
2990static inline bool rdma_protocol_roce_eth_encap(const struct ib_device *device, u8 port_num)
2991{
Jason Gunthorpe8ceb1352019-02-12 21:12:48 -07002992 return device->port_data[port_num].immutable.core_cap_flags &
2993 RDMA_CORE_CAP_PROT_ROCE;
Michael Wangde66be92015-05-05 14:50:19 +02002994}
2995
Ira Weiny5ede9282015-05-31 17:15:29 -04002996static inline bool rdma_protocol_iwarp(const struct ib_device *device, u8 port_num)
Michael Wangde66be92015-05-05 14:50:19 +02002997{
Jason Gunthorpe8ceb1352019-02-12 21:12:48 -07002998 return device->port_data[port_num].immutable.core_cap_flags &
2999 RDMA_CORE_CAP_PROT_IWARP;
Michael Wangde66be92015-05-05 14:50:19 +02003000}
3001
Ira Weiny5ede9282015-05-31 17:15:29 -04003002static inline bool rdma_ib_or_roce(const struct ib_device *device, u8 port_num)
Michael Wangde66be92015-05-05 14:50:19 +02003003{
Matan Barak7766a992015-12-23 14:56:50 +02003004 return rdma_protocol_ib(device, port_num) ||
3005 rdma_protocol_roce(device, port_num);
Michael Wangde66be92015-05-05 14:50:19 +02003006}
3007
Or Gerlitzaa773bd2017-01-24 13:02:35 +02003008static inline bool rdma_protocol_raw_packet(const struct ib_device *device, u8 port_num)
3009{
Jason Gunthorpe8ceb1352019-02-12 21:12:48 -07003010 return device->port_data[port_num].immutable.core_cap_flags &
3011 RDMA_CORE_CAP_PROT_RAW_PACKET;
Or Gerlitzaa773bd2017-01-24 13:02:35 +02003012}
3013
Or Gerlitzce1e0552017-01-24 13:02:38 +02003014static inline bool rdma_protocol_usnic(const struct ib_device *device, u8 port_num)
3015{
Jason Gunthorpe8ceb1352019-02-12 21:12:48 -07003016 return device->port_data[port_num].immutable.core_cap_flags &
3017 RDMA_CORE_CAP_PROT_USNIC;
Or Gerlitzce1e0552017-01-24 13:02:38 +02003018}
3019
Michael Wangc757dea2015-05-05 14:50:32 +02003020/**
Michael Wang296ec002015-05-18 10:41:45 +02003021 * rdma_cap_ib_mad - Check if the port of a device supports Infiniband
Michael Wangc757dea2015-05-05 14:50:32 +02003022 * Management Datagrams.
Michael Wang296ec002015-05-18 10:41:45 +02003023 * @device: Device to check
3024 * @port_num: Port number to check
Michael Wangc757dea2015-05-05 14:50:32 +02003025 *
Michael Wang296ec002015-05-18 10:41:45 +02003026 * Management Datagrams (MAD) are a required part of the InfiniBand
3027 * specification and are supported on all InfiniBand devices. A slightly
3028 * extended version are also supported on OPA interfaces.
Michael Wangc757dea2015-05-05 14:50:32 +02003029 *
Michael Wang296ec002015-05-18 10:41:45 +02003030 * Return: true if the port supports sending/receiving of MAD packets.
Michael Wangc757dea2015-05-05 14:50:32 +02003031 */
Ira Weiny5ede9282015-05-31 17:15:29 -04003032static inline bool rdma_cap_ib_mad(const struct ib_device *device, u8 port_num)
Michael Wangc757dea2015-05-05 14:50:32 +02003033{
Jason Gunthorpe8ceb1352019-02-12 21:12:48 -07003034 return device->port_data[port_num].immutable.core_cap_flags &
3035 RDMA_CORE_CAP_IB_MAD;
Michael Wangc757dea2015-05-05 14:50:32 +02003036}
3037
Michael Wang29541e32015-05-05 14:50:33 +02003038/**
Ira Weiny65995fe2015-06-06 14:38:32 -04003039 * rdma_cap_opa_mad - Check if the port of device provides support for OPA
3040 * Management Datagrams.
3041 * @device: Device to check
3042 * @port_num: Port number to check
3043 *
3044 * Intel OmniPath devices extend and/or replace the InfiniBand Management
3045 * datagrams with their own versions. These OPA MADs share many but not all of
3046 * the characteristics of InfiniBand MADs.
3047 *
3048 * OPA MADs differ in the following ways:
3049 *
3050 * 1) MADs are variable size up to 2K
3051 * IBTA defined MADs remain fixed at 256 bytes
3052 * 2) OPA SMPs must carry valid PKeys
3053 * 3) OPA SMP packets are a different format
3054 *
3055 * Return: true if the port supports OPA MAD packet formats.
3056 */
3057static inline bool rdma_cap_opa_mad(struct ib_device *device, u8 port_num)
3058{
Leon Romanovskyd3243da2019-03-10 17:27:46 +02003059 return device->port_data[port_num].immutable.core_cap_flags &
3060 RDMA_CORE_CAP_OPA_MAD;
Ira Weiny65995fe2015-06-06 14:38:32 -04003061}
3062
3063/**
Michael Wang296ec002015-05-18 10:41:45 +02003064 * rdma_cap_ib_smi - Check if the port of a device provides an Infiniband
3065 * Subnet Management Agent (SMA) on the Subnet Management Interface (SMI).
3066 * @device: Device to check
3067 * @port_num: Port number to check
Michael Wang29541e32015-05-05 14:50:33 +02003068 *
Michael Wang296ec002015-05-18 10:41:45 +02003069 * Each InfiniBand node is required to provide a Subnet Management Agent
3070 * that the subnet manager can access. Prior to the fabric being fully
3071 * configured by the subnet manager, the SMA is accessed via a well known
3072 * interface called the Subnet Management Interface (SMI). This interface
3073 * uses directed route packets to communicate with the SM to get around the
3074 * chicken and egg problem of the SM needing to know what's on the fabric
3075 * in order to configure the fabric, and needing to configure the fabric in
3076 * order to send packets to the devices on the fabric. These directed
3077 * route packets do not need the fabric fully configured in order to reach
3078 * their destination. The SMI is the only method allowed to send
3079 * directed route packets on an InfiniBand fabric.
Michael Wang29541e32015-05-05 14:50:33 +02003080 *
Michael Wang296ec002015-05-18 10:41:45 +02003081 * Return: true if the port provides an SMI.
Michael Wang29541e32015-05-05 14:50:33 +02003082 */
Ira Weiny5ede9282015-05-31 17:15:29 -04003083static inline bool rdma_cap_ib_smi(const struct ib_device *device, u8 port_num)
Michael Wang29541e32015-05-05 14:50:33 +02003084{
Jason Gunthorpe8ceb1352019-02-12 21:12:48 -07003085 return device->port_data[port_num].immutable.core_cap_flags &
3086 RDMA_CORE_CAP_IB_SMI;
Michael Wang29541e32015-05-05 14:50:33 +02003087}
3088
Michael Wang72219cea2015-05-05 14:50:34 +02003089/**
3090 * rdma_cap_ib_cm - Check if the port of device has the capability Infiniband
3091 * Communication Manager.
Michael Wang296ec002015-05-18 10:41:45 +02003092 * @device: Device to check
3093 * @port_num: Port number to check
Michael Wang72219cea2015-05-05 14:50:34 +02003094 *
Michael Wang296ec002015-05-18 10:41:45 +02003095 * The InfiniBand Communication Manager is one of many pre-defined General
3096 * Service Agents (GSA) that are accessed via the General Service
3097 * Interface (GSI). It's role is to facilitate establishment of connections
3098 * between nodes as well as other management related tasks for established
3099 * connections.
Michael Wang72219cea2015-05-05 14:50:34 +02003100 *
Michael Wang296ec002015-05-18 10:41:45 +02003101 * Return: true if the port supports an IB CM (this does not guarantee that
3102 * a CM is actually running however).
Michael Wang72219cea2015-05-05 14:50:34 +02003103 */
Ira Weiny5ede9282015-05-31 17:15:29 -04003104static inline bool rdma_cap_ib_cm(const struct ib_device *device, u8 port_num)
Michael Wang72219cea2015-05-05 14:50:34 +02003105{
Jason Gunthorpe8ceb1352019-02-12 21:12:48 -07003106 return device->port_data[port_num].immutable.core_cap_flags &
3107 RDMA_CORE_CAP_IB_CM;
Michael Wang72219cea2015-05-05 14:50:34 +02003108}
3109
Michael Wang04215332015-05-05 14:50:35 +02003110/**
3111 * rdma_cap_iw_cm - Check if the port of device has the capability IWARP
3112 * Communication Manager.
Michael Wang296ec002015-05-18 10:41:45 +02003113 * @device: Device to check
3114 * @port_num: Port number to check
Michael Wang04215332015-05-05 14:50:35 +02003115 *
Michael Wang296ec002015-05-18 10:41:45 +02003116 * Similar to above, but specific to iWARP connections which have a different
3117 * managment protocol than InfiniBand.
Michael Wang04215332015-05-05 14:50:35 +02003118 *
Michael Wang296ec002015-05-18 10:41:45 +02003119 * Return: true if the port supports an iWARP CM (this does not guarantee that
3120 * a CM is actually running however).
Michael Wang04215332015-05-05 14:50:35 +02003121 */
Ira Weiny5ede9282015-05-31 17:15:29 -04003122static inline bool rdma_cap_iw_cm(const struct ib_device *device, u8 port_num)
Michael Wang04215332015-05-05 14:50:35 +02003123{
Jason Gunthorpe8ceb1352019-02-12 21:12:48 -07003124 return device->port_data[port_num].immutable.core_cap_flags &
3125 RDMA_CORE_CAP_IW_CM;
Michael Wang04215332015-05-05 14:50:35 +02003126}
3127
Michael Wangfe53ba22015-05-05 14:50:36 +02003128/**
3129 * rdma_cap_ib_sa - Check if the port of device has the capability Infiniband
3130 * Subnet Administration.
Michael Wang296ec002015-05-18 10:41:45 +02003131 * @device: Device to check
3132 * @port_num: Port number to check
Michael Wangfe53ba22015-05-05 14:50:36 +02003133 *
Michael Wang296ec002015-05-18 10:41:45 +02003134 * An InfiniBand Subnet Administration (SA) service is a pre-defined General
3135 * Service Agent (GSA) provided by the Subnet Manager (SM). On InfiniBand
3136 * fabrics, devices should resolve routes to other hosts by contacting the
3137 * SA to query the proper route.
Michael Wangfe53ba22015-05-05 14:50:36 +02003138 *
Michael Wang296ec002015-05-18 10:41:45 +02003139 * Return: true if the port should act as a client to the fabric Subnet
3140 * Administration interface. This does not imply that the SA service is
3141 * running locally.
Michael Wangfe53ba22015-05-05 14:50:36 +02003142 */
Ira Weiny5ede9282015-05-31 17:15:29 -04003143static inline bool rdma_cap_ib_sa(const struct ib_device *device, u8 port_num)
Michael Wangfe53ba22015-05-05 14:50:36 +02003144{
Jason Gunthorpe8ceb1352019-02-12 21:12:48 -07003145 return device->port_data[port_num].immutable.core_cap_flags &
3146 RDMA_CORE_CAP_IB_SA;
Michael Wangfe53ba22015-05-05 14:50:36 +02003147}
3148
Michael Wanga31ad3b2015-05-05 14:50:37 +02003149/**
3150 * rdma_cap_ib_mcast - Check if the port of device has the capability Infiniband
3151 * Multicast.
Michael Wang296ec002015-05-18 10:41:45 +02003152 * @device: Device to check
3153 * @port_num: Port number to check
Michael Wanga31ad3b2015-05-05 14:50:37 +02003154 *
Michael Wang296ec002015-05-18 10:41:45 +02003155 * InfiniBand multicast registration is more complex than normal IPv4 or
3156 * IPv6 multicast registration. Each Host Channel Adapter must register
3157 * with the Subnet Manager when it wishes to join a multicast group. It
3158 * should do so only once regardless of how many queue pairs it subscribes
3159 * to this group. And it should leave the group only after all queue pairs
3160 * attached to the group have been detached.
Michael Wanga31ad3b2015-05-05 14:50:37 +02003161 *
Michael Wang296ec002015-05-18 10:41:45 +02003162 * Return: true if the port must undertake the additional adminstrative
3163 * overhead of registering/unregistering with the SM and tracking of the
3164 * total number of queue pairs attached to the multicast group.
Michael Wanga31ad3b2015-05-05 14:50:37 +02003165 */
Ira Weiny5ede9282015-05-31 17:15:29 -04003166static inline bool rdma_cap_ib_mcast(const struct ib_device *device, u8 port_num)
Michael Wanga31ad3b2015-05-05 14:50:37 +02003167{
3168 return rdma_cap_ib_sa(device, port_num);
3169}
3170
Michael Wangbc0f1d72015-05-05 14:50:38 +02003171/**
Michael Wang30a74ef2015-05-05 14:50:39 +02003172 * rdma_cap_af_ib - Check if the port of device has the capability
3173 * Native Infiniband Address.
Michael Wang296ec002015-05-18 10:41:45 +02003174 * @device: Device to check
3175 * @port_num: Port number to check
Michael Wang30a74ef2015-05-05 14:50:39 +02003176 *
Michael Wang296ec002015-05-18 10:41:45 +02003177 * InfiniBand addressing uses a port's GUID + Subnet Prefix to make a default
3178 * GID. RoCE uses a different mechanism, but still generates a GID via
3179 * a prescribed mechanism and port specific data.
Michael Wang30a74ef2015-05-05 14:50:39 +02003180 *
Michael Wang296ec002015-05-18 10:41:45 +02003181 * Return: true if the port uses a GID address to identify devices on the
3182 * network.
Michael Wang30a74ef2015-05-05 14:50:39 +02003183 */
Ira Weiny5ede9282015-05-31 17:15:29 -04003184static inline bool rdma_cap_af_ib(const struct ib_device *device, u8 port_num)
Michael Wang30a74ef2015-05-05 14:50:39 +02003185{
Jason Gunthorpe8ceb1352019-02-12 21:12:48 -07003186 return device->port_data[port_num].immutable.core_cap_flags &
3187 RDMA_CORE_CAP_AF_IB;
Michael Wang30a74ef2015-05-05 14:50:39 +02003188}
3189
3190/**
Michael Wang227128f2015-05-05 14:50:40 +02003191 * rdma_cap_eth_ah - Check if the port of device has the capability
Michael Wang296ec002015-05-18 10:41:45 +02003192 * Ethernet Address Handle.
3193 * @device: Device to check
3194 * @port_num: Port number to check
Michael Wang227128f2015-05-05 14:50:40 +02003195 *
Michael Wang296ec002015-05-18 10:41:45 +02003196 * RoCE is InfiniBand over Ethernet, and it uses a well defined technique
3197 * to fabricate GIDs over Ethernet/IP specific addresses native to the
3198 * port. Normally, packet headers are generated by the sending host
3199 * adapter, but when sending connectionless datagrams, we must manually
3200 * inject the proper headers for the fabric we are communicating over.
Michael Wang227128f2015-05-05 14:50:40 +02003201 *
Michael Wang296ec002015-05-18 10:41:45 +02003202 * Return: true if we are running as a RoCE port and must force the
3203 * addition of a Global Route Header built from our Ethernet Address
3204 * Handle into our header list for connectionless packets.
Michael Wang227128f2015-05-05 14:50:40 +02003205 */
Ira Weiny5ede9282015-05-31 17:15:29 -04003206static inline bool rdma_cap_eth_ah(const struct ib_device *device, u8 port_num)
Michael Wang227128f2015-05-05 14:50:40 +02003207{
Jason Gunthorpe8ceb1352019-02-12 21:12:48 -07003208 return device->port_data[port_num].immutable.core_cap_flags &
3209 RDMA_CORE_CAP_ETH_AH;
Michael Wang227128f2015-05-05 14:50:40 +02003210}
3211
3212/**
Dasaratharaman Chandramouli94d595c2017-03-20 19:38:09 -04003213 * rdma_cap_opa_ah - Check if the port of device supports
3214 * OPA Address handles
3215 * @device: Device to check
3216 * @port_num: Port number to check
3217 *
3218 * Return: true if we are running on an OPA device which supports
3219 * the extended OPA addressing.
3220 */
3221static inline bool rdma_cap_opa_ah(struct ib_device *device, u8 port_num)
3222{
Jason Gunthorpe8ceb1352019-02-12 21:12:48 -07003223 return (device->port_data[port_num].immutable.core_cap_flags &
Dasaratharaman Chandramouli94d595c2017-03-20 19:38:09 -04003224 RDMA_CORE_CAP_OPA_AH) == RDMA_CORE_CAP_OPA_AH;
3225}
3226
3227/**
Ira Weiny337877a2015-06-06 14:38:29 -04003228 * rdma_max_mad_size - Return the max MAD size required by this RDMA Port.
3229 *
3230 * @device: Device
3231 * @port_num: Port number
3232 *
3233 * This MAD size includes the MAD headers and MAD payload. No other headers
3234 * are included.
3235 *
3236 * Return the max MAD size required by the Port. Will return 0 if the port
3237 * does not support MADs
3238 */
3239static inline size_t rdma_max_mad_size(const struct ib_device *device, u8 port_num)
3240{
Jason Gunthorpe8ceb1352019-02-12 21:12:48 -07003241 return device->port_data[port_num].immutable.max_mad_size;
Ira Weiny337877a2015-06-06 14:38:29 -04003242}
3243
Matan Barak03db3a22015-07-30 18:33:26 +03003244/**
3245 * rdma_cap_roce_gid_table - Check if the port of device uses roce_gid_table
3246 * @device: Device to check
3247 * @port_num: Port number to check
3248 *
3249 * RoCE GID table mechanism manages the various GIDs for a device.
3250 *
3251 * NOTE: if allocating the port's GID table has failed, this call will still
3252 * return true, but any RoCE GID table API will fail.
3253 *
3254 * Return: true if the port uses RoCE GID table mechanism in order to manage
3255 * its GIDs.
3256 */
3257static inline bool rdma_cap_roce_gid_table(const struct ib_device *device,
3258 u8 port_num)
3259{
3260 return rdma_protocol_roce(device, port_num) &&
Kamal Heib3023a1e2018-12-10 21:09:48 +02003261 device->ops.add_gid && device->ops.del_gid;
Matan Barak03db3a22015-07-30 18:33:26 +03003262}
3263
Christoph Hellwig002516e2016-05-03 18:01:05 +02003264/*
3265 * Check if the device supports READ W/ INVALIDATE.
3266 */
3267static inline bool rdma_cap_read_inv(struct ib_device *dev, u32 port_num)
3268{
3269 /*
3270 * iWarp drivers must support READ W/ INVALIDATE. No other protocol
3271 * has support for it yet.
3272 */
3273 return rdma_protocol_iwarp(dev, port_num);
3274}
3275
Shiraz Saleem4a353392019-05-06 08:53:32 -05003276/**
3277 * rdma_find_pg_bit - Find page bit given address and HW supported page sizes
3278 *
3279 * @addr: address
3280 * @pgsz_bitmap: bitmap of HW supported page sizes
3281 */
3282static inline unsigned int rdma_find_pg_bit(unsigned long addr,
3283 unsigned long pgsz_bitmap)
3284{
3285 unsigned long align;
3286 unsigned long pgsz;
3287
3288 align = addr & -addr;
3289
3290 /* Find page bit such that addr is aligned to the highest supported
3291 * HW page size
3292 */
3293 pgsz = pgsz_bitmap & ~(-align << 1);
3294 if (!pgsz)
3295 return __ffs(pgsz_bitmap);
3296
3297 return __fls(pgsz);
3298}
3299
Eli Cohen50174a72016-03-11 22:58:38 +02003300int ib_set_vf_link_state(struct ib_device *device, int vf, u8 port,
3301 int state);
3302int ib_get_vf_config(struct ib_device *device, int vf, u8 port,
3303 struct ifla_vf_info *info);
3304int ib_get_vf_stats(struct ib_device *device, int vf, u8 port,
3305 struct ifla_vf_stats *stats);
3306int ib_set_vf_guid(struct ib_device *device, int vf, u8 port, u64 guid,
3307 int type);
3308
Linus Torvalds1da177e2005-04-16 15:20:36 -07003309int ib_query_pkey(struct ib_device *device,
3310 u8 port_num, u16 index, u16 *pkey);
3311
3312int ib_modify_device(struct ib_device *device,
3313 int device_modify_mask,
3314 struct ib_device_modify *device_modify);
3315
3316int ib_modify_port(struct ib_device *device,
3317 u8 port_num, int port_modify_mask,
3318 struct ib_port_modify *port_modify);
3319
Yosef Etigin5eb620c2007-05-14 07:26:51 +03003320int ib_find_gid(struct ib_device *device, union ib_gid *gid,
Parav Panditb26c4a12018-03-13 16:06:12 +02003321 u8 *port_num, u16 *index);
Yosef Etigin5eb620c2007-05-14 07:26:51 +03003322
3323int ib_find_pkey(struct ib_device *device,
3324 u8 port_num, u16 pkey, u16 *index);
3325
Christoph Hellwiged082d32016-09-05 12:56:17 +02003326enum ib_pd_flags {
3327 /*
3328 * Create a memory registration for all memory in the system and place
3329 * the rkey for it into pd->unsafe_global_rkey. This can be used by
3330 * ULPs to avoid the overhead of dynamic MRs.
3331 *
3332 * This flag is generally considered unsafe and must only be used in
3333 * extremly trusted environments. Every use of it will log a warning
3334 * in the kernel log.
3335 */
3336 IB_PD_UNSAFE_GLOBAL_RKEY = 0x01,
3337};
Linus Torvalds1da177e2005-04-16 15:20:36 -07003338
Christoph Hellwiged082d32016-09-05 12:56:17 +02003339struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags,
3340 const char *caller);
Shamir Rabinovitchc4367a22019-03-31 19:10:05 +03003341
Christoph Hellwiged082d32016-09-05 12:56:17 +02003342#define ib_alloc_pd(device, flags) \
Leon Romanovskye4496442018-01-28 11:17:18 +02003343 __ib_alloc_pd((device), (flags), KBUILD_MODNAME)
Shamir Rabinovitchc4367a22019-03-31 19:10:05 +03003344
3345/**
3346 * ib_dealloc_pd_user - Deallocate kernel/user PD
3347 * @pd: The protection domain
3348 * @udata: Valid user data or NULL for kernel objects
3349 */
3350void ib_dealloc_pd_user(struct ib_pd *pd, struct ib_udata *udata);
3351
3352/**
3353 * ib_dealloc_pd - Deallocate kernel PD
3354 * @pd: The protection domain
3355 *
3356 * NOTE: for user PD use ib_dealloc_pd_user with valid udata!
3357 */
3358static inline void ib_dealloc_pd(struct ib_pd *pd)
3359{
3360 ib_dealloc_pd_user(pd, NULL);
3361}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003362
Gal Pressmanb090c4e2018-12-12 11:09:05 +02003363enum rdma_create_ah_flags {
3364 /* In a sleepable context */
3365 RDMA_CREATE_AH_SLEEPABLE = BIT(0),
3366};
3367
Linus Torvalds1da177e2005-04-16 15:20:36 -07003368/**
Dasaratharaman Chandramouli0a18cfe2017-04-29 14:41:19 -04003369 * rdma_create_ah - Creates an address handle for the given address vector.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003370 * @pd: The protection domain associated with the address handle.
3371 * @ah_attr: The attributes of the address vector.
Gal Pressmanb090c4e2018-12-12 11:09:05 +02003372 * @flags: Create address handle flags (see enum rdma_create_ah_flags).
Linus Torvalds1da177e2005-04-16 15:20:36 -07003373 *
3374 * The address handle is used to reference a local or global destination
3375 * in all UD QP post sends.
3376 */
Gal Pressmanb090c4e2018-12-12 11:09:05 +02003377struct ib_ah *rdma_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr,
3378 u32 flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003379
3380/**
Parav Pandit5cda6582017-10-16 08:45:12 +03003381 * rdma_create_user_ah - Creates an address handle for the given address vector.
3382 * It resolves destination mac address for ah attribute of RoCE type.
3383 * @pd: The protection domain associated with the address handle.
3384 * @ah_attr: The attributes of the address vector.
3385 * @udata: pointer to user's input output buffer information need by
3386 * provider driver.
3387 *
3388 * It returns 0 on success and returns appropriate error code on error.
3389 * The address handle is used to reference a local or global destination
3390 * in all UD QP post sends.
3391 */
3392struct ib_ah *rdma_create_user_ah(struct ib_pd *pd,
3393 struct rdma_ah_attr *ah_attr,
3394 struct ib_udata *udata);
3395/**
Moni Shoua850d8fd2016-11-10 11:30:56 +02003396 * ib_get_gids_from_rdma_hdr - Get sgid and dgid from GRH or IPv4 header
3397 * work completion.
3398 * @hdr: the L3 header to parse
3399 * @net_type: type of header to parse
3400 * @sgid: place to store source gid
3401 * @dgid: place to store destination gid
3402 */
3403int ib_get_gids_from_rdma_hdr(const union rdma_network_hdr *hdr,
3404 enum rdma_network_type net_type,
3405 union ib_gid *sgid, union ib_gid *dgid);
3406
3407/**
3408 * ib_get_rdma_header_version - Get the header version
3409 * @hdr: the L3 header to parse
3410 */
3411int ib_get_rdma_header_version(const union rdma_network_hdr *hdr);
3412
3413/**
Parav Panditf6bdb142017-11-14 14:52:17 +02003414 * ib_init_ah_attr_from_wc - Initializes address handle attributes from a
Sean Hefty4e00d692006-06-17 20:37:39 -07003415 * work completion.
3416 * @device: Device on which the received message arrived.
3417 * @port_num: Port on which the received message arrived.
3418 * @wc: Work completion associated with the received message.
3419 * @grh: References the received global route header. This parameter is
3420 * ignored unless the work completion indicates that the GRH is valid.
3421 * @ah_attr: Returned attributes that can be used when creating an address
3422 * handle for replying to the message.
Parav Panditb7403212018-06-19 10:59:14 +03003423 * When ib_init_ah_attr_from_wc() returns success,
3424 * (a) for IB link layer it optionally contains a reference to SGID attribute
3425 * when GRH is present for IB link layer.
3426 * (b) for RoCE link layer it contains a reference to SGID attribute.
3427 * User must invoke rdma_cleanup_ah_attr_gid_attr() to release reference to SGID
3428 * attributes which are initialized using ib_init_ah_attr_from_wc().
3429 *
Sean Hefty4e00d692006-06-17 20:37:39 -07003430 */
Parav Panditf6bdb142017-11-14 14:52:17 +02003431int ib_init_ah_attr_from_wc(struct ib_device *device, u8 port_num,
3432 const struct ib_wc *wc, const struct ib_grh *grh,
3433 struct rdma_ah_attr *ah_attr);
Sean Hefty4e00d692006-06-17 20:37:39 -07003434
3435/**
Hal Rosenstock513789e2005-07-27 11:45:34 -07003436 * ib_create_ah_from_wc - Creates an address handle associated with the
3437 * sender of the specified work completion.
3438 * @pd: The protection domain associated with the address handle.
3439 * @wc: Work completion information associated with a received message.
3440 * @grh: References the received global route header. This parameter is
3441 * ignored unless the work completion indicates that the GRH is valid.
3442 * @port_num: The outbound port number to associate with the address.
3443 *
3444 * The address handle is used to reference a local or global destination
3445 * in all UD QP post sends.
3446 */
Ira Weiny73cdaae2015-05-31 17:15:31 -04003447struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, const struct ib_wc *wc,
3448 const struct ib_grh *grh, u8 port_num);
Hal Rosenstock513789e2005-07-27 11:45:34 -07003449
3450/**
Dasaratharaman Chandramouli67b985b2017-04-29 14:41:20 -04003451 * rdma_modify_ah - Modifies the address vector associated with an address
Linus Torvalds1da177e2005-04-16 15:20:36 -07003452 * handle.
3453 * @ah: The address handle to modify.
3454 * @ah_attr: The new address vector attributes to associate with the
3455 * address handle.
3456 */
Dasaratharaman Chandramouli67b985b2017-04-29 14:41:20 -04003457int rdma_modify_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003458
3459/**
Dasaratharaman Chandramoulibfbfd662017-04-29 14:41:21 -04003460 * rdma_query_ah - Queries the address vector associated with an address
Linus Torvalds1da177e2005-04-16 15:20:36 -07003461 * handle.
3462 * @ah: The address handle to query.
3463 * @ah_attr: The address vector attributes associated with the address
3464 * handle.
3465 */
Dasaratharaman Chandramoulibfbfd662017-04-29 14:41:21 -04003466int rdma_query_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003467
Gal Pressman2553ba22018-12-12 11:09:06 +02003468enum rdma_destroy_ah_flags {
3469 /* In a sleepable context */
3470 RDMA_DESTROY_AH_SLEEPABLE = BIT(0),
3471};
3472
Linus Torvalds1da177e2005-04-16 15:20:36 -07003473/**
Shamir Rabinovitchc4367a22019-03-31 19:10:05 +03003474 * rdma_destroy_ah_user - Destroys an address handle.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003475 * @ah: The address handle to destroy.
Gal Pressman2553ba22018-12-12 11:09:06 +02003476 * @flags: Destroy address handle flags (see enum rdma_destroy_ah_flags).
Shamir Rabinovitchc4367a22019-03-31 19:10:05 +03003477 * @udata: Valid user data or NULL for kernel objects
Linus Torvalds1da177e2005-04-16 15:20:36 -07003478 */
Shamir Rabinovitchc4367a22019-03-31 19:10:05 +03003479int rdma_destroy_ah_user(struct ib_ah *ah, u32 flags, struct ib_udata *udata);
3480
3481/**
3482 * rdma_destroy_ah - Destroys an kernel address handle.
3483 * @ah: The address handle to destroy.
3484 * @flags: Destroy address handle flags (see enum rdma_destroy_ah_flags).
3485 *
3486 * NOTE: for user ah use rdma_destroy_ah_user with valid udata!
3487 */
3488static inline int rdma_destroy_ah(struct ib_ah *ah, u32 flags)
3489{
3490 return rdma_destroy_ah_user(ah, flags, NULL);
3491}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003492
3493/**
Roland Dreierd41fcc62005-08-18 12:23:08 -07003494 * ib_create_srq - Creates a SRQ associated with the specified protection
3495 * domain.
3496 * @pd: The protection domain associated with the SRQ.
Dotan Barakabb6e9b2006-02-23 12:13:51 -08003497 * @srq_init_attr: A list of initial attributes required to create the
3498 * SRQ. If SRQ creation succeeds, then the attributes are updated to
3499 * the actual capabilities of the created SRQ.
Roland Dreierd41fcc62005-08-18 12:23:08 -07003500 *
3501 * srq_attr->max_wr and srq_attr->max_sge are read the determine the
3502 * requested size of the SRQ, and set to the actual values allocated
3503 * on return. If ib_create_srq() succeeds, then max_wr and max_sge
3504 * will always be at least as large as the requested values.
3505 */
3506struct ib_srq *ib_create_srq(struct ib_pd *pd,
3507 struct ib_srq_init_attr *srq_init_attr);
3508
3509/**
3510 * ib_modify_srq - Modifies the attributes for the specified SRQ.
3511 * @srq: The SRQ to modify.
3512 * @srq_attr: On input, specifies the SRQ attributes to modify. On output,
3513 * the current values of selected SRQ attributes are returned.
3514 * @srq_attr_mask: A bit-mask used to specify which attributes of the SRQ
3515 * are being modified.
3516 *
3517 * The mask may contain IB_SRQ_MAX_WR to resize the SRQ and/or
3518 * IB_SRQ_LIMIT to set the SRQ's limit and request notification when
3519 * the number of receives queued drops below the limit.
3520 */
3521int ib_modify_srq(struct ib_srq *srq,
3522 struct ib_srq_attr *srq_attr,
3523 enum ib_srq_attr_mask srq_attr_mask);
3524
3525/**
3526 * ib_query_srq - Returns the attribute list and current values for the
3527 * specified SRQ.
3528 * @srq: The SRQ to query.
3529 * @srq_attr: The attributes of the specified SRQ.
3530 */
3531int ib_query_srq(struct ib_srq *srq,
3532 struct ib_srq_attr *srq_attr);
3533
3534/**
Shamir Rabinovitchc4367a22019-03-31 19:10:05 +03003535 * ib_destroy_srq_user - Destroys the specified SRQ.
Roland Dreierd41fcc62005-08-18 12:23:08 -07003536 * @srq: The SRQ to destroy.
Shamir Rabinovitchc4367a22019-03-31 19:10:05 +03003537 * @udata: Valid user data or NULL for kernel objects
Roland Dreierd41fcc62005-08-18 12:23:08 -07003538 */
Shamir Rabinovitchc4367a22019-03-31 19:10:05 +03003539int ib_destroy_srq_user(struct ib_srq *srq, struct ib_udata *udata);
3540
3541/**
3542 * ib_destroy_srq - Destroys the specified kernel SRQ.
3543 * @srq: The SRQ to destroy.
3544 *
3545 * NOTE: for user srq use ib_destroy_srq_user with valid udata!
3546 */
3547static inline int ib_destroy_srq(struct ib_srq *srq)
3548{
3549 return ib_destroy_srq_user(srq, NULL);
3550}
Roland Dreierd41fcc62005-08-18 12:23:08 -07003551
3552/**
3553 * ib_post_srq_recv - Posts a list of work requests to the specified SRQ.
3554 * @srq: The SRQ to post the work request on.
3555 * @recv_wr: A list of work requests to post on the receive queue.
3556 * @bad_recv_wr: On an immediate failure, this parameter will reference
3557 * the work request that failed to be posted on the QP.
3558 */
3559static inline int ib_post_srq_recv(struct ib_srq *srq,
Bart Van Assched34ac5c2018-07-18 09:25:32 -07003560 const struct ib_recv_wr *recv_wr,
3561 const struct ib_recv_wr **bad_recv_wr)
Roland Dreierd41fcc62005-08-18 12:23:08 -07003562{
Bart Van Assched34ac5c2018-07-18 09:25:32 -07003563 const struct ib_recv_wr *dummy;
Bart Van Asschebb039a82018-07-18 09:25:16 -07003564
Kamal Heib3023a1e2018-12-10 21:09:48 +02003565 return srq->device->ops.post_srq_recv(srq, recv_wr,
3566 bad_recv_wr ? : &dummy);
Roland Dreierd41fcc62005-08-18 12:23:08 -07003567}
3568
3569/**
Shamir Rabinovitchc4367a22019-03-31 19:10:05 +03003570 * ib_create_qp_user - Creates a QP associated with the specified protection
Linus Torvalds1da177e2005-04-16 15:20:36 -07003571 * domain.
3572 * @pd: The protection domain associated with the QP.
Dotan Barakabb6e9b2006-02-23 12:13:51 -08003573 * @qp_init_attr: A list of initial attributes required to create the
3574 * QP. If QP creation succeeds, then the attributes are updated to
3575 * the actual capabilities of the created QP.
Shamir Rabinovitchc4367a22019-03-31 19:10:05 +03003576 * @udata: Valid user data or NULL for kernel objects
Linus Torvalds1da177e2005-04-16 15:20:36 -07003577 */
Shamir Rabinovitchc4367a22019-03-31 19:10:05 +03003578struct ib_qp *ib_create_qp_user(struct ib_pd *pd,
3579 struct ib_qp_init_attr *qp_init_attr,
3580 struct ib_udata *udata);
3581
3582/**
3583 * ib_create_qp - Creates a kernel QP associated with the specified protection
3584 * domain.
3585 * @pd: The protection domain associated with the QP.
3586 * @qp_init_attr: A list of initial attributes required to create the
3587 * QP. If QP creation succeeds, then the attributes are updated to
3588 * the actual capabilities of the created QP.
3589 * @udata: Valid user data or NULL for kernel objects
3590 *
3591 * NOTE: for user qp use ib_create_qp_user with valid udata!
3592 */
3593static inline struct ib_qp *ib_create_qp(struct ib_pd *pd,
3594 struct ib_qp_init_attr *qp_init_attr)
3595{
3596 return ib_create_qp_user(pd, qp_init_attr, NULL);
3597}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003598
3599/**
Parav Pandita512c2f2017-05-23 11:26:08 +03003600 * ib_modify_qp_with_udata - Modifies the attributes for the specified QP.
3601 * @qp: The QP to modify.
3602 * @attr: On input, specifies the QP attributes to modify. On output,
3603 * the current values of selected QP attributes are returned.
3604 * @attr_mask: A bit-mask used to specify which attributes of the QP
3605 * are being modified.
3606 * @udata: pointer to user's input output buffer information
3607 * are being modified.
3608 * It returns 0 on success and returns appropriate error code on error.
3609 */
3610int ib_modify_qp_with_udata(struct ib_qp *qp,
3611 struct ib_qp_attr *attr,
3612 int attr_mask,
3613 struct ib_udata *udata);
3614
3615/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07003616 * ib_modify_qp - Modifies the attributes for the specified QP and then
3617 * transitions the QP to the given state.
3618 * @qp: The QP to modify.
3619 * @qp_attr: On input, specifies the QP attributes to modify. On output,
3620 * the current values of selected QP attributes are returned.
3621 * @qp_attr_mask: A bit-mask used to specify which attributes of the QP
3622 * are being modified.
3623 */
3624int ib_modify_qp(struct ib_qp *qp,
3625 struct ib_qp_attr *qp_attr,
3626 int qp_attr_mask);
3627
3628/**
3629 * ib_query_qp - Returns the attribute list and current values for the
3630 * specified QP.
3631 * @qp: The QP to query.
3632 * @qp_attr: The attributes of the specified QP.
3633 * @qp_attr_mask: A bit-mask used to select specific attributes to query.
3634 * @qp_init_attr: Additional attributes of the selected QP.
3635 *
3636 * The qp_attr_mask may be used to limit the query to gathering only the
3637 * selected attributes.
3638 */
3639int ib_query_qp(struct ib_qp *qp,
3640 struct ib_qp_attr *qp_attr,
3641 int qp_attr_mask,
3642 struct ib_qp_init_attr *qp_init_attr);
3643
3644/**
3645 * ib_destroy_qp - Destroys the specified QP.
3646 * @qp: The QP to destroy.
Shamir Rabinovitchc4367a22019-03-31 19:10:05 +03003647 * @udata: Valid udata or NULL for kernel objects
Linus Torvalds1da177e2005-04-16 15:20:36 -07003648 */
Shamir Rabinovitchc4367a22019-03-31 19:10:05 +03003649int ib_destroy_qp_user(struct ib_qp *qp, struct ib_udata *udata);
3650
3651/**
3652 * ib_destroy_qp - Destroys the specified kernel QP.
3653 * @qp: The QP to destroy.
3654 *
3655 * NOTE: for user qp use ib_destroy_qp_user with valid udata!
3656 */
3657static inline int ib_destroy_qp(struct ib_qp *qp)
3658{
3659 return ib_destroy_qp_user(qp, NULL);
3660}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003661
3662/**
Sean Hefty0e0ec7e2011-08-08 15:31:51 -07003663 * ib_open_qp - Obtain a reference to an existing sharable QP.
3664 * @xrcd - XRC domain
3665 * @qp_open_attr: Attributes identifying the QP to open.
3666 *
3667 * Returns a reference to a sharable QP.
3668 */
3669struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd,
3670 struct ib_qp_open_attr *qp_open_attr);
3671
3672/**
3673 * ib_close_qp - Release an external reference to a QP.
Sean Heftyd3d72d92011-05-26 23:06:44 -07003674 * @qp: The QP handle to release
3675 *
Sean Hefty0e0ec7e2011-08-08 15:31:51 -07003676 * The opened QP handle is released by the caller. The underlying
3677 * shared QP is not destroyed until all internal references are released.
Sean Heftyd3d72d92011-05-26 23:06:44 -07003678 */
Sean Hefty0e0ec7e2011-08-08 15:31:51 -07003679int ib_close_qp(struct ib_qp *qp);
Sean Heftyd3d72d92011-05-26 23:06:44 -07003680
3681/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07003682 * ib_post_send - Posts a list of work requests to the send queue of
3683 * the specified QP.
3684 * @qp: The QP to post the work request on.
3685 * @send_wr: A list of work requests to post on the send queue.
3686 * @bad_send_wr: On an immediate failure, this parameter will reference
3687 * the work request that failed to be posted on the QP.
Bart Van Assche55464d42009-12-09 14:20:04 -08003688 *
3689 * While IBA Vol. 1 section 11.4.1.1 specifies that if an immediate
3690 * error is returned, the QP state shall not be affected,
3691 * ib_post_send() will return an immediate error after queueing any
3692 * earlier work requests in the list.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003693 */
3694static inline int ib_post_send(struct ib_qp *qp,
Bart Van Assched34ac5c2018-07-18 09:25:32 -07003695 const struct ib_send_wr *send_wr,
3696 const struct ib_send_wr **bad_send_wr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003697{
Bart Van Assched34ac5c2018-07-18 09:25:32 -07003698 const struct ib_send_wr *dummy;
Bart Van Asschebb039a82018-07-18 09:25:16 -07003699
Kamal Heib3023a1e2018-12-10 21:09:48 +02003700 return qp->device->ops.post_send(qp, send_wr, bad_send_wr ? : &dummy);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003701}
3702
3703/**
3704 * ib_post_recv - Posts a list of work requests to the receive queue of
3705 * the specified QP.
3706 * @qp: The QP to post the work request on.
3707 * @recv_wr: A list of work requests to post on the receive queue.
3708 * @bad_recv_wr: On an immediate failure, this parameter will reference
3709 * the work request that failed to be posted on the QP.
3710 */
3711static inline int ib_post_recv(struct ib_qp *qp,
Bart Van Assched34ac5c2018-07-18 09:25:32 -07003712 const struct ib_recv_wr *recv_wr,
3713 const struct ib_recv_wr **bad_recv_wr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003714{
Bart Van Assched34ac5c2018-07-18 09:25:32 -07003715 const struct ib_recv_wr *dummy;
Bart Van Asschebb039a82018-07-18 09:25:16 -07003716
Kamal Heib3023a1e2018-12-10 21:09:48 +02003717 return qp->device->ops.post_recv(qp, recv_wr, bad_recv_wr ? : &dummy);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003718}
3719
Shamir Rabinovitchc4367a22019-03-31 19:10:05 +03003720struct ib_cq *__ib_alloc_cq_user(struct ib_device *dev, void *private,
3721 int nr_cqe, int comp_vector,
3722 enum ib_poll_context poll_ctx,
3723 const char *caller, struct ib_udata *udata);
Leon Romanovskyf66c8ba2018-01-28 11:17:19 +02003724
Shamir Rabinovitchc4367a22019-03-31 19:10:05 +03003725/**
3726 * ib_alloc_cq_user: Allocate kernel/user CQ
3727 * @dev: The IB device
3728 * @private: Private data attached to the CQE
3729 * @nr_cqe: Number of CQEs in the CQ
3730 * @comp_vector: Completion vector used for the IRQs
3731 * @poll_ctx: Context used for polling the CQ
3732 * @udata: Valid user data or NULL for kernel objects
3733 */
3734static inline struct ib_cq *ib_alloc_cq_user(struct ib_device *dev,
3735 void *private, int nr_cqe,
3736 int comp_vector,
3737 enum ib_poll_context poll_ctx,
3738 struct ib_udata *udata)
3739{
3740 return __ib_alloc_cq_user(dev, private, nr_cqe, comp_vector, poll_ctx,
3741 KBUILD_MODNAME, udata);
3742}
3743
3744/**
3745 * ib_alloc_cq: Allocate kernel CQ
3746 * @dev: The IB device
3747 * @private: Private data attached to the CQE
3748 * @nr_cqe: Number of CQEs in the CQ
3749 * @comp_vector: Completion vector used for the IRQs
3750 * @poll_ctx: Context used for polling the CQ
3751 *
3752 * NOTE: for user cq use ib_alloc_cq_user with valid udata!
3753 */
3754static inline struct ib_cq *ib_alloc_cq(struct ib_device *dev, void *private,
3755 int nr_cqe, int comp_vector,
3756 enum ib_poll_context poll_ctx)
3757{
3758 return ib_alloc_cq_user(dev, private, nr_cqe, comp_vector, poll_ctx,
3759 NULL);
3760}
3761
Chuck Lever20cf4e02019-07-29 13:22:09 -04003762struct ib_cq *__ib_alloc_cq_any(struct ib_device *dev, void *private,
3763 int nr_cqe, enum ib_poll_context poll_ctx,
3764 const char *caller);
3765
3766/**
3767 * ib_alloc_cq_any: Allocate kernel CQ
3768 * @dev: The IB device
3769 * @private: Private data attached to the CQE
3770 * @nr_cqe: Number of CQEs in the CQ
3771 * @poll_ctx: Context used for polling the CQ
3772 */
3773static inline struct ib_cq *ib_alloc_cq_any(struct ib_device *dev,
3774 void *private, int nr_cqe,
3775 enum ib_poll_context poll_ctx)
3776{
3777 return __ib_alloc_cq_any(dev, private, nr_cqe, poll_ctx,
3778 KBUILD_MODNAME);
3779}
3780
Shamir Rabinovitchc4367a22019-03-31 19:10:05 +03003781/**
3782 * ib_free_cq_user - Free kernel/user CQ
3783 * @cq: The CQ to free
3784 * @udata: Valid user data or NULL for kernel objects
3785 */
3786void ib_free_cq_user(struct ib_cq *cq, struct ib_udata *udata);
3787
3788/**
3789 * ib_free_cq - Free kernel CQ
3790 * @cq: The CQ to free
3791 *
3792 * NOTE: for user cq use ib_free_cq_user with valid udata!
3793 */
3794static inline void ib_free_cq(struct ib_cq *cq)
3795{
3796 ib_free_cq_user(cq, NULL);
3797}
3798
Christoph Hellwig14d3a3b2015-12-11 11:53:03 -08003799int ib_process_cq_direct(struct ib_cq *cq, int budget);
3800
Linus Torvalds1da177e2005-04-16 15:20:36 -07003801/**
3802 * ib_create_cq - Creates a CQ on the specified device.
3803 * @device: The device on which to create the CQ.
3804 * @comp_handler: A user-specified callback that is invoked when a
3805 * completion event occurs on the CQ.
3806 * @event_handler: A user-specified callback that is invoked when an
3807 * asynchronous event not associated with a completion occurs on the CQ.
3808 * @cq_context: Context associated with the CQ returned to the user via
3809 * the associated completion and event handlers.
Matan Barak8e372102015-06-11 16:35:21 +03003810 * @cq_attr: The attributes the CQ should be created upon.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003811 *
3812 * Users can examine the cq structure to determine the actual CQ size.
3813 */
Bharat Potnuri7350cdd2018-06-15 20:52:33 +05303814struct ib_cq *__ib_create_cq(struct ib_device *device,
3815 ib_comp_handler comp_handler,
3816 void (*event_handler)(struct ib_event *, void *),
3817 void *cq_context,
3818 const struct ib_cq_init_attr *cq_attr,
3819 const char *caller);
3820#define ib_create_cq(device, cmp_hndlr, evt_hndlr, cq_ctxt, cq_attr) \
3821 __ib_create_cq((device), (cmp_hndlr), (evt_hndlr), (cq_ctxt), (cq_attr), KBUILD_MODNAME)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003822
3823/**
3824 * ib_resize_cq - Modifies the capacity of the CQ.
3825 * @cq: The CQ to resize.
3826 * @cqe: The minimum size of the CQ.
3827 *
3828 * Users can examine the cq structure to determine the actual CQ size.
3829 */
3830int ib_resize_cq(struct ib_cq *cq, int cqe);
3831
3832/**
Leon Romanovsky4190b4e2017-11-13 10:51:19 +02003833 * rdma_set_cq_moderation - Modifies moderation params of the CQ
Eli Cohen2dd57162008-04-16 21:09:33 -07003834 * @cq: The CQ to modify.
3835 * @cq_count: number of CQEs that will trigger an event
3836 * @cq_period: max period of time in usec before triggering an event
3837 *
3838 */
Leon Romanovsky4190b4e2017-11-13 10:51:19 +02003839int rdma_set_cq_moderation(struct ib_cq *cq, u16 cq_count, u16 cq_period);
Eli Cohen2dd57162008-04-16 21:09:33 -07003840
3841/**
Shamir Rabinovitchc4367a22019-03-31 19:10:05 +03003842 * ib_destroy_cq_user - Destroys the specified CQ.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003843 * @cq: The CQ to destroy.
Shamir Rabinovitchc4367a22019-03-31 19:10:05 +03003844 * @udata: Valid user data or NULL for kernel objects
Linus Torvalds1da177e2005-04-16 15:20:36 -07003845 */
Shamir Rabinovitchc4367a22019-03-31 19:10:05 +03003846int ib_destroy_cq_user(struct ib_cq *cq, struct ib_udata *udata);
3847
3848/**
3849 * ib_destroy_cq - Destroys the specified kernel CQ.
3850 * @cq: The CQ to destroy.
3851 *
3852 * NOTE: for user cq use ib_destroy_cq_user with valid udata!
3853 */
Leon Romanovsky890ac8d2019-05-20 09:54:21 +03003854static inline void ib_destroy_cq(struct ib_cq *cq)
Shamir Rabinovitchc4367a22019-03-31 19:10:05 +03003855{
Leon Romanovsky890ac8d2019-05-20 09:54:21 +03003856 ib_destroy_cq_user(cq, NULL);
Shamir Rabinovitchc4367a22019-03-31 19:10:05 +03003857}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003858
3859/**
3860 * ib_poll_cq - poll a CQ for completion(s)
3861 * @cq:the CQ being polled
3862 * @num_entries:maximum number of completions to return
3863 * @wc:array of at least @num_entries &struct ib_wc where completions
3864 * will be returned
3865 *
3866 * Poll a CQ for (possibly multiple) completions. If the return value
3867 * is < 0, an error occurred. If the return value is >= 0, it is the
3868 * number of completions returned. If the return value is
3869 * non-negative and < num_entries, then the CQ was emptied.
3870 */
3871static inline int ib_poll_cq(struct ib_cq *cq, int num_entries,
3872 struct ib_wc *wc)
3873{
Kamal Heib3023a1e2018-12-10 21:09:48 +02003874 return cq->device->ops.poll_cq(cq, num_entries, wc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003875}
3876
3877/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07003878 * ib_req_notify_cq - Request completion notification on a CQ.
3879 * @cq: The CQ to generate an event for.
Roland Dreiered23a722007-05-06 21:02:48 -07003880 * @flags:
3881 * Must contain exactly one of %IB_CQ_SOLICITED or %IB_CQ_NEXT_COMP
3882 * to request an event on the next solicited event or next work
3883 * completion at any type, respectively. %IB_CQ_REPORT_MISSED_EVENTS
3884 * may also be |ed in to request a hint about missed events, as
3885 * described below.
3886 *
3887 * Return Value:
3888 * < 0 means an error occurred while requesting notification
3889 * == 0 means notification was requested successfully, and if
3890 * IB_CQ_REPORT_MISSED_EVENTS was passed in, then no events
3891 * were missed and it is safe to wait for another event. In
3892 * this case is it guaranteed that any work completions added
3893 * to the CQ since the last CQ poll will trigger a completion
3894 * notification event.
3895 * > 0 is only returned if IB_CQ_REPORT_MISSED_EVENTS was passed
3896 * in. It means that the consumer must poll the CQ again to
3897 * make sure it is empty to avoid missing an event because of a
3898 * race between requesting notification and an entry being
3899 * added to the CQ. This return value means it is possible
3900 * (but not guaranteed) that a work completion has been added
3901 * to the CQ since the last poll without triggering a
3902 * completion notification event.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003903 */
3904static inline int ib_req_notify_cq(struct ib_cq *cq,
Roland Dreiered23a722007-05-06 21:02:48 -07003905 enum ib_cq_notify_flags flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003906{
Kamal Heib3023a1e2018-12-10 21:09:48 +02003907 return cq->device->ops.req_notify_cq(cq, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003908}
3909
3910/**
3911 * ib_req_ncomp_notif - Request completion notification when there are
3912 * at least the specified number of unreaped completions on the CQ.
3913 * @cq: The CQ to generate an event for.
3914 * @wc_cnt: The number of unreaped completions that should be on the
3915 * CQ before an event is generated.
3916 */
3917static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt)
3918{
Kamal Heib3023a1e2018-12-10 21:09:48 +02003919 return cq->device->ops.req_ncomp_notif ?
3920 cq->device->ops.req_ncomp_notif(cq, wc_cnt) :
Linus Torvalds1da177e2005-04-16 15:20:36 -07003921 -ENOSYS;
3922}
3923
3924/**
Ralph Campbell9b513092006-12-12 14:27:41 -08003925 * ib_dma_mapping_error - check a DMA addr for error
3926 * @dev: The device for which the dma_addr was created
3927 * @dma_addr: The DMA address to check
3928 */
3929static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
3930{
Bart Van Assche0957c292017-03-07 22:56:53 +00003931 return dma_mapping_error(dev->dma_device, dma_addr);
Ralph Campbell9b513092006-12-12 14:27:41 -08003932}
3933
3934/**
3935 * ib_dma_map_single - Map a kernel virtual address to DMA address
3936 * @dev: The device for which the dma_addr is to be created
3937 * @cpu_addr: The kernel virtual address
3938 * @size: The size of the region in bytes
3939 * @direction: The direction of the DMA
3940 */
3941static inline u64 ib_dma_map_single(struct ib_device *dev,
3942 void *cpu_addr, size_t size,
3943 enum dma_data_direction direction)
3944{
Bart Van Assche0957c292017-03-07 22:56:53 +00003945 return dma_map_single(dev->dma_device, cpu_addr, size, direction);
Ralph Campbell9b513092006-12-12 14:27:41 -08003946}
3947
3948/**
3949 * ib_dma_unmap_single - Destroy a mapping created by ib_dma_map_single()
3950 * @dev: The device for which the DMA address was created
3951 * @addr: The DMA address
3952 * @size: The size of the region in bytes
3953 * @direction: The direction of the DMA
3954 */
3955static inline void ib_dma_unmap_single(struct ib_device *dev,
3956 u64 addr, size_t size,
3957 enum dma_data_direction direction)
3958{
Bart Van Assche0957c292017-03-07 22:56:53 +00003959 dma_unmap_single(dev->dma_device, addr, size, direction);
Arthur Kepnercb9fbc52008-04-29 01:00:34 -07003960}
3961
Ralph Campbell9b513092006-12-12 14:27:41 -08003962/**
3963 * ib_dma_map_page - Map a physical page to DMA address
3964 * @dev: The device for which the dma_addr is to be created
3965 * @page: The page to be mapped
3966 * @offset: The offset within the page
3967 * @size: The size of the region in bytes
3968 * @direction: The direction of the DMA
3969 */
3970static inline u64 ib_dma_map_page(struct ib_device *dev,
3971 struct page *page,
3972 unsigned long offset,
3973 size_t size,
3974 enum dma_data_direction direction)
3975{
Bart Van Assche0957c292017-03-07 22:56:53 +00003976 return dma_map_page(dev->dma_device, page, offset, size, direction);
Ralph Campbell9b513092006-12-12 14:27:41 -08003977}
3978
3979/**
3980 * ib_dma_unmap_page - Destroy a mapping created by ib_dma_map_page()
3981 * @dev: The device for which the DMA address was created
3982 * @addr: The DMA address
3983 * @size: The size of the region in bytes
3984 * @direction: The direction of the DMA
3985 */
3986static inline void ib_dma_unmap_page(struct ib_device *dev,
3987 u64 addr, size_t size,
3988 enum dma_data_direction direction)
3989{
Bart Van Assche0957c292017-03-07 22:56:53 +00003990 dma_unmap_page(dev->dma_device, addr, size, direction);
Ralph Campbell9b513092006-12-12 14:27:41 -08003991}
3992
3993/**
3994 * ib_dma_map_sg - Map a scatter/gather list to DMA addresses
3995 * @dev: The device for which the DMA addresses are to be created
3996 * @sg: The array of scatter/gather entries
3997 * @nents: The number of scatter/gather entries
3998 * @direction: The direction of the DMA
3999 */
4000static inline int ib_dma_map_sg(struct ib_device *dev,
4001 struct scatterlist *sg, int nents,
4002 enum dma_data_direction direction)
4003{
Bart Van Assche0957c292017-03-07 22:56:53 +00004004 return dma_map_sg(dev->dma_device, sg, nents, direction);
Ralph Campbell9b513092006-12-12 14:27:41 -08004005}
4006
4007/**
4008 * ib_dma_unmap_sg - Unmap a scatter/gather list of DMA addresses
4009 * @dev: The device for which the DMA addresses were created
4010 * @sg: The array of scatter/gather entries
4011 * @nents: The number of scatter/gather entries
4012 * @direction: The direction of the DMA
4013 */
4014static inline void ib_dma_unmap_sg(struct ib_device *dev,
4015 struct scatterlist *sg, int nents,
4016 enum dma_data_direction direction)
4017{
Bart Van Assche0957c292017-03-07 22:56:53 +00004018 dma_unmap_sg(dev->dma_device, sg, nents, direction);
Ralph Campbell9b513092006-12-12 14:27:41 -08004019}
4020
Arthur Kepnercb9fbc52008-04-29 01:00:34 -07004021static inline int ib_dma_map_sg_attrs(struct ib_device *dev,
4022 struct scatterlist *sg, int nents,
4023 enum dma_data_direction direction,
Krzysztof Kozlowski00085f12016-08-03 13:46:00 -07004024 unsigned long dma_attrs)
Arthur Kepnercb9fbc52008-04-29 01:00:34 -07004025{
Bart Van Assche0957c292017-03-07 22:56:53 +00004026 return dma_map_sg_attrs(dev->dma_device, sg, nents, direction,
4027 dma_attrs);
Arthur Kepnercb9fbc52008-04-29 01:00:34 -07004028}
4029
4030static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
4031 struct scatterlist *sg, int nents,
4032 enum dma_data_direction direction,
Krzysztof Kozlowski00085f12016-08-03 13:46:00 -07004033 unsigned long dma_attrs)
Arthur Kepnercb9fbc52008-04-29 01:00:34 -07004034{
Bart Van Assche0957c292017-03-07 22:56:53 +00004035 dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction, dma_attrs);
Arthur Kepnercb9fbc52008-04-29 01:00:34 -07004036}
Ralph Campbell9b513092006-12-12 14:27:41 -08004037
4038/**
Bart Van Assche0b5cb332019-01-22 10:25:20 -08004039 * ib_dma_max_seg_size - Return the size limit of a single DMA transfer
4040 * @dev: The device to query
4041 *
4042 * The returned value represents a size in bytes.
4043 */
4044static inline unsigned int ib_dma_max_seg_size(struct ib_device *dev)
4045{
4046 struct device_dma_parameters *p = dev->dma_device->dma_parms;
4047
4048 return p ? p->max_segment_size : UINT_MAX;
4049}
4050
4051/**
Ralph Campbell9b513092006-12-12 14:27:41 -08004052 * ib_dma_sync_single_for_cpu - Prepare DMA region to be accessed by CPU
4053 * @dev: The device for which the DMA address was created
4054 * @addr: The DMA address
4055 * @size: The size of the region in bytes
4056 * @dir: The direction of the DMA
4057 */
4058static inline void ib_dma_sync_single_for_cpu(struct ib_device *dev,
4059 u64 addr,
4060 size_t size,
4061 enum dma_data_direction dir)
4062{
Bart Van Assche0957c292017-03-07 22:56:53 +00004063 dma_sync_single_for_cpu(dev->dma_device, addr, size, dir);
Ralph Campbell9b513092006-12-12 14:27:41 -08004064}
4065
4066/**
4067 * ib_dma_sync_single_for_device - Prepare DMA region to be accessed by device
4068 * @dev: The device for which the DMA address was created
4069 * @addr: The DMA address
4070 * @size: The size of the region in bytes
4071 * @dir: The direction of the DMA
4072 */
4073static inline void ib_dma_sync_single_for_device(struct ib_device *dev,
4074 u64 addr,
4075 size_t size,
4076 enum dma_data_direction dir)
4077{
Bart Van Assche0957c292017-03-07 22:56:53 +00004078 dma_sync_single_for_device(dev->dma_device, addr, size, dir);
Ralph Campbell9b513092006-12-12 14:27:41 -08004079}
4080
4081/**
4082 * ib_dma_alloc_coherent - Allocate memory and map it for DMA
4083 * @dev: The device for which the DMA address is requested
4084 * @size: The size of the region to allocate in bytes
4085 * @dma_handle: A pointer for returning the DMA address of the region
4086 * @flag: memory allocator flags
4087 */
4088static inline void *ib_dma_alloc_coherent(struct ib_device *dev,
4089 size_t size,
Bart Van Assched43dbac2017-01-20 13:04:10 -08004090 dma_addr_t *dma_handle,
Ralph Campbell9b513092006-12-12 14:27:41 -08004091 gfp_t flag)
4092{
Bart Van Assche0957c292017-03-07 22:56:53 +00004093 return dma_alloc_coherent(dev->dma_device, size, dma_handle, flag);
Ralph Campbell9b513092006-12-12 14:27:41 -08004094}
4095
4096/**
4097 * ib_dma_free_coherent - Free memory allocated by ib_dma_alloc_coherent()
4098 * @dev: The device for which the DMA addresses were allocated
4099 * @size: The size of the region
4100 * @cpu_addr: the address returned by ib_dma_alloc_coherent()
4101 * @dma_handle: the DMA address returned by ib_dma_alloc_coherent()
4102 */
4103static inline void ib_dma_free_coherent(struct ib_device *dev,
4104 size_t size, void *cpu_addr,
Bart Van Assched43dbac2017-01-20 13:04:10 -08004105 dma_addr_t dma_handle)
Ralph Campbell9b513092006-12-12 14:27:41 -08004106{
Bart Van Assche0957c292017-03-07 22:56:53 +00004107 dma_free_coherent(dev->dma_device, size, cpu_addr, dma_handle);
Ralph Campbell9b513092006-12-12 14:27:41 -08004108}
4109
4110/**
Shamir Rabinovitchc4367a22019-03-31 19:10:05 +03004111 * ib_dereg_mr_user - Deregisters a memory region and removes it from the
4112 * HCA translation table.
4113 * @mr: The memory region to deregister.
4114 * @udata: Valid user data or NULL for kernel object
4115 *
4116 * This function can fail, if the memory region has memory windows bound to it.
4117 */
4118int ib_dereg_mr_user(struct ib_mr *mr, struct ib_udata *udata);
4119
4120/**
4121 * ib_dereg_mr - Deregisters a kernel memory region and removes it from the
Linus Torvalds1da177e2005-04-16 15:20:36 -07004122 * HCA translation table.
4123 * @mr: The memory region to deregister.
Shani Michaeli7083e422013-02-06 16:19:12 +00004124 *
4125 * This function can fail, if the memory region has memory windows bound to it.
Shamir Rabinovitchc4367a22019-03-31 19:10:05 +03004126 *
4127 * NOTE: for user mr use ib_dereg_mr_user with valid udata!
Linus Torvalds1da177e2005-04-16 15:20:36 -07004128 */
Shamir Rabinovitchc4367a22019-03-31 19:10:05 +03004129static inline int ib_dereg_mr(struct ib_mr *mr)
4130{
4131 return ib_dereg_mr_user(mr, NULL);
4132}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004133
Shamir Rabinovitchc4367a22019-03-31 19:10:05 +03004134struct ib_mr *ib_alloc_mr_user(struct ib_pd *pd, enum ib_mr_type mr_type,
4135 u32 max_num_sg, struct ib_udata *udata);
4136
4137static inline struct ib_mr *ib_alloc_mr(struct ib_pd *pd,
4138 enum ib_mr_type mr_type, u32 max_num_sg)
4139{
4140 return ib_alloc_mr_user(pd, mr_type, max_num_sg, NULL);
4141}
Steve Wise00f7ec32008-07-14 23:48:45 -07004142
Israel Rukshin26bc7ea2019-06-11 18:52:39 +03004143struct ib_mr *ib_alloc_mr_integrity(struct ib_pd *pd,
4144 u32 max_num_data_sg,
4145 u32 max_num_meta_sg);
4146
Steve Wise00f7ec32008-07-14 23:48:45 -07004147/**
Steve Wise00f7ec32008-07-14 23:48:45 -07004148 * ib_update_fast_reg_key - updates the key portion of the fast_reg MR
4149 * R_Key and L_Key.
4150 * @mr - struct ib_mr pointer to be updated.
4151 * @newkey - new key to be used.
4152 */
4153static inline void ib_update_fast_reg_key(struct ib_mr *mr, u8 newkey)
4154{
4155 mr->lkey = (mr->lkey & 0xffffff00) | newkey;
4156 mr->rkey = (mr->rkey & 0xffffff00) | newkey;
4157}
4158
4159/**
Shani Michaeli7083e422013-02-06 16:19:12 +00004160 * ib_inc_rkey - increments the key portion of the given rkey. Can be used
4161 * for calculating a new rkey for type 2 memory windows.
4162 * @rkey - the rkey to increment.
4163 */
4164static inline u32 ib_inc_rkey(u32 rkey)
4165{
4166 const u32 mask = 0x000000ff;
4167 return ((rkey + 1) & mask) | (rkey & ~mask);
4168}
4169
4170/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07004171 * ib_alloc_fmr - Allocates a unmapped fast memory region.
4172 * @pd: The protection domain associated with the unmapped region.
4173 * @mr_access_flags: Specifies the memory access rights.
4174 * @fmr_attr: Attributes of the unmapped region.
4175 *
4176 * A fast memory region must be mapped before it can be used as part of
4177 * a work request.
4178 */
4179struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
4180 int mr_access_flags,
4181 struct ib_fmr_attr *fmr_attr);
4182
4183/**
4184 * ib_map_phys_fmr - Maps a list of physical pages to a fast memory region.
4185 * @fmr: The fast memory region to associate with the pages.
4186 * @page_list: An array of physical pages to map to the fast memory region.
4187 * @list_len: The number of pages in page_list.
4188 * @iova: The I/O virtual address to use with the mapped region.
4189 */
4190static inline int ib_map_phys_fmr(struct ib_fmr *fmr,
4191 u64 *page_list, int list_len,
4192 u64 iova)
4193{
Kamal Heib3023a1e2018-12-10 21:09:48 +02004194 return fmr->device->ops.map_phys_fmr(fmr, page_list, list_len, iova);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004195}
4196
4197/**
4198 * ib_unmap_fmr - Removes the mapping from a list of fast memory regions.
4199 * @fmr_list: A linked list of fast memory regions to unmap.
4200 */
4201int ib_unmap_fmr(struct list_head *fmr_list);
4202
4203/**
4204 * ib_dealloc_fmr - Deallocates a fast memory region.
4205 * @fmr: The fast memory region to deallocate.
4206 */
4207int ib_dealloc_fmr(struct ib_fmr *fmr);
4208
4209/**
4210 * ib_attach_mcast - Attaches the specified QP to a multicast group.
4211 * @qp: QP to attach to the multicast group. The QP must be type
4212 * IB_QPT_UD.
4213 * @gid: Multicast group GID.
4214 * @lid: Multicast group LID in host byte order.
4215 *
4216 * In order to send and receive multicast packets, subnet
4217 * administration must have created the multicast group and configured
4218 * the fabric appropriately. The port associated with the specified
4219 * QP must also be a member of the multicast group.
4220 */
4221int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
4222
4223/**
4224 * ib_detach_mcast - Detaches the specified QP from a multicast group.
4225 * @qp: QP to detach from the multicast group.
4226 * @gid: Multicast group GID.
4227 * @lid: Multicast group LID in host byte order.
4228 */
4229int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
4230
Sean Hefty59991f92011-05-23 17:52:46 -07004231/**
4232 * ib_alloc_xrcd - Allocates an XRC domain.
4233 * @device: The device on which to allocate the XRC domain.
Leon Romanovskyf66c8ba2018-01-28 11:17:19 +02004234 * @caller: Module name for kernel consumers
Sean Hefty59991f92011-05-23 17:52:46 -07004235 */
Leon Romanovskyf66c8ba2018-01-28 11:17:19 +02004236struct ib_xrcd *__ib_alloc_xrcd(struct ib_device *device, const char *caller);
4237#define ib_alloc_xrcd(device) \
4238 __ib_alloc_xrcd((device), KBUILD_MODNAME)
Sean Hefty59991f92011-05-23 17:52:46 -07004239
4240/**
4241 * ib_dealloc_xrcd - Deallocates an XRC domain.
4242 * @xrcd: The XRC domain to deallocate.
Shamir Rabinovitchc4367a22019-03-31 19:10:05 +03004243 * @udata: Valid user data or NULL for kernel object
Sean Hefty59991f92011-05-23 17:52:46 -07004244 */
Shamir Rabinovitchc4367a22019-03-31 19:10:05 +03004245int ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata);
Sean Hefty59991f92011-05-23 17:52:46 -07004246
Eli Cohen1c636f82013-10-31 15:26:32 +02004247static inline int ib_check_mr_access(int flags)
4248{
4249 /*
4250 * Local write permission is required if remote write or
4251 * remote atomic permission is also requested.
4252 */
4253 if (flags & (IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_REMOTE_WRITE) &&
4254 !(flags & IB_ACCESS_LOCAL_WRITE))
4255 return -EINVAL;
4256
4257 return 0;
4258}
4259
Jack Morgenstein08bb5582018-05-23 15:30:30 +03004260static inline bool ib_access_writable(int access_flags)
4261{
4262 /*
4263 * We have writable memory backing the MR if any of the following
4264 * access flags are set. "Local write" and "remote write" obviously
4265 * require write access. "Remote atomic" can do things like fetch and
4266 * add, which will modify memory, and "MW bind" can change permissions
4267 * by binding a window.
4268 */
4269 return access_flags &
4270 (IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE |
4271 IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_MW_BIND);
4272}
4273
Sagi Grimberg1b01d332014-02-23 14:19:05 +02004274/**
4275 * ib_check_mr_status: lightweight check of MR status.
4276 * This routine may provide status checks on a selected
4277 * ib_mr. first use is for signature status check.
4278 *
4279 * @mr: A memory region.
4280 * @check_mask: Bitmask of which checks to perform from
4281 * ib_mr_status_check enumeration.
4282 * @mr_status: The container of relevant status checks.
4283 * failed checks will be indicated in the status bitmask
4284 * and the relevant info shall be in the error item.
4285 */
4286int ib_check_mr_status(struct ib_mr *mr, u32 check_mask,
4287 struct ib_mr_status *mr_status);
4288
Jason Gunthorped79af722019-01-10 14:02:24 -07004289/**
4290 * ib_device_try_get: Hold a registration lock
4291 * device: The device to lock
4292 *
4293 * A device under an active registration lock cannot become unregistered. It
4294 * is only possible to obtain a registration lock on a device that is fully
4295 * registered, otherwise this function returns false.
4296 *
4297 * The registration lock is only necessary for actions which require the
4298 * device to still be registered. Uses that only require the device pointer to
4299 * be valid should use get_device(&ibdev->dev) to hold the memory.
4300 *
4301 */
4302static inline bool ib_device_try_get(struct ib_device *dev)
4303{
4304 return refcount_inc_not_zero(&dev->refcount);
4305}
4306
4307void ib_device_put(struct ib_device *device);
Jason Gunthorpe324e2272019-02-12 21:12:51 -07004308struct ib_device *ib_device_get_by_netdev(struct net_device *ndev,
4309 enum rdma_driver_id driver_id);
4310struct ib_device *ib_device_get_by_name(const char *name,
4311 enum rdma_driver_id driver_id);
Yotam Kenneth9268f722015-07-30 17:50:15 +03004312struct net_device *ib_get_net_dev_by_params(struct ib_device *dev, u8 port,
4313 u16 pkey, const union ib_gid *gid,
4314 const struct sockaddr *addr);
Jason Gunthorpec2261dd2019-02-12 21:12:50 -07004315int ib_device_set_netdev(struct ib_device *ib_dev, struct net_device *ndev,
4316 unsigned int port);
4317struct net_device *ib_device_netdev(struct ib_device *dev, u8 port);
4318
Yishai Hadas5fd251c2016-05-23 15:20:48 +03004319struct ib_wq *ib_create_wq(struct ib_pd *pd,
4320 struct ib_wq_init_attr *init_attr);
Shamir Rabinovitchc4367a22019-03-31 19:10:05 +03004321int ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata);
Yishai Hadas5fd251c2016-05-23 15:20:48 +03004322int ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *attr,
4323 u32 wq_attr_mask);
Yishai Hadas6d397862016-05-23 15:20:51 +03004324struct ib_rwq_ind_table *ib_create_rwq_ind_table(struct ib_device *device,
4325 struct ib_rwq_ind_table_init_attr*
4326 wq_ind_table_init_attr);
4327int ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table);
Yotam Kenneth9268f722015-07-30 17:50:15 +03004328
Christoph Hellwigff2ba992016-05-03 18:01:04 +02004329int ib_map_mr_sg(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
Bart Van Assche9aa8b322016-05-12 10:49:15 -07004330 unsigned int *sg_offset, unsigned int page_size);
Max Gurtovoy2cdfcdd2019-06-11 18:52:40 +03004331int ib_map_mr_sg_pi(struct ib_mr *mr, struct scatterlist *data_sg,
4332 int data_sg_nents, unsigned int *data_sg_offset,
4333 struct scatterlist *meta_sg, int meta_sg_nents,
4334 unsigned int *meta_sg_offset, unsigned int page_size);
Sagi Grimberg4c67e2b2015-10-13 19:11:24 +03004335
4336static inline int
Christoph Hellwigff2ba992016-05-03 18:01:04 +02004337ib_map_mr_sg_zbva(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
Bart Van Assche9aa8b322016-05-12 10:49:15 -07004338 unsigned int *sg_offset, unsigned int page_size)
Sagi Grimberg4c67e2b2015-10-13 19:11:24 +03004339{
4340 int n;
4341
Christoph Hellwigff2ba992016-05-03 18:01:04 +02004342 n = ib_map_mr_sg(mr, sg, sg_nents, sg_offset, page_size);
Sagi Grimberg4c67e2b2015-10-13 19:11:24 +03004343 mr->iova = 0;
4344
4345 return n;
4346}
4347
Christoph Hellwigff2ba992016-05-03 18:01:04 +02004348int ib_sg_to_pages(struct ib_mr *mr, struct scatterlist *sgl, int sg_nents,
Bart Van Assche9aa8b322016-05-12 10:49:15 -07004349 unsigned int *sg_offset, int (*set_page)(struct ib_mr *, u64));
Sagi Grimberg4c67e2b2015-10-13 19:11:24 +03004350
Steve Wise765d6772016-02-17 08:15:41 -08004351void ib_drain_rq(struct ib_qp *qp);
4352void ib_drain_sq(struct ib_qp *qp);
4353void ib_drain_qp(struct ib_qp *qp);
Moni Shoua850d8fd2016-11-10 11:30:56 +02004354
Yuval Shaiad4186192017-06-14 23:13:34 +03004355int ib_get_eth_speed(struct ib_device *dev, u8 port_num, u8 *speed, u8 *width);
Dasaratharaman Chandramouli2224c472017-04-29 14:41:27 -04004356
4357static inline u8 *rdma_ah_retrieve_dmac(struct rdma_ah_attr *attr)
4358{
Dasaratharaman Chandramouli44c58482017-04-29 14:41:29 -04004359 if (attr->type == RDMA_AH_ATTR_TYPE_ROCE)
4360 return attr->roce.dmac;
4361 return NULL;
Dasaratharaman Chandramouli2224c472017-04-29 14:41:27 -04004362}
4363
Dasaratharaman Chandramouli64b46462017-04-29 14:41:30 -04004364static inline void rdma_ah_set_dlid(struct rdma_ah_attr *attr, u32 dlid)
Dasaratharaman Chandramouli2224c472017-04-29 14:41:27 -04004365{
Dasaratharaman Chandramouli44c58482017-04-29 14:41:29 -04004366 if (attr->type == RDMA_AH_ATTR_TYPE_IB)
Dasaratharaman Chandramouli64b46462017-04-29 14:41:30 -04004367 attr->ib.dlid = (u16)dlid;
4368 else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
4369 attr->opa.dlid = dlid;
Dasaratharaman Chandramouli2224c472017-04-29 14:41:27 -04004370}
4371
Dasaratharaman Chandramouli64b46462017-04-29 14:41:30 -04004372static inline u32 rdma_ah_get_dlid(const struct rdma_ah_attr *attr)
Dasaratharaman Chandramouli2224c472017-04-29 14:41:27 -04004373{
Dasaratharaman Chandramouli44c58482017-04-29 14:41:29 -04004374 if (attr->type == RDMA_AH_ATTR_TYPE_IB)
4375 return attr->ib.dlid;
Dasaratharaman Chandramouli64b46462017-04-29 14:41:30 -04004376 else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
4377 return attr->opa.dlid;
Dasaratharaman Chandramouli44c58482017-04-29 14:41:29 -04004378 return 0;
Dasaratharaman Chandramouli2224c472017-04-29 14:41:27 -04004379}
4380
4381static inline void rdma_ah_set_sl(struct rdma_ah_attr *attr, u8 sl)
4382{
4383 attr->sl = sl;
4384}
4385
4386static inline u8 rdma_ah_get_sl(const struct rdma_ah_attr *attr)
4387{
4388 return attr->sl;
4389}
4390
4391static inline void rdma_ah_set_path_bits(struct rdma_ah_attr *attr,
4392 u8 src_path_bits)
4393{
Dasaratharaman Chandramouli44c58482017-04-29 14:41:29 -04004394 if (attr->type == RDMA_AH_ATTR_TYPE_IB)
4395 attr->ib.src_path_bits = src_path_bits;
Dasaratharaman Chandramouli64b46462017-04-29 14:41:30 -04004396 else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
4397 attr->opa.src_path_bits = src_path_bits;
Dasaratharaman Chandramouli2224c472017-04-29 14:41:27 -04004398}
4399
4400static inline u8 rdma_ah_get_path_bits(const struct rdma_ah_attr *attr)
4401{
Dasaratharaman Chandramouli44c58482017-04-29 14:41:29 -04004402 if (attr->type == RDMA_AH_ATTR_TYPE_IB)
4403 return attr->ib.src_path_bits;
Dasaratharaman Chandramouli64b46462017-04-29 14:41:30 -04004404 else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
4405 return attr->opa.src_path_bits;
Dasaratharaman Chandramouli44c58482017-04-29 14:41:29 -04004406 return 0;
Dasaratharaman Chandramouli2224c472017-04-29 14:41:27 -04004407}
4408
Don Hiattd98bb7f2017-08-04 13:54:16 -07004409static inline void rdma_ah_set_make_grd(struct rdma_ah_attr *attr,
4410 bool make_grd)
4411{
4412 if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
4413 attr->opa.make_grd = make_grd;
4414}
4415
4416static inline bool rdma_ah_get_make_grd(const struct rdma_ah_attr *attr)
4417{
4418 if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
4419 return attr->opa.make_grd;
4420 return false;
4421}
4422
Dasaratharaman Chandramouli2224c472017-04-29 14:41:27 -04004423static inline void rdma_ah_set_port_num(struct rdma_ah_attr *attr, u8 port_num)
4424{
4425 attr->port_num = port_num;
4426}
4427
4428static inline u8 rdma_ah_get_port_num(const struct rdma_ah_attr *attr)
4429{
4430 return attr->port_num;
4431}
4432
4433static inline void rdma_ah_set_static_rate(struct rdma_ah_attr *attr,
4434 u8 static_rate)
4435{
4436 attr->static_rate = static_rate;
4437}
4438
4439static inline u8 rdma_ah_get_static_rate(const struct rdma_ah_attr *attr)
4440{
4441 return attr->static_rate;
4442}
4443
4444static inline void rdma_ah_set_ah_flags(struct rdma_ah_attr *attr,
4445 enum ib_ah_flags flag)
4446{
4447 attr->ah_flags = flag;
4448}
4449
4450static inline enum ib_ah_flags
4451 rdma_ah_get_ah_flags(const struct rdma_ah_attr *attr)
4452{
4453 return attr->ah_flags;
4454}
4455
4456static inline const struct ib_global_route
4457 *rdma_ah_read_grh(const struct rdma_ah_attr *attr)
4458{
4459 return &attr->grh;
4460}
4461
4462/*To retrieve and modify the grh */
4463static inline struct ib_global_route
4464 *rdma_ah_retrieve_grh(struct rdma_ah_attr *attr)
4465{
4466 return &attr->grh;
4467}
4468
4469static inline void rdma_ah_set_dgid_raw(struct rdma_ah_attr *attr, void *dgid)
4470{
4471 struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);
4472
4473 memcpy(grh->dgid.raw, dgid, sizeof(grh->dgid));
4474}
4475
4476static inline void rdma_ah_set_subnet_prefix(struct rdma_ah_attr *attr,
4477 __be64 prefix)
4478{
4479 struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);
4480
4481 grh->dgid.global.subnet_prefix = prefix;
4482}
4483
4484static inline void rdma_ah_set_interface_id(struct rdma_ah_attr *attr,
4485 __be64 if_id)
4486{
4487 struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);
4488
4489 grh->dgid.global.interface_id = if_id;
4490}
4491
4492static inline void rdma_ah_set_grh(struct rdma_ah_attr *attr,
4493 union ib_gid *dgid, u32 flow_label,
4494 u8 sgid_index, u8 hop_limit,
4495 u8 traffic_class)
4496{
4497 struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);
4498
4499 attr->ah_flags = IB_AH_GRH;
4500 if (dgid)
4501 grh->dgid = *dgid;
4502 grh->flow_label = flow_label;
4503 grh->sgid_index = sgid_index;
4504 grh->hop_limit = hop_limit;
4505 grh->traffic_class = traffic_class;
Jason Gunthorpe8d9ec9a2018-06-13 10:22:03 +03004506 grh->sgid_attr = NULL;
Dasaratharaman Chandramouli2224c472017-04-29 14:41:27 -04004507}
Dasaratharaman Chandramouli44c58482017-04-29 14:41:29 -04004508
Jason Gunthorpe8d9ec9a2018-06-13 10:22:03 +03004509void rdma_destroy_ah_attr(struct rdma_ah_attr *ah_attr);
4510void rdma_move_grh_sgid_attr(struct rdma_ah_attr *attr, union ib_gid *dgid,
4511 u32 flow_label, u8 hop_limit, u8 traffic_class,
4512 const struct ib_gid_attr *sgid_attr);
Jason Gunthorped97099f2018-06-13 10:22:05 +03004513void rdma_copy_ah_attr(struct rdma_ah_attr *dest,
4514 const struct rdma_ah_attr *src);
4515void rdma_replace_ah_attr(struct rdma_ah_attr *old,
4516 const struct rdma_ah_attr *new);
4517void rdma_move_ah_attr(struct rdma_ah_attr *dest, struct rdma_ah_attr *src);
Jason Gunthorpe8d9ec9a2018-06-13 10:22:03 +03004518
Don Hiatt87daac62018-02-01 10:57:03 -08004519/**
4520 * rdma_ah_find_type - Return address handle type.
4521 *
4522 * @dev: Device to be checked
4523 * @port_num: Port number
4524 */
Dasaratharaman Chandramouli44c58482017-04-29 14:41:29 -04004525static inline enum rdma_ah_attr_type rdma_ah_find_type(struct ib_device *dev,
Don Hiatt87daac62018-02-01 10:57:03 -08004526 u8 port_num)
Dasaratharaman Chandramouli44c58482017-04-29 14:41:29 -04004527{
Parav Pandita6532e72018-01-12 07:58:42 +02004528 if (rdma_protocol_roce(dev, port_num))
Dasaratharaman Chandramouli44c58482017-04-29 14:41:29 -04004529 return RDMA_AH_ATTR_TYPE_ROCE;
Don Hiatt87daac62018-02-01 10:57:03 -08004530 if (rdma_protocol_ib(dev, port_num)) {
4531 if (rdma_cap_opa_ah(dev, port_num))
4532 return RDMA_AH_ATTR_TYPE_OPA;
Dasaratharaman Chandramouli44c58482017-04-29 14:41:29 -04004533 return RDMA_AH_ATTR_TYPE_IB;
Don Hiatt87daac62018-02-01 10:57:03 -08004534 }
4535
4536 return RDMA_AH_ATTR_TYPE_UNDEFINED;
Dasaratharaman Chandramouli44c58482017-04-29 14:41:29 -04004537}
Hiatt, Don7db20ec2017-06-08 13:37:49 -04004538
Hiatt, Don62ede772017-08-14 14:17:43 -04004539/**
4540 * ib_lid_cpu16 - Return lid in 16bit CPU encoding.
4541 * In the current implementation the only way to get
4542 * get the 32bit lid is from other sources for OPA.
4543 * For IB, lids will always be 16bits so cast the
4544 * value accordingly.
4545 *
4546 * @lid: A 32bit LID
4547 */
4548static inline u16 ib_lid_cpu16(u32 lid)
Hiatt, Don7db20ec2017-06-08 13:37:49 -04004549{
Hiatt, Don62ede772017-08-14 14:17:43 -04004550 WARN_ON_ONCE(lid & 0xFFFF0000);
4551 return (u16)lid;
Hiatt, Don7db20ec2017-06-08 13:37:49 -04004552}
4553
Hiatt, Don62ede772017-08-14 14:17:43 -04004554/**
4555 * ib_lid_be16 - Return lid in 16bit BE encoding.
4556 *
4557 * @lid: A 32bit LID
4558 */
4559static inline __be16 ib_lid_be16(u32 lid)
Hiatt, Don7db20ec2017-06-08 13:37:49 -04004560{
Hiatt, Don62ede772017-08-14 14:17:43 -04004561 WARN_ON_ONCE(lid & 0xFFFF0000);
4562 return cpu_to_be16((u16)lid);
Hiatt, Don7db20ec2017-06-08 13:37:49 -04004563}
Doug Ledford32043832017-08-10 14:31:29 -04004564
Sagi Grimbergc66cd352017-07-13 11:09:41 +03004565/**
4566 * ib_get_vector_affinity - Get the affinity mappings of a given completion
4567 * vector
4568 * @device: the rdma device
4569 * @comp_vector: index of completion vector
4570 *
4571 * Returns NULL on failure, otherwise a corresponding cpu map of the
4572 * completion vector (returns all-cpus map if the device driver doesn't
4573 * implement get_vector_affinity).
4574 */
4575static inline const struct cpumask *
4576ib_get_vector_affinity(struct ib_device *device, int comp_vector)
4577{
4578 if (comp_vector < 0 || comp_vector >= device->num_comp_vectors ||
Kamal Heib3023a1e2018-12-10 21:09:48 +02004579 !device->ops.get_vector_affinity)
Sagi Grimbergc66cd352017-07-13 11:09:41 +03004580 return NULL;
4581
Kamal Heib3023a1e2018-12-10 21:09:48 +02004582 return device->ops.get_vector_affinity(device, comp_vector);
Sagi Grimbergc66cd352017-07-13 11:09:41 +03004583
4584}
4585
Daniel Jurgens32f69e42018-01-04 17:25:36 +02004586/**
4587 * rdma_roce_rescan_device - Rescan all of the network devices in the system
4588 * and add their gids, as needed, to the relevant RoCE devices.
4589 *
4590 * @device: the rdma device
4591 */
4592void rdma_roce_rescan_device(struct ib_device *ibdev);
4593
Jason Gunthorpe8313c102018-11-25 20:51:13 +02004594struct ib_ucontext *ib_uverbs_get_ucontext_file(struct ib_uverbs_file *ufile);
Yishai Hadas7dc08dc2018-06-17 12:59:59 +03004595
Jason Gunthorpe15a1b4b2018-11-25 20:51:15 +02004596int uverbs_destroy_def_handler(struct uverbs_attr_bundle *attrs);
Denis Drozdovf6a8a192018-08-14 14:08:51 +03004597
4598struct net_device *rdma_alloc_netdev(struct ib_device *device, u8 port_num,
4599 enum rdma_netdev_t type, const char *name,
4600 unsigned char name_assign_type,
4601 void (*setup)(struct net_device *));
Denis Drozdov5d6b0cb2018-08-14 14:22:35 +03004602
4603int rdma_init_netdev(struct ib_device *device, u8 port_num,
4604 enum rdma_netdev_t type, const char *name,
4605 unsigned char name_assign_type,
4606 void (*setup)(struct net_device *),
4607 struct net_device *netdev);
4608
Parav Panditd4122f52018-10-11 22:31:53 +03004609/**
4610 * rdma_set_device_sysfs_group - Set device attributes group to have
4611 * driver specific sysfs entries at
4612 * for infiniband class.
4613 *
4614 * @device: device pointer for which attributes to be created
4615 * @group: Pointer to group which should be added when device
4616 * is registered with sysfs.
4617 * rdma_set_device_sysfs_group() allows existing drivers to expose one
4618 * group per device to have sysfs attributes.
4619 *
4620 * NOTE: New drivers should not make use of this API; instead new device
4621 * parameter should be exposed via netlink command. This API and mechanism
4622 * exist only for existing drivers.
4623 */
4624static inline void
4625rdma_set_device_sysfs_group(struct ib_device *dev,
4626 const struct attribute_group *group)
4627{
4628 dev->groups[1] = group;
4629}
4630
Parav Pandit54747232018-12-18 14:15:56 +02004631/**
4632 * rdma_device_to_ibdev - Get ib_device pointer from device pointer
4633 *
4634 * @device: device pointer for which ib_device pointer to retrieve
4635 *
4636 * rdma_device_to_ibdev() retrieves ib_device pointer from device.
4637 *
4638 */
4639static inline struct ib_device *rdma_device_to_ibdev(struct device *device)
4640{
Parav Panditcebe5562019-02-26 13:56:11 +02004641 struct ib_core_device *coredev =
4642 container_of(device, struct ib_core_device, dev);
4643
4644 return coredev->owner;
Parav Pandit54747232018-12-18 14:15:56 +02004645}
4646
4647/**
4648 * rdma_device_to_drv_device - Helper macro to reach back to driver's
4649 * ib_device holder structure from device pointer.
4650 *
4651 * NOTE: New drivers should not make use of this API; This API is only for
4652 * existing drivers who have exposed sysfs entries using
4653 * rdma_set_device_sysfs_group().
4654 */
4655#define rdma_device_to_drv_device(dev, drv_dev_struct, ibdev_member) \
4656 container_of(rdma_device_to_ibdev(dev), drv_dev_struct, ibdev_member)
Parav Pandit41c61402019-02-26 14:01:46 +02004657
4658bool rdma_dev_access_netns(const struct ib_device *device,
4659 const struct net *net);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004660#endif /* IB_VERBS_H */