blob: 5608e14e3aadf893570e4189c81cb5472a27e184 [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * Copyright (c) 2004 Mellanox Technologies Ltd. All rights reserved.
3 * Copyright (c) 2004 Infinicon Corporation. All rights reserved.
4 * Copyright (c) 2004 Intel Corporation. All rights reserved.
5 * Copyright (c) 2004 Topspin Corporation. All rights reserved.
6 * Copyright (c) 2004 Voltaire Corporation. All rights reserved.
Roland Dreier2a1d9b72005-08-10 23:03:10 -07007 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
Roland Dreierf7c6a7b2007-03-04 16:15:11 -08008 * Copyright (c) 2005, 2006, 2007 Cisco Systems. All rights reserved.
Linus Torvalds1da177e2005-04-16 15:20:36 -07009 *
10 * This software is available to you under a choice of one of two
11 * licenses. You may choose to be licensed under the terms of the GNU
12 * General Public License (GPL) Version 2, available from the file
13 * COPYING in the main directory of this source tree, or the
14 * OpenIB.org BSD license below:
15 *
16 * Redistribution and use in source and binary forms, with or
17 * without modification, are permitted provided that the following
18 * conditions are met:
19 *
20 * - Redistributions of source code must retain the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer.
23 *
24 * - Redistributions in binary form must reproduce the above
25 * copyright notice, this list of conditions and the following
26 * disclaimer in the documentation and/or other materials
27 * provided with the distribution.
28 *
29 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
30 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
31 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
32 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
33 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
34 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
35 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
36 * SOFTWARE.
Linus Torvalds1da177e2005-04-16 15:20:36 -070037 */
38
39#if !defined(IB_VERBS_H)
40#define IB_VERBS_H
41
42#include <linux/types.h>
43#include <linux/device.h>
Ralph Campbell9b513092006-12-12 14:27:41 -080044#include <linux/dma-mapping.h>
Michael S. Tsirkin459d6e22007-02-04 14:11:55 -080045#include <linux/kref.h>
Dotan Barakbfb3ea12007-07-31 16:49:15 +030046#include <linux/list.h>
47#include <linux/rwsem.h>
Tejun Heof0626712010-10-19 15:24:36 +000048#include <linux/workqueue.h>
Christoph Hellwig14d3a3b2015-12-11 11:53:03 -080049#include <linux/irq_poll.h>
Matan Barakdd5f03b2013-12-12 18:03:11 +020050#include <uapi/linux/if_ether.h>
Somnath Koturc865f242015-12-23 14:56:51 +020051#include <net/ipv6.h>
52#include <net/ip.h>
Matan Barak301a7212015-12-15 20:30:10 +020053#include <linux/string.h>
54#include <linux/slab.h>
Vishwanathapura, Niranjana2fc77572017-04-12 20:29:20 -070055#include <linux/netdevice.h>
Parav Pandit01b67112018-11-16 03:50:57 +020056#include <linux/refcount.h>
Eli Cohen50174a72016-03-11 22:58:38 +020057#include <linux/if_link.h>
Arun Sharma600634972011-07-26 16:09:06 -070058#include <linux/atomic.h>
Haggai Eran882214e2014-12-11 17:04:18 +020059#include <linux/mmu_notifier.h>
Linus Torvalds7c0f6ba2016-12-24 11:46:01 -080060#include <linux/uaccess.h>
Parav Pandit43579b52017-01-10 00:02:14 +000061#include <linux/cgroup_rdma.h>
Leon Romanovskyf6316032019-03-28 15:12:58 +020062#include <linux/irqflags.h>
63#include <linux/preempt.h>
Yamin Friedmanda662972019-07-08 13:59:03 +030064#include <linux/dim.h>
Nicolas Dichtelea6819e2017-03-27 14:20:14 +020065#include <uapi/rdma/ib_user_verbs.h>
Mark Zhang413d3342019-07-02 13:02:34 +030066#include <rdma/rdma_counter.h>
Leon Romanovsky02d88832018-01-28 11:17:20 +020067#include <rdma/restrack.h>
Max Gurtovoy36b1e472019-06-11 18:52:37 +030068#include <rdma/signature.h>
Matan Barak0ede73b2018-03-19 15:02:34 +020069#include <uapi/rdma/rdma_user_ioctl.h>
Matan Barak2eb9bea2018-03-28 09:27:45 +030070#include <uapi/rdma/ib_user_ioctl_verbs.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070071
Leon Romanovsky9abb0d12017-06-27 16:49:53 +030072#define IB_FW_VERSION_NAME_MAX ETHTOOL_FWVERS_LEN
73
Jason Gunthorpeb5231b02018-09-16 20:48:04 +030074struct ib_umem_odp;
75
Tejun Heof0626712010-10-19 15:24:36 +000076extern struct workqueue_struct *ib_wq;
Christoph Hellwig14d3a3b2015-12-11 11:53:03 -080077extern struct workqueue_struct *ib_comp_wq;
Jack Morgensteinf7948092018-08-27 08:35:55 +030078extern struct workqueue_struct *ib_comp_unbound_wq;
Tejun Heof0626712010-10-19 15:24:36 +000079
Gal Pressman923abb92019-05-01 13:48:13 +030080__printf(3, 4) __cold
81void ibdev_printk(const char *level, const struct ib_device *ibdev,
82 const char *format, ...);
83__printf(2, 3) __cold
84void ibdev_emerg(const struct ib_device *ibdev, const char *format, ...);
85__printf(2, 3) __cold
86void ibdev_alert(const struct ib_device *ibdev, const char *format, ...);
87__printf(2, 3) __cold
88void ibdev_crit(const struct ib_device *ibdev, const char *format, ...);
89__printf(2, 3) __cold
90void ibdev_err(const struct ib_device *ibdev, const char *format, ...);
91__printf(2, 3) __cold
92void ibdev_warn(const struct ib_device *ibdev, const char *format, ...);
93__printf(2, 3) __cold
94void ibdev_notice(const struct ib_device *ibdev, const char *format, ...);
95__printf(2, 3) __cold
96void ibdev_info(const struct ib_device *ibdev, const char *format, ...);
97
98#if defined(CONFIG_DYNAMIC_DEBUG)
99#define ibdev_dbg(__dev, format, args...) \
100 dynamic_ibdev_dbg(__dev, format, ##args)
Gal Pressman923abb92019-05-01 13:48:13 +0300101#else
102__printf(2, 3) __cold
103static inline
104void ibdev_dbg(const struct ib_device *ibdev, const char *format, ...) {}
105#endif
106
Gal Pressman05bb4112019-08-01 20:14:46 +0300107#define ibdev_level_ratelimited(ibdev_level, ibdev, fmt, ...) \
108do { \
109 static DEFINE_RATELIMIT_STATE(_rs, \
110 DEFAULT_RATELIMIT_INTERVAL, \
111 DEFAULT_RATELIMIT_BURST); \
112 if (__ratelimit(&_rs)) \
113 ibdev_level(ibdev, fmt, ##__VA_ARGS__); \
114} while (0)
115
116#define ibdev_emerg_ratelimited(ibdev, fmt, ...) \
117 ibdev_level_ratelimited(ibdev_emerg, ibdev, fmt, ##__VA_ARGS__)
118#define ibdev_alert_ratelimited(ibdev, fmt, ...) \
119 ibdev_level_ratelimited(ibdev_alert, ibdev, fmt, ##__VA_ARGS__)
120#define ibdev_crit_ratelimited(ibdev, fmt, ...) \
121 ibdev_level_ratelimited(ibdev_crit, ibdev, fmt, ##__VA_ARGS__)
122#define ibdev_err_ratelimited(ibdev, fmt, ...) \
123 ibdev_level_ratelimited(ibdev_err, ibdev, fmt, ##__VA_ARGS__)
124#define ibdev_warn_ratelimited(ibdev, fmt, ...) \
125 ibdev_level_ratelimited(ibdev_warn, ibdev, fmt, ##__VA_ARGS__)
126#define ibdev_notice_ratelimited(ibdev, fmt, ...) \
127 ibdev_level_ratelimited(ibdev_notice, ibdev, fmt, ##__VA_ARGS__)
128#define ibdev_info_ratelimited(ibdev, fmt, ...) \
129 ibdev_level_ratelimited(ibdev_info, ibdev, fmt, ##__VA_ARGS__)
130
131#if defined(CONFIG_DYNAMIC_DEBUG)
132/* descriptor check is first to prevent flooding with "callbacks suppressed" */
133#define ibdev_dbg_ratelimited(ibdev, fmt, ...) \
134do { \
135 static DEFINE_RATELIMIT_STATE(_rs, \
136 DEFAULT_RATELIMIT_INTERVAL, \
137 DEFAULT_RATELIMIT_BURST); \
138 DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \
139 if (DYNAMIC_DEBUG_BRANCH(descriptor) && __ratelimit(&_rs)) \
140 __dynamic_ibdev_dbg(&descriptor, ibdev, fmt, \
141 ##__VA_ARGS__); \
142} while (0)
143#else
144__printf(2, 3) __cold
145static inline
146void ibdev_dbg_ratelimited(const struct ib_device *ibdev, const char *format, ...) {}
147#endif
148
Linus Torvalds1da177e2005-04-16 15:20:36 -0700149union ib_gid {
150 u8 raw[16];
151 struct {
Sean Hefty97f52eb2005-08-13 21:05:57 -0700152 __be64 subnet_prefix;
153 __be64 interface_id;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700154 } global;
155};
156
Moni Shouae26be1b2015-07-30 18:33:29 +0300157extern union ib_gid zgid;
158
Matan Barakb39ffa12015-12-23 14:56:47 +0200159enum ib_gid_type {
160 /* If link layer is Ethernet, this is RoCE V1 */
161 IB_GID_TYPE_IB = 0,
162 IB_GID_TYPE_ROCE = 0,
Matan Barak7766a992015-12-23 14:56:50 +0200163 IB_GID_TYPE_ROCE_UDP_ENCAP = 1,
Matan Barakb39ffa12015-12-23 14:56:47 +0200164 IB_GID_TYPE_SIZE
165};
166
Moni Shoua7ead4bc2016-01-14 17:50:38 +0200167#define ROCE_V2_UDP_DPORT 4791
Matan Barak03db3a22015-07-30 18:33:26 +0300168struct ib_gid_attr {
Parav Pandit943bd982019-05-02 10:48:07 +0300169 struct net_device __rcu *ndev;
Parav Pandit598ff6b2018-04-01 15:08:21 +0300170 struct ib_device *device;
Parav Panditb150c382018-06-05 08:40:15 +0300171 union ib_gid gid;
Parav Pandit598ff6b2018-04-01 15:08:21 +0300172 enum ib_gid_type gid_type;
173 u16 index;
174 u8 port_num;
Matan Barak03db3a22015-07-30 18:33:26 +0300175};
176
Eli Cohena0c1b2a2016-03-11 22:58:37 +0200177enum {
178 /* set the local administered indication */
179 IB_SA_WELL_KNOWN_GUID = BIT_ULL(57) | 2,
180};
181
Tom Tucker07ebafb2006-08-03 16:02:42 -0500182enum rdma_transport_type {
183 RDMA_TRANSPORT_IB,
Upinder Malhi \(umalhi\)180771a2013-09-10 03:36:59 +0000184 RDMA_TRANSPORT_IWARP,
Upinder Malhi248567f2014-01-09 14:48:19 -0800185 RDMA_TRANSPORT_USNIC,
Gal Pressmanf95be3d2019-05-05 20:59:21 +0300186 RDMA_TRANSPORT_USNIC_UDP,
187 RDMA_TRANSPORT_UNSPECIFIED,
Tom Tucker07ebafb2006-08-03 16:02:42 -0500188};
189
Michael Wang6b90a6d2015-05-05 14:50:18 +0200190enum rdma_protocol_type {
191 RDMA_PROTOCOL_IB,
192 RDMA_PROTOCOL_IBOE,
193 RDMA_PROTOCOL_IWARP,
194 RDMA_PROTOCOL_USNIC_UDP
195};
196
Roland Dreier8385fd82014-06-04 10:00:16 -0700197__attribute_const__ enum rdma_transport_type
Jason Gunthorpe5d60c112019-06-13 21:38:17 -0300198rdma_node_get_transport(unsigned int node_type);
Tom Tucker07ebafb2006-08-03 16:02:42 -0500199
Somnath Koturc865f242015-12-23 14:56:51 +0200200enum rdma_network_type {
201 RDMA_NETWORK_IB,
202 RDMA_NETWORK_ROCE_V1 = RDMA_NETWORK_IB,
203 RDMA_NETWORK_IPV4,
204 RDMA_NETWORK_IPV6
205};
206
207static inline enum ib_gid_type ib_network_to_gid_type(enum rdma_network_type network_type)
208{
209 if (network_type == RDMA_NETWORK_IPV4 ||
210 network_type == RDMA_NETWORK_IPV6)
211 return IB_GID_TYPE_ROCE_UDP_ENCAP;
212
213 /* IB_GID_TYPE_IB same as RDMA_NETWORK_ROCE_V1 */
214 return IB_GID_TYPE_IB;
215}
216
Parav Pandit47ec3862018-06-13 10:22:06 +0300217static inline enum rdma_network_type
218rdma_gid_attr_network_type(const struct ib_gid_attr *attr)
Somnath Koturc865f242015-12-23 14:56:51 +0200219{
Parav Pandit47ec3862018-06-13 10:22:06 +0300220 if (attr->gid_type == IB_GID_TYPE_IB)
Somnath Koturc865f242015-12-23 14:56:51 +0200221 return RDMA_NETWORK_IB;
222
Parav Pandit47ec3862018-06-13 10:22:06 +0300223 if (ipv6_addr_v4mapped((struct in6_addr *)&attr->gid))
Somnath Koturc865f242015-12-23 14:56:51 +0200224 return RDMA_NETWORK_IPV4;
225 else
226 return RDMA_NETWORK_IPV6;
227}
228
Eli Cohena3f5ada2010-09-27 17:51:10 -0700229enum rdma_link_layer {
230 IB_LINK_LAYER_UNSPECIFIED,
231 IB_LINK_LAYER_INFINIBAND,
232 IB_LINK_LAYER_ETHERNET,
233};
234
Linus Torvalds1da177e2005-04-16 15:20:36 -0700235enum ib_device_cap_flags {
Leon Romanovsky7ca0bc52015-12-20 12:16:09 +0200236 IB_DEVICE_RESIZE_MAX_WR = (1 << 0),
237 IB_DEVICE_BAD_PKEY_CNTR = (1 << 1),
238 IB_DEVICE_BAD_QKEY_CNTR = (1 << 2),
239 IB_DEVICE_RAW_MULTI = (1 << 3),
240 IB_DEVICE_AUTO_PATH_MIG = (1 << 4),
241 IB_DEVICE_CHANGE_PHY_PORT = (1 << 5),
242 IB_DEVICE_UD_AV_PORT_ENFORCE = (1 << 6),
243 IB_DEVICE_CURR_QP_STATE_MOD = (1 << 7),
244 IB_DEVICE_SHUTDOWN_PORT = (1 << 8),
Leon Romanovsky78b57f92017-08-17 15:50:37 +0300245 /* Not in use, former INIT_TYPE = (1 << 9),*/
Leon Romanovsky7ca0bc52015-12-20 12:16:09 +0200246 IB_DEVICE_PORT_ACTIVE_EVENT = (1 << 10),
247 IB_DEVICE_SYS_IMAGE_GUID = (1 << 11),
248 IB_DEVICE_RC_RNR_NAK_GEN = (1 << 12),
249 IB_DEVICE_SRQ_RESIZE = (1 << 13),
250 IB_DEVICE_N_NOTIFY_CQ = (1 << 14),
Christoph Hellwigb1adc712015-12-23 19:12:45 +0100251
252 /*
253 * This device supports a per-device lkey or stag that can be
254 * used without performing a memory registration for the local
255 * memory. Note that ULPs should never check this flag, but
256 * instead of use the local_dma_lkey flag in the ib_pd structure,
257 * which will always contain a usable lkey.
258 */
Leon Romanovsky7ca0bc52015-12-20 12:16:09 +0200259 IB_DEVICE_LOCAL_DMA_LKEY = (1 << 15),
Leon Romanovsky78b57f92017-08-17 15:50:37 +0300260 /* Reserved, old SEND_W_INV = (1 << 16),*/
Leon Romanovsky7ca0bc52015-12-20 12:16:09 +0200261 IB_DEVICE_MEM_WINDOW = (1 << 17),
Eli Cohene0605d92008-01-30 18:30:57 +0200262 /*
263 * Devices should set IB_DEVICE_UD_IP_SUM if they support
264 * insertion of UDP and TCP checksum on outgoing UD IPoIB
265 * messages and can verify the validity of checksum for
266 * incoming messages. Setting this flag implies that the
267 * IPoIB driver may set NETIF_F_IP_CSUM for datagram mode.
268 */
Leon Romanovsky7ca0bc52015-12-20 12:16:09 +0200269 IB_DEVICE_UD_IP_CSUM = (1 << 18),
270 IB_DEVICE_UD_TSO = (1 << 19),
271 IB_DEVICE_XRC = (1 << 20),
Christoph Hellwigb1adc712015-12-23 19:12:45 +0100272
273 /*
274 * This device supports the IB "base memory management extension",
275 * which includes support for fast registrations (IB_WR_REG_MR,
276 * IB_WR_LOCAL_INV and IB_WR_SEND_WITH_INV verbs). This flag should
277 * also be set by any iWarp device which must support FRs to comply
278 * to the iWarp verbs spec. iWarp devices also support the
279 * IB_WR_RDMA_READ_WITH_INV verb for RDMA READs that invalidate the
280 * stag.
281 */
Leon Romanovsky7ca0bc52015-12-20 12:16:09 +0200282 IB_DEVICE_MEM_MGT_EXTENSIONS = (1 << 21),
283 IB_DEVICE_BLOCK_MULTICAST_LOOPBACK = (1 << 22),
284 IB_DEVICE_MEM_WINDOW_TYPE_2A = (1 << 23),
285 IB_DEVICE_MEM_WINDOW_TYPE_2B = (1 << 24),
286 IB_DEVICE_RC_IP_CSUM = (1 << 25),
Noa Osherovichebaaee22017-01-18 15:39:54 +0200287 /* Deprecated. Please use IB_RAW_PACKET_CAP_IP_CSUM. */
Leon Romanovsky7ca0bc52015-12-20 12:16:09 +0200288 IB_DEVICE_RAW_IP_CSUM = (1 << 26),
Leon Romanovsky8a06ce52015-12-20 12:16:10 +0200289 /*
290 * Devices should set IB_DEVICE_CROSS_CHANNEL if they
291 * support execution of WQEs that involve synchronization
292 * of I/O operations with single completion queue managed
293 * by hardware.
294 */
Leon Romanovsky78b57f92017-08-17 15:50:37 +0300295 IB_DEVICE_CROSS_CHANNEL = (1 << 27),
Leon Romanovsky7ca0bc52015-12-20 12:16:09 +0200296 IB_DEVICE_MANAGED_FLOW_STEERING = (1 << 29),
Israel Rukshinc0a6cbb2019-06-11 18:52:50 +0300297 IB_DEVICE_INTEGRITY_HANDOVER = (1 << 30),
Max Gurtovoy47355b32016-06-06 19:34:39 +0300298 IB_DEVICE_ON_DEMAND_PAGING = (1ULL << 31),
Sagi Grimbergf5aa9152016-02-29 19:07:32 +0200299 IB_DEVICE_SG_GAPS_REG = (1ULL << 32),
Max Gurtovoyc7e162a2016-06-06 19:34:40 +0300300 IB_DEVICE_VIRTUAL_FUNCTION = (1ULL << 33),
Noa Osherovichebaaee22017-01-18 15:39:54 +0200301 /* Deprecated. Please use IB_RAW_PACKET_CAP_SCATTER_FCS. */
Max Gurtovoyc7e162a2016-06-06 19:34:40 +0300302 IB_DEVICE_RAW_SCATTER_FCS = (1ULL << 34),
Vishwanathapura, Niranjana62e45942017-04-12 20:29:21 -0700303 IB_DEVICE_RDMA_NETDEV_OPA_VNIC = (1ULL << 35),
Noa Osheroviche1d2e882017-10-29 13:59:44 +0200304 /* The device supports padding incoming writes to cacheline. */
305 IB_DEVICE_PCI_WRITE_END_PADDING = (1ULL << 36),
Steve Wise3856ec42019-02-15 11:03:53 -0800306 IB_DEVICE_ALLOW_USER_UNREG = (1ULL << 37),
Sagi Grimberg1b01d332014-02-23 14:19:05 +0200307};
308
Linus Torvalds1da177e2005-04-16 15:20:36 -0700309enum ib_atomic_cap {
310 IB_ATOMIC_NONE,
311 IB_ATOMIC_HCA,
312 IB_ATOMIC_GLOB
313};
314
Sagi Grimberg860f10a2014-12-11 17:04:16 +0200315enum ib_odp_general_cap_bits {
Artemy Kovalyov25bf14d2017-01-18 16:58:06 +0200316 IB_ODP_SUPPORT = 1 << 0,
317 IB_ODP_SUPPORT_IMPLICIT = 1 << 1,
Sagi Grimberg860f10a2014-12-11 17:04:16 +0200318};
319
320enum ib_odp_transport_cap_bits {
321 IB_ODP_SUPPORT_SEND = 1 << 0,
322 IB_ODP_SUPPORT_RECV = 1 << 1,
323 IB_ODP_SUPPORT_WRITE = 1 << 2,
324 IB_ODP_SUPPORT_READ = 1 << 3,
325 IB_ODP_SUPPORT_ATOMIC = 1 << 4,
Moni Shouada823342019-01-22 08:48:41 +0200326 IB_ODP_SUPPORT_SRQ_RECV = 1 << 5,
Sagi Grimberg860f10a2014-12-11 17:04:16 +0200327};
328
329struct ib_odp_caps {
330 uint64_t general_caps;
331 struct {
332 uint32_t rc_odp_caps;
333 uint32_t uc_odp_caps;
334 uint32_t ud_odp_caps;
Moni Shoua52a72e22019-01-22 08:48:42 +0200335 uint32_t xrc_odp_caps;
Sagi Grimberg860f10a2014-12-11 17:04:16 +0200336 } per_transport_caps;
337};
338
Yishai Hadasccf20562016-08-28 11:28:43 +0300339struct ib_rss_caps {
340 /* Corresponding bit will be set if qp type from
341 * 'enum ib_qp_type' is supported, e.g.
342 * supported_qpts |= 1 << IB_QPT_UD
343 */
344 u32 supported_qpts;
345 u32 max_rwq_indirection_tables;
346 u32 max_rwq_indirection_table_size;
347};
348
Artemy Kovalyov6938fc1e2017-08-17 15:52:03 +0300349enum ib_tm_cap_flags {
Danit Goldberg89705e92019-07-05 19:21:57 +0300350 /* Support tag matching with rendezvous offload for RC transport */
351 IB_TM_CAP_RNDV_RC = 1 << 0,
Artemy Kovalyov6938fc1e2017-08-17 15:52:03 +0300352};
353
Leon Romanovsky78b1beb2017-09-24 21:46:29 +0300354struct ib_tm_caps {
Artemy Kovalyov6938fc1e2017-08-17 15:52:03 +0300355 /* Max size of RNDV header */
356 u32 max_rndv_hdr_size;
357 /* Max number of entries in tag matching list */
358 u32 max_num_tags;
359 /* From enum ib_tm_cap_flags */
360 u32 flags;
361 /* Max number of outstanding list operations */
362 u32 max_ops;
363 /* Max number of SGE in tag matching entry */
364 u32 max_sge;
365};
366
Matan Barakbcf4c1e2015-06-11 16:35:20 +0300367struct ib_cq_init_attr {
368 unsigned int cqe;
Dan Carpentera9018ad2019-10-11 16:34:19 +0300369 u32 comp_vector;
Matan Barakbcf4c1e2015-06-11 16:35:20 +0300370 u32 flags;
371};
372
Yonatan Cohen869ddcf2017-11-13 10:51:13 +0200373enum ib_cq_attr_mask {
374 IB_CQ_MODERATE = 1 << 0,
375};
376
Yonatan Cohen18bd9072017-11-13 10:51:16 +0200377struct ib_cq_caps {
378 u16 max_cq_moderation_count;
379 u16 max_cq_moderation_period;
380};
381
Ariel Levkovichbe934cc2018-04-05 18:53:25 +0300382struct ib_dm_mr_attr {
383 u64 length;
384 u64 offset;
385 u32 access_flags;
386};
387
Ariel Levkovichbee76d72018-04-05 18:53:24 +0300388struct ib_dm_alloc_attr {
389 u64 length;
390 u32 alignment;
391 u32 flags;
392};
393
Linus Torvalds1da177e2005-04-16 15:20:36 -0700394struct ib_device_attr {
395 u64 fw_ver;
Sean Hefty97f52eb2005-08-13 21:05:57 -0700396 __be64 sys_image_guid;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700397 u64 max_mr_size;
398 u64 page_size_cap;
399 u32 vendor_id;
400 u32 vendor_part_id;
401 u32 hw_ver;
402 int max_qp;
403 int max_qp_wr;
Leon Romanovskyfb532d62016-02-23 10:25:25 +0200404 u64 device_cap_flags;
Steve Wise33023fb2018-06-18 08:05:26 -0700405 int max_send_sge;
406 int max_recv_sge;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700407 int max_sge_rd;
408 int max_cq;
409 int max_cqe;
410 int max_mr;
411 int max_pd;
412 int max_qp_rd_atom;
413 int max_ee_rd_atom;
414 int max_res_rd_atom;
415 int max_qp_init_rd_atom;
416 int max_ee_init_rd_atom;
417 enum ib_atomic_cap atomic_cap;
Vladimir Sokolovsky5e80ba82010-04-14 17:23:01 +0300418 enum ib_atomic_cap masked_atomic_cap;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700419 int max_ee;
420 int max_rdd;
421 int max_mw;
422 int max_raw_ipv6_qp;
423 int max_raw_ethy_qp;
424 int max_mcast_grp;
425 int max_mcast_qp_attach;
426 int max_total_mcast_qp_attach;
427 int max_ah;
428 int max_fmr;
429 int max_map_per_fmr;
430 int max_srq;
431 int max_srq_wr;
432 int max_srq_sge;
Steve Wise00f7ec32008-07-14 23:48:45 -0700433 unsigned int max_fast_reg_page_list_len;
Max Gurtovoy62e3c372019-06-11 18:52:43 +0300434 unsigned int max_pi_fast_reg_page_list_len;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700435 u16 max_pkeys;
436 u8 local_ca_ack_delay;
Sagi Grimberg1b01d332014-02-23 14:19:05 +0200437 int sig_prot_cap;
438 int sig_guard_cap;
Sagi Grimberg860f10a2014-12-11 17:04:16 +0200439 struct ib_odp_caps odp_caps;
Matan Barak24306dc2015-06-11 16:35:24 +0300440 uint64_t timestamp_mask;
441 uint64_t hca_core_clock; /* in KHZ */
Yishai Hadasccf20562016-08-28 11:28:43 +0300442 struct ib_rss_caps rss_caps;
443 u32 max_wq_type_rq;
Noa Osherovichebaaee22017-01-18 15:39:54 +0200444 u32 raw_packet_caps; /* Use ib_raw_packet_caps enum */
Leon Romanovsky78b1beb2017-09-24 21:46:29 +0300445 struct ib_tm_caps tm_caps;
Yonatan Cohen18bd9072017-11-13 10:51:16 +0200446 struct ib_cq_caps cq_caps;
Ariel Levkovich1d8eeb92018-04-05 18:53:23 +0300447 u64 max_dm_size;
Yamin Friedman00bd1432019-10-07 16:59:32 +0300448 /* Max entries for sgl for optimized performance per READ */
449 u32 max_sgl_rd;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700450};
451
452enum ib_mtu {
453 IB_MTU_256 = 1,
454 IB_MTU_512 = 2,
455 IB_MTU_1024 = 3,
456 IB_MTU_2048 = 4,
457 IB_MTU_4096 = 5
458};
459
460static inline int ib_mtu_enum_to_int(enum ib_mtu mtu)
461{
462 switch (mtu) {
463 case IB_MTU_256: return 256;
464 case IB_MTU_512: return 512;
465 case IB_MTU_1024: return 1024;
466 case IB_MTU_2048: return 2048;
467 case IB_MTU_4096: return 4096;
468 default: return -1;
469 }
470}
471
Amrani, Ramd3f4aad2016-12-26 08:40:57 +0200472static inline enum ib_mtu ib_mtu_int_to_enum(int mtu)
473{
474 if (mtu >= 4096)
475 return IB_MTU_4096;
476 else if (mtu >= 2048)
477 return IB_MTU_2048;
478 else if (mtu >= 1024)
479 return IB_MTU_1024;
480 else if (mtu >= 512)
481 return IB_MTU_512;
482 else
483 return IB_MTU_256;
484}
485
Linus Torvalds1da177e2005-04-16 15:20:36 -0700486enum ib_port_state {
487 IB_PORT_NOP = 0,
488 IB_PORT_DOWN = 1,
489 IB_PORT_INIT = 2,
490 IB_PORT_ARMED = 3,
491 IB_PORT_ACTIVE = 4,
492 IB_PORT_ACTIVE_DEFER = 5
493};
494
Kamal Heib72a77202019-08-07 13:31:35 +0300495enum ib_port_phys_state {
496 IB_PORT_PHYS_STATE_SLEEP = 1,
497 IB_PORT_PHYS_STATE_POLLING = 2,
498 IB_PORT_PHYS_STATE_DISABLED = 3,
499 IB_PORT_PHYS_STATE_PORT_CONFIGURATION_TRAINING = 4,
500 IB_PORT_PHYS_STATE_LINK_UP = 5,
501 IB_PORT_PHYS_STATE_LINK_ERROR_RECOVERY = 6,
502 IB_PORT_PHYS_STATE_PHY_TEST = 7,
503};
504
Linus Torvalds1da177e2005-04-16 15:20:36 -0700505enum ib_port_width {
506 IB_WIDTH_1X = 1,
Michael Guralnikdbabf682018-12-09 11:49:49 +0200507 IB_WIDTH_2X = 16,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700508 IB_WIDTH_4X = 2,
509 IB_WIDTH_8X = 4,
510 IB_WIDTH_12X = 8
511};
512
513static inline int ib_width_enum_to_int(enum ib_port_width width)
514{
515 switch (width) {
516 case IB_WIDTH_1X: return 1;
Michael Guralnikdbabf682018-12-09 11:49:49 +0200517 case IB_WIDTH_2X: return 2;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700518 case IB_WIDTH_4X: return 4;
519 case IB_WIDTH_8X: return 8;
520 case IB_WIDTH_12X: return 12;
521 default: return -1;
522 }
523}
524
Or Gerlitz2e966912012-02-28 18:49:50 +0200525enum ib_port_speed {
526 IB_SPEED_SDR = 1,
527 IB_SPEED_DDR = 2,
528 IB_SPEED_QDR = 4,
529 IB_SPEED_FDR10 = 8,
530 IB_SPEED_FDR = 16,
Noa Osherovich12113a32017-04-20 20:53:31 +0300531 IB_SPEED_EDR = 32,
532 IB_SPEED_HDR = 64
Or Gerlitz2e966912012-02-28 18:49:50 +0200533};
534
Christoph Lameterb40f4752016-05-16 12:49:33 -0500535/**
536 * struct rdma_hw_stats
Mark Bloche9451302018-03-27 15:51:05 +0300537 * @lock - Mutex to protect parallel write access to lifespan and values
538 * of counters, which are 64bits and not guaranteeed to be written
539 * atomicaly on 32bits systems.
Christoph Lameterb40f4752016-05-16 12:49:33 -0500540 * @timestamp - Used by the core code to track when the last update was
541 * @lifespan - Used by the core code to determine how old the counters
542 * should be before being updated again. Stored in jiffies, defaults
543 * to 10 milliseconds, drivers can override the default be specifying
544 * their own value during their allocation routine.
545 * @name - Array of pointers to static names used for the counters in
546 * directory.
547 * @num_counters - How many hardware counters there are. If name is
548 * shorter than this number, a kernel oops will result. Driver authors
549 * are encouraged to leave BUILD_BUG_ON(ARRAY_SIZE(@name) < num_counters)
550 * in their code to prevent this.
551 * @value - Array of u64 counters that are accessed by the sysfs code and
552 * filled in by the drivers get_stats routine
553 */
554struct rdma_hw_stats {
Mark Bloche9451302018-03-27 15:51:05 +0300555 struct mutex lock; /* Protect lifespan and values[] */
Christoph Lameterb40f4752016-05-16 12:49:33 -0500556 unsigned long timestamp;
557 unsigned long lifespan;
558 const char * const *names;
559 int num_counters;
560 u64 value[];
Steve Wise7f624d02008-07-14 23:48:48 -0700561};
562
Christoph Lameterb40f4752016-05-16 12:49:33 -0500563#define RDMA_HW_STATS_DEFAULT_LIFESPAN 10
564/**
565 * rdma_alloc_hw_stats_struct - Helper function to allocate dynamic struct
566 * for drivers.
567 * @names - Array of static const char *
568 * @num_counters - How many elements in array
569 * @lifespan - How many milliseconds between updates
570 */
571static inline struct rdma_hw_stats *rdma_alloc_hw_stats_struct(
572 const char * const *names, int num_counters,
573 unsigned long lifespan)
574{
575 struct rdma_hw_stats *stats;
Steve Wise7f624d02008-07-14 23:48:48 -0700576
Christoph Lameterb40f4752016-05-16 12:49:33 -0500577 stats = kzalloc(sizeof(*stats) + num_counters * sizeof(u64),
578 GFP_KERNEL);
579 if (!stats)
580 return NULL;
581 stats->names = names;
582 stats->num_counters = num_counters;
583 stats->lifespan = msecs_to_jiffies(lifespan);
Steve Wise7f624d02008-07-14 23:48:48 -0700584
Christoph Lameterb40f4752016-05-16 12:49:33 -0500585 return stats;
586}
587
Steve Wise7f624d02008-07-14 23:48:48 -0700588
Ira Weinyf9b22e32015-05-13 20:02:59 -0400589/* Define bits for the various functionality this port needs to be supported by
590 * the core.
591 */
592/* Management 0x00000FFF */
593#define RDMA_CORE_CAP_IB_MAD 0x00000001
594#define RDMA_CORE_CAP_IB_SMI 0x00000002
595#define RDMA_CORE_CAP_IB_CM 0x00000004
596#define RDMA_CORE_CAP_IW_CM 0x00000008
597#define RDMA_CORE_CAP_IB_SA 0x00000010
Ira Weiny65995fe2015-06-06 14:38:32 -0400598#define RDMA_CORE_CAP_OPA_MAD 0x00000020
Ira Weinyf9b22e32015-05-13 20:02:59 -0400599
600/* Address format 0x000FF000 */
601#define RDMA_CORE_CAP_AF_IB 0x00001000
602#define RDMA_CORE_CAP_ETH_AH 0x00002000
Dasaratharaman Chandramouli94d595c2017-03-20 19:38:09 -0400603#define RDMA_CORE_CAP_OPA_AH 0x00004000
Artemy Kovalyovb02289b2018-07-04 15:57:50 +0300604#define RDMA_CORE_CAP_IB_GRH_REQUIRED 0x00008000
Ira Weinyf9b22e32015-05-13 20:02:59 -0400605
606/* Protocol 0xFFF00000 */
607#define RDMA_CORE_CAP_PROT_IB 0x00100000
608#define RDMA_CORE_CAP_PROT_ROCE 0x00200000
609#define RDMA_CORE_CAP_PROT_IWARP 0x00400000
Matan Barak7766a992015-12-23 14:56:50 +0200610#define RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP 0x00800000
Or Gerlitzaa773bd2017-01-24 13:02:35 +0200611#define RDMA_CORE_CAP_PROT_RAW_PACKET 0x01000000
Or Gerlitzce1e0552017-01-24 13:02:38 +0200612#define RDMA_CORE_CAP_PROT_USNIC 0x02000000
Ira Weinyf9b22e32015-05-13 20:02:59 -0400613
Artemy Kovalyovb02289b2018-07-04 15:57:50 +0300614#define RDMA_CORE_PORT_IB_GRH_REQUIRED (RDMA_CORE_CAP_IB_GRH_REQUIRED \
615 | RDMA_CORE_CAP_PROT_ROCE \
616 | RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP)
617
Ira Weinyf9b22e32015-05-13 20:02:59 -0400618#define RDMA_CORE_PORT_IBA_IB (RDMA_CORE_CAP_PROT_IB \
619 | RDMA_CORE_CAP_IB_MAD \
620 | RDMA_CORE_CAP_IB_SMI \
621 | RDMA_CORE_CAP_IB_CM \
622 | RDMA_CORE_CAP_IB_SA \
623 | RDMA_CORE_CAP_AF_IB)
624#define RDMA_CORE_PORT_IBA_ROCE (RDMA_CORE_CAP_PROT_ROCE \
625 | RDMA_CORE_CAP_IB_MAD \
626 | RDMA_CORE_CAP_IB_CM \
Ira Weinyf9b22e32015-05-13 20:02:59 -0400627 | RDMA_CORE_CAP_AF_IB \
628 | RDMA_CORE_CAP_ETH_AH)
Matan Barak7766a992015-12-23 14:56:50 +0200629#define RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP \
630 (RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP \
631 | RDMA_CORE_CAP_IB_MAD \
632 | RDMA_CORE_CAP_IB_CM \
633 | RDMA_CORE_CAP_AF_IB \
634 | RDMA_CORE_CAP_ETH_AH)
Ira Weinyf9b22e32015-05-13 20:02:59 -0400635#define RDMA_CORE_PORT_IWARP (RDMA_CORE_CAP_PROT_IWARP \
636 | RDMA_CORE_CAP_IW_CM)
Ira Weiny65995fe2015-06-06 14:38:32 -0400637#define RDMA_CORE_PORT_INTEL_OPA (RDMA_CORE_PORT_IBA_IB \
638 | RDMA_CORE_CAP_OPA_MAD)
Ira Weinyf9b22e32015-05-13 20:02:59 -0400639
Or Gerlitzaa773bd2017-01-24 13:02:35 +0200640#define RDMA_CORE_PORT_RAW_PACKET (RDMA_CORE_CAP_PROT_RAW_PACKET)
641
Or Gerlitzce1e0552017-01-24 13:02:38 +0200642#define RDMA_CORE_PORT_USNIC (RDMA_CORE_CAP_PROT_USNIC)
643
Linus Torvalds1da177e2005-04-16 15:20:36 -0700644struct ib_port_attr {
Eli Cohenfad61ad2016-03-11 22:58:36 +0200645 u64 subnet_prefix;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700646 enum ib_port_state state;
647 enum ib_mtu max_mtu;
648 enum ib_mtu active_mtu;
649 int gid_tbl_len;
Jason Gunthorpe2f944c02018-07-04 15:57:48 +0300650 unsigned int ip_gids:1;
651 /* This is the value from PortInfo CapabilityMask, defined by IBA */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700652 u32 port_cap_flags;
653 u32 max_msg_sz;
654 u32 bad_pkey_cntr;
655 u32 qkey_viol_cntr;
656 u16 pkey_tbl_len;
Dasaratharaman Chandramoulidb585402017-06-08 13:37:48 -0400657 u32 sm_lid;
Dasaratharaman Chandramouli582faf32017-06-08 13:37:47 -0400658 u32 lid;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700659 u8 lmc;
660 u8 max_vl_num;
661 u8 sm_sl;
662 u8 subnet_timeout;
663 u8 init_type_reply;
664 u8 active_width;
665 u8 active_speed;
666 u8 phys_state;
Michael Guralnik1e8f43b2018-12-09 11:49:48 +0200667 u16 port_cap_flags2;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700668};
669
670enum ib_device_modify_flags {
Roland Dreierc5bcbbb2006-02-02 09:47:14 -0800671 IB_DEVICE_MODIFY_SYS_IMAGE_GUID = 1 << 0,
672 IB_DEVICE_MODIFY_NODE_DESC = 1 << 1
Linus Torvalds1da177e2005-04-16 15:20:36 -0700673};
674
Yuval Shaiabd99fde2016-08-25 10:57:07 -0700675#define IB_DEVICE_NODE_DESC_MAX 64
676
Linus Torvalds1da177e2005-04-16 15:20:36 -0700677struct ib_device_modify {
678 u64 sys_image_guid;
Yuval Shaiabd99fde2016-08-25 10:57:07 -0700679 char node_desc[IB_DEVICE_NODE_DESC_MAX];
Linus Torvalds1da177e2005-04-16 15:20:36 -0700680};
681
682enum ib_port_modify_flags {
683 IB_PORT_SHUTDOWN = 1,
684 IB_PORT_INIT_TYPE = (1<<2),
Vishwanathapura, Niranjanacb493662017-06-01 17:04:02 -0700685 IB_PORT_RESET_QKEY_CNTR = (1<<3),
686 IB_PORT_OPA_MASK_CHG = (1<<4)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700687};
688
689struct ib_port_modify {
690 u32 set_port_cap_mask;
691 u32 clr_port_cap_mask;
692 u8 init_type;
693};
694
695enum ib_event_type {
696 IB_EVENT_CQ_ERR,
697 IB_EVENT_QP_FATAL,
698 IB_EVENT_QP_REQ_ERR,
699 IB_EVENT_QP_ACCESS_ERR,
700 IB_EVENT_COMM_EST,
701 IB_EVENT_SQ_DRAINED,
702 IB_EVENT_PATH_MIG,
703 IB_EVENT_PATH_MIG_ERR,
704 IB_EVENT_DEVICE_FATAL,
705 IB_EVENT_PORT_ACTIVE,
706 IB_EVENT_PORT_ERR,
707 IB_EVENT_LID_CHANGE,
708 IB_EVENT_PKEY_CHANGE,
Roland Dreierd41fcc62005-08-18 12:23:08 -0700709 IB_EVENT_SM_CHANGE,
710 IB_EVENT_SRQ_ERR,
711 IB_EVENT_SRQ_LIMIT_REACHED,
Leonid Arsh63942c92006-06-17 20:37:35 -0700712 IB_EVENT_QP_LAST_WQE_REACHED,
Or Gerlitz761d90e2011-06-15 14:39:29 +0000713 IB_EVENT_CLIENT_REREGISTER,
714 IB_EVENT_GID_CHANGE,
Yishai Hadasf213c052016-05-23 15:20:49 +0300715 IB_EVENT_WQ_FATAL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700716};
717
Bart Van Asschedb7489e2015-08-03 10:01:52 -0700718const char *__attribute_const__ ib_event_msg(enum ib_event_type event);
Sagi Grimberg2b1b5b62015-05-18 13:40:28 +0300719
Linus Torvalds1da177e2005-04-16 15:20:36 -0700720struct ib_event {
721 struct ib_device *device;
722 union {
723 struct ib_cq *cq;
724 struct ib_qp *qp;
Roland Dreierd41fcc62005-08-18 12:23:08 -0700725 struct ib_srq *srq;
Yishai Hadasf213c052016-05-23 15:20:49 +0300726 struct ib_wq *wq;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700727 u8 port_num;
728 } element;
729 enum ib_event_type event;
730};
731
732struct ib_event_handler {
733 struct ib_device *device;
734 void (*handler)(struct ib_event_handler *, struct ib_event *);
735 struct list_head list;
736};
737
738#define INIT_IB_EVENT_HANDLER(_ptr, _device, _handler) \
739 do { \
740 (_ptr)->device = _device; \
741 (_ptr)->handler = _handler; \
742 INIT_LIST_HEAD(&(_ptr)->list); \
743 } while (0)
744
745struct ib_global_route {
Jason Gunthorpe8d9ec9a2018-06-13 10:22:03 +0300746 const struct ib_gid_attr *sgid_attr;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700747 union ib_gid dgid;
748 u32 flow_label;
749 u8 sgid_index;
750 u8 hop_limit;
751 u8 traffic_class;
752};
753
Hal Rosenstock513789e2005-07-27 11:45:34 -0700754struct ib_grh {
Sean Hefty97f52eb2005-08-13 21:05:57 -0700755 __be32 version_tclass_flow;
756 __be16 paylen;
Hal Rosenstock513789e2005-07-27 11:45:34 -0700757 u8 next_hdr;
758 u8 hop_limit;
759 union ib_gid sgid;
760 union ib_gid dgid;
761};
762
Somnath Koturc865f242015-12-23 14:56:51 +0200763union rdma_network_hdr {
764 struct ib_grh ibgrh;
765 struct {
766 /* The IB spec states that if it's IPv4, the header
767 * is located in the last 20 bytes of the header.
768 */
769 u8 reserved[20];
770 struct iphdr roce4grh;
771 };
772};
773
Don Hiatt7dafbab2017-05-12 09:19:55 -0700774#define IB_QPN_MASK 0xFFFFFF
775
Linus Torvalds1da177e2005-04-16 15:20:36 -0700776enum {
777 IB_MULTICAST_QPN = 0xffffff
778};
779
Harvey Harrisonf3a7c662009-02-14 22:58:35 -0800780#define IB_LID_PERMISSIVE cpu_to_be16(0xFFFF)
Dennis Dalessandrob4e64392016-01-06 10:04:31 -0800781#define IB_MULTICAST_LID_BASE cpu_to_be16(0xC000)
Sean Hefty97f52eb2005-08-13 21:05:57 -0700782
Linus Torvalds1da177e2005-04-16 15:20:36 -0700783enum ib_ah_flags {
784 IB_AH_GRH = 1
785};
786
Jack Morgensteinbf6a9e32006-04-10 09:43:47 -0700787enum ib_rate {
788 IB_RATE_PORT_CURRENT = 0,
789 IB_RATE_2_5_GBPS = 2,
790 IB_RATE_5_GBPS = 5,
791 IB_RATE_10_GBPS = 3,
792 IB_RATE_20_GBPS = 6,
793 IB_RATE_30_GBPS = 4,
794 IB_RATE_40_GBPS = 7,
795 IB_RATE_60_GBPS = 8,
796 IB_RATE_80_GBPS = 9,
Marcel Apfelbaum71eeba12011-10-05 14:21:47 +0300797 IB_RATE_120_GBPS = 10,
798 IB_RATE_14_GBPS = 11,
799 IB_RATE_56_GBPS = 12,
800 IB_RATE_112_GBPS = 13,
801 IB_RATE_168_GBPS = 14,
802 IB_RATE_25_GBPS = 15,
803 IB_RATE_100_GBPS = 16,
804 IB_RATE_200_GBPS = 17,
Michael Guralnika5a5d192018-12-09 11:49:50 +0200805 IB_RATE_300_GBPS = 18,
806 IB_RATE_28_GBPS = 19,
807 IB_RATE_50_GBPS = 20,
808 IB_RATE_400_GBPS = 21,
809 IB_RATE_600_GBPS = 22,
Jack Morgensteinbf6a9e32006-04-10 09:43:47 -0700810};
811
812/**
813 * ib_rate_to_mult - Convert the IB rate enum to a multiple of the
814 * base rate of 2.5 Gbit/sec. For example, IB_RATE_5_GBPS will be
815 * converted to 2, since 5 Gbit/sec is 2 * 2.5 Gbit/sec.
816 * @rate: rate to convert.
817 */
Roland Dreier8385fd82014-06-04 10:00:16 -0700818__attribute_const__ int ib_rate_to_mult(enum ib_rate rate);
Jack Morgensteinbf6a9e32006-04-10 09:43:47 -0700819
820/**
Marcel Apfelbaum71eeba12011-10-05 14:21:47 +0300821 * ib_rate_to_mbps - Convert the IB rate enum to Mbps.
822 * For example, IB_RATE_2_5_GBPS will be converted to 2500.
823 * @rate: rate to convert.
824 */
Roland Dreier8385fd82014-06-04 10:00:16 -0700825__attribute_const__ int ib_rate_to_mbps(enum ib_rate rate);
Marcel Apfelbaum71eeba12011-10-05 14:21:47 +0300826
Sagi Grimberg17cd3a22014-02-23 14:19:04 +0200827
828/**
Sagi Grimberg9bee1782015-07-30 10:32:35 +0300829 * enum ib_mr_type - memory region type
830 * @IB_MR_TYPE_MEM_REG: memory region that is used for
831 * normal registration
Sagi Grimbergf5aa9152016-02-29 19:07:32 +0200832 * @IB_MR_TYPE_SG_GAPS: memory region that is capable to
833 * register any arbitrary sg lists (without
834 * the normal mr constraints - see
835 * ib_map_mr_sg)
Max Gurtovoya0bc0992019-06-11 18:52:38 +0300836 * @IB_MR_TYPE_DM: memory region that is used for device
837 * memory registration
838 * @IB_MR_TYPE_USER: memory region that is used for the user-space
839 * application
840 * @IB_MR_TYPE_DMA: memory region that is used for DMA operations
841 * without address translations (VA=PA)
Israel Rukshin26bc7ea2019-06-11 18:52:39 +0300842 * @IB_MR_TYPE_INTEGRITY: memory region that is used for
843 * data integrity operations
Sagi Grimberg17cd3a22014-02-23 14:19:04 +0200844 */
Sagi Grimberg9bee1782015-07-30 10:32:35 +0300845enum ib_mr_type {
846 IB_MR_TYPE_MEM_REG,
Sagi Grimbergf5aa9152016-02-29 19:07:32 +0200847 IB_MR_TYPE_SG_GAPS,
Max Gurtovoya0bc0992019-06-11 18:52:38 +0300848 IB_MR_TYPE_DM,
849 IB_MR_TYPE_USER,
850 IB_MR_TYPE_DMA,
Israel Rukshin26bc7ea2019-06-11 18:52:39 +0300851 IB_MR_TYPE_INTEGRITY,
Sagi Grimberg17cd3a22014-02-23 14:19:04 +0200852};
853
Sagi Grimberg1b01d332014-02-23 14:19:05 +0200854enum ib_mr_status_check {
855 IB_MR_CHECK_SIG_STATUS = 1,
856};
857
858/**
859 * struct ib_mr_status - Memory region status container
860 *
861 * @fail_status: Bitmask of MR checks status. For each
862 * failed check a corresponding status bit is set.
863 * @sig_err: Additional info for IB_MR_CEHCK_SIG_STATUS
864 * failure.
865 */
866struct ib_mr_status {
867 u32 fail_status;
868 struct ib_sig_err sig_err;
869};
870
Marcel Apfelbaum71eeba12011-10-05 14:21:47 +0300871/**
Jack Morgensteinbf6a9e32006-04-10 09:43:47 -0700872 * mult_to_ib_rate - Convert a multiple of 2.5 Gbit/sec to an IB rate
873 * enum.
874 * @mult: multiple to convert.
875 */
Roland Dreier8385fd82014-06-04 10:00:16 -0700876__attribute_const__ enum ib_rate mult_to_ib_rate(int mult);
Jack Morgensteinbf6a9e32006-04-10 09:43:47 -0700877
Dasaratharaman Chandramouli44c58482017-04-29 14:41:29 -0400878enum rdma_ah_attr_type {
Don Hiatt87daac62018-02-01 10:57:03 -0800879 RDMA_AH_ATTR_TYPE_UNDEFINED,
Dasaratharaman Chandramouli44c58482017-04-29 14:41:29 -0400880 RDMA_AH_ATTR_TYPE_IB,
881 RDMA_AH_ATTR_TYPE_ROCE,
Dasaratharaman Chandramouli64b46462017-04-29 14:41:30 -0400882 RDMA_AH_ATTR_TYPE_OPA,
Dasaratharaman Chandramouli44c58482017-04-29 14:41:29 -0400883};
884
885struct ib_ah_attr {
886 u16 dlid;
887 u8 src_path_bits;
888};
889
890struct roce_ah_attr {
891 u8 dmac[ETH_ALEN];
892};
893
Dasaratharaman Chandramouli64b46462017-04-29 14:41:30 -0400894struct opa_ah_attr {
895 u32 dlid;
896 u8 src_path_bits;
Don Hiattd98bb7f2017-08-04 13:54:16 -0700897 bool make_grd;
Dasaratharaman Chandramouli64b46462017-04-29 14:41:30 -0400898};
899
Dasaratharaman Chandramouli90898852017-04-29 14:41:18 -0400900struct rdma_ah_attr {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700901 struct ib_global_route grh;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700902 u8 sl;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700903 u8 static_rate;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700904 u8 port_num;
Dasaratharaman Chandramouli44c58482017-04-29 14:41:29 -0400905 u8 ah_flags;
906 enum rdma_ah_attr_type type;
907 union {
908 struct ib_ah_attr ib;
909 struct roce_ah_attr roce;
Dasaratharaman Chandramouli64b46462017-04-29 14:41:30 -0400910 struct opa_ah_attr opa;
Dasaratharaman Chandramouli44c58482017-04-29 14:41:29 -0400911 };
Linus Torvalds1da177e2005-04-16 15:20:36 -0700912};
913
914enum ib_wc_status {
915 IB_WC_SUCCESS,
916 IB_WC_LOC_LEN_ERR,
917 IB_WC_LOC_QP_OP_ERR,
918 IB_WC_LOC_EEC_OP_ERR,
919 IB_WC_LOC_PROT_ERR,
920 IB_WC_WR_FLUSH_ERR,
921 IB_WC_MW_BIND_ERR,
922 IB_WC_BAD_RESP_ERR,
923 IB_WC_LOC_ACCESS_ERR,
924 IB_WC_REM_INV_REQ_ERR,
925 IB_WC_REM_ACCESS_ERR,
926 IB_WC_REM_OP_ERR,
927 IB_WC_RETRY_EXC_ERR,
928 IB_WC_RNR_RETRY_EXC_ERR,
929 IB_WC_LOC_RDD_VIOL_ERR,
930 IB_WC_REM_INV_RD_REQ_ERR,
931 IB_WC_REM_ABORT_ERR,
932 IB_WC_INV_EECN_ERR,
933 IB_WC_INV_EEC_STATE_ERR,
934 IB_WC_FATAL_ERR,
935 IB_WC_RESP_TIMEOUT_ERR,
936 IB_WC_GENERAL_ERR
937};
938
Bart Van Asschedb7489e2015-08-03 10:01:52 -0700939const char *__attribute_const__ ib_wc_status_msg(enum ib_wc_status status);
Sagi Grimberg2b1b5b62015-05-18 13:40:28 +0300940
Linus Torvalds1da177e2005-04-16 15:20:36 -0700941enum ib_wc_opcode {
942 IB_WC_SEND,
943 IB_WC_RDMA_WRITE,
944 IB_WC_RDMA_READ,
945 IB_WC_COMP_SWAP,
946 IB_WC_FETCH_ADD,
Eli Cohenc93570f2008-04-16 21:09:27 -0700947 IB_WC_LSO,
Steve Wise00f7ec32008-07-14 23:48:45 -0700948 IB_WC_LOCAL_INV,
Sagi Grimberg4c67e2b2015-10-13 19:11:24 +0300949 IB_WC_REG_MR,
Vladimir Sokolovsky5e80ba82010-04-14 17:23:01 +0300950 IB_WC_MASKED_COMP_SWAP,
951 IB_WC_MASKED_FETCH_ADD,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700952/*
953 * Set value of IB_WC_RECV so consumers can test if a completion is a
954 * receive by testing (opcode & IB_WC_RECV).
955 */
956 IB_WC_RECV = 1 << 7,
957 IB_WC_RECV_RDMA_WITH_IMM
958};
959
960enum ib_wc_flags {
961 IB_WC_GRH = 1,
Steve Wise00f7ec32008-07-14 23:48:45 -0700962 IB_WC_WITH_IMM = (1<<1),
963 IB_WC_WITH_INVALIDATE = (1<<2),
Or Gerlitzd927d502012-01-11 19:03:51 +0200964 IB_WC_IP_CSUM_OK = (1<<3),
Matan Barakdd5f03b2013-12-12 18:03:11 +0200965 IB_WC_WITH_SMAC = (1<<4),
966 IB_WC_WITH_VLAN = (1<<5),
Somnath Koturc865f242015-12-23 14:56:51 +0200967 IB_WC_WITH_NETWORK_HDR_TYPE = (1<<6),
Linus Torvalds1da177e2005-04-16 15:20:36 -0700968};
969
970struct ib_wc {
Christoph Hellwig14d3a3b2015-12-11 11:53:03 -0800971 union {
972 u64 wr_id;
973 struct ib_cqe *wr_cqe;
974 };
Linus Torvalds1da177e2005-04-16 15:20:36 -0700975 enum ib_wc_status status;
976 enum ib_wc_opcode opcode;
977 u32 vendor_err;
978 u32 byte_len;
Michael S. Tsirkin062dbb62006-12-31 21:09:42 +0200979 struct ib_qp *qp;
Steve Wise00f7ec32008-07-14 23:48:45 -0700980 union {
981 __be32 imm_data;
982 u32 invalidate_rkey;
983 } ex;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700984 u32 src_qp;
Bodong Wangcd2a6e72018-01-12 07:58:41 +0200985 u32 slid;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700986 int wc_flags;
987 u16 pkey_index;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700988 u8 sl;
989 u8 dlid_path_bits;
990 u8 port_num; /* valid only for DR SMPs on switches */
Matan Barakdd5f03b2013-12-12 18:03:11 +0200991 u8 smac[ETH_ALEN];
992 u16 vlan_id;
Somnath Koturc865f242015-12-23 14:56:51 +0200993 u8 network_hdr_type;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700994};
995
Roland Dreiered23a722007-05-06 21:02:48 -0700996enum ib_cq_notify_flags {
997 IB_CQ_SOLICITED = 1 << 0,
998 IB_CQ_NEXT_COMP = 1 << 1,
999 IB_CQ_SOLICITED_MASK = IB_CQ_SOLICITED | IB_CQ_NEXT_COMP,
1000 IB_CQ_REPORT_MISSED_EVENTS = 1 << 2,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001001};
1002
Sean Hefty96104ed2011-05-23 16:31:36 -07001003enum ib_srq_type {
Sean Hefty418d5132011-05-23 19:42:29 -07001004 IB_SRQT_BASIC,
Artemy Kovalyov9c2c8492017-08-17 15:52:05 +03001005 IB_SRQT_XRC,
1006 IB_SRQT_TM,
Sean Hefty96104ed2011-05-23 16:31:36 -07001007};
1008
Artemy Kovalyov1a56ff62017-08-17 15:52:04 +03001009static inline bool ib_srq_has_cq(enum ib_srq_type srq_type)
1010{
Artemy Kovalyov9c2c8492017-08-17 15:52:05 +03001011 return srq_type == IB_SRQT_XRC ||
1012 srq_type == IB_SRQT_TM;
Artemy Kovalyov1a56ff62017-08-17 15:52:04 +03001013}
1014
Roland Dreierd41fcc62005-08-18 12:23:08 -07001015enum ib_srq_attr_mask {
1016 IB_SRQ_MAX_WR = 1 << 0,
1017 IB_SRQ_LIMIT = 1 << 1,
1018};
1019
1020struct ib_srq_attr {
1021 u32 max_wr;
1022 u32 max_sge;
1023 u32 srq_limit;
1024};
1025
1026struct ib_srq_init_attr {
1027 void (*event_handler)(struct ib_event *, void *);
1028 void *srq_context;
1029 struct ib_srq_attr attr;
Sean Hefty96104ed2011-05-23 16:31:36 -07001030 enum ib_srq_type srq_type;
Sean Hefty418d5132011-05-23 19:42:29 -07001031
Artemy Kovalyov1a56ff62017-08-17 15:52:04 +03001032 struct {
1033 struct ib_cq *cq;
1034 union {
1035 struct {
1036 struct ib_xrcd *xrcd;
1037 } xrc;
Artemy Kovalyov9c2c8492017-08-17 15:52:05 +03001038
1039 struct {
1040 u32 max_num_tags;
1041 } tag_matching;
Artemy Kovalyov1a56ff62017-08-17 15:52:04 +03001042 };
Sean Hefty418d5132011-05-23 19:42:29 -07001043 } ext;
Roland Dreierd41fcc62005-08-18 12:23:08 -07001044};
1045
Linus Torvalds1da177e2005-04-16 15:20:36 -07001046struct ib_qp_cap {
1047 u32 max_send_wr;
1048 u32 max_recv_wr;
1049 u32 max_send_sge;
1050 u32 max_recv_sge;
1051 u32 max_inline_data;
Christoph Hellwiga060b562016-05-03 18:01:09 +02001052
1053 /*
1054 * Maximum number of rdma_rw_ctx structures in flight at a time.
1055 * ib_create_qp() will calculate the right amount of neededed WRs
1056 * and MRs based on this.
1057 */
1058 u32 max_rdma_ctxs;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001059};
1060
1061enum ib_sig_type {
1062 IB_SIGNAL_ALL_WR,
1063 IB_SIGNAL_REQ_WR
1064};
1065
1066enum ib_qp_type {
1067 /*
1068 * IB_QPT_SMI and IB_QPT_GSI have to be the first two entries
1069 * here (and in that order) since the MAD layer uses them as
1070 * indices into a 2-entry table.
1071 */
1072 IB_QPT_SMI,
1073 IB_QPT_GSI,
1074
1075 IB_QPT_RC,
1076 IB_QPT_UC,
1077 IB_QPT_UD,
1078 IB_QPT_RAW_IPV6,
Sean Heftyb42b63c2011-05-23 19:59:25 -07001079 IB_QPT_RAW_ETHERTYPE,
Or Gerlitzc938a612012-03-01 12:17:51 +02001080 IB_QPT_RAW_PACKET = 8,
Sean Heftyb42b63c2011-05-23 19:59:25 -07001081 IB_QPT_XRC_INI = 9,
1082 IB_QPT_XRC_TGT,
Jack Morgenstein0134f162013-07-07 17:25:52 +03001083 IB_QPT_MAX,
Moni Shoua8011c1e2018-01-02 16:19:30 +02001084 IB_QPT_DRIVER = 0xFF,
Jack Morgenstein0134f162013-07-07 17:25:52 +03001085 /* Reserve a range for qp types internal to the low level driver.
1086 * These qp types will not be visible at the IB core layer, so the
1087 * IB_QPT_MAX usages should not be affected in the core layer
1088 */
1089 IB_QPT_RESERVED1 = 0x1000,
1090 IB_QPT_RESERVED2,
1091 IB_QPT_RESERVED3,
1092 IB_QPT_RESERVED4,
1093 IB_QPT_RESERVED5,
1094 IB_QPT_RESERVED6,
1095 IB_QPT_RESERVED7,
1096 IB_QPT_RESERVED8,
1097 IB_QPT_RESERVED9,
1098 IB_QPT_RESERVED10,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001099};
1100
Eli Cohenb846f252008-04-16 21:09:27 -07001101enum ib_qp_create_flags {
Ron Livne47ee1b92008-07-14 23:48:48 -07001102 IB_QP_CREATE_IPOIB_UD_LSO = 1 << 0,
1103 IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK = 1 << 1,
Leon Romanovsky8a06ce52015-12-20 12:16:10 +02001104 IB_QP_CREATE_CROSS_CHANNEL = 1 << 2,
1105 IB_QP_CREATE_MANAGED_SEND = 1 << 3,
1106 IB_QP_CREATE_MANAGED_RECV = 1 << 4,
Matan Barak90f1d1b2013-11-07 15:25:12 +02001107 IB_QP_CREATE_NETIF_QP = 1 << 5,
Israel Rukshinc0a6cbb2019-06-11 18:52:50 +03001108 IB_QP_CREATE_INTEGRITY_EN = 1 << 6,
Leon Romanovsky7855f582017-05-23 14:38:16 +03001109 /* FREE = 1 << 7, */
Majd Dibbinyb531b902016-04-17 17:19:36 +03001110 IB_QP_CREATE_SCATTER_FCS = 1 << 8,
Noa Osherovich9c2b2702017-01-18 15:39:56 +02001111 IB_QP_CREATE_CVLAN_STRIPPING = 1 << 9,
Yishai Hadas02984cc2017-06-08 16:15:06 +03001112 IB_QP_CREATE_SOURCE_QPN = 1 << 10,
Noa Osheroviche1d2e882017-10-29 13:59:44 +02001113 IB_QP_CREATE_PCI_WRITE_END_PADDING = 1 << 11,
Jack Morgensteind2b57062012-08-03 08:40:37 +00001114 /* reserve bits 26-31 for low level drivers' internal use */
1115 IB_QP_CREATE_RESERVED_START = 1 << 26,
1116 IB_QP_CREATE_RESERVED_END = 1 << 31,
Eli Cohenb846f252008-04-16 21:09:27 -07001117};
1118
Yishai Hadas73c40c62013-08-01 18:49:53 +03001119/*
1120 * Note: users may not call ib_close_qp or ib_destroy_qp from the event_handler
1121 * callback to destroy the passed in QP.
1122 */
1123
Linus Torvalds1da177e2005-04-16 15:20:36 -07001124struct ib_qp_init_attr {
Chuck Levereb93c82e2018-09-04 11:45:20 -04001125 /* Consumer's event_handler callback must not block */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001126 void (*event_handler)(struct ib_event *, void *);
Chuck Levereb93c82e2018-09-04 11:45:20 -04001127
Linus Torvalds1da177e2005-04-16 15:20:36 -07001128 void *qp_context;
1129 struct ib_cq *send_cq;
1130 struct ib_cq *recv_cq;
1131 struct ib_srq *srq;
Sean Heftyb42b63c2011-05-23 19:59:25 -07001132 struct ib_xrcd *xrcd; /* XRC TGT QPs only */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001133 struct ib_qp_cap cap;
1134 enum ib_sig_type sq_sig_type;
1135 enum ib_qp_type qp_type;
Nathan Chancellorb56511c2018-09-24 12:57:16 -07001136 u32 create_flags;
Christoph Hellwiga060b562016-05-03 18:01:09 +02001137
1138 /*
1139 * Only needed for special QP types, or when using the RW API.
1140 */
1141 u8 port_num;
Yishai Hadasa9017e22016-05-23 15:20:54 +03001142 struct ib_rwq_ind_table *rwq_ind_tbl;
Yishai Hadas02984cc2017-06-08 16:15:06 +03001143 u32 source_qpn;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001144};
1145
Sean Hefty0e0ec7e2011-08-08 15:31:51 -07001146struct ib_qp_open_attr {
1147 void (*event_handler)(struct ib_event *, void *);
1148 void *qp_context;
1149 u32 qp_num;
1150 enum ib_qp_type qp_type;
1151};
1152
Linus Torvalds1da177e2005-04-16 15:20:36 -07001153enum ib_rnr_timeout {
1154 IB_RNR_TIMER_655_36 = 0,
1155 IB_RNR_TIMER_000_01 = 1,
1156 IB_RNR_TIMER_000_02 = 2,
1157 IB_RNR_TIMER_000_03 = 3,
1158 IB_RNR_TIMER_000_04 = 4,
1159 IB_RNR_TIMER_000_06 = 5,
1160 IB_RNR_TIMER_000_08 = 6,
1161 IB_RNR_TIMER_000_12 = 7,
1162 IB_RNR_TIMER_000_16 = 8,
1163 IB_RNR_TIMER_000_24 = 9,
1164 IB_RNR_TIMER_000_32 = 10,
1165 IB_RNR_TIMER_000_48 = 11,
1166 IB_RNR_TIMER_000_64 = 12,
1167 IB_RNR_TIMER_000_96 = 13,
1168 IB_RNR_TIMER_001_28 = 14,
1169 IB_RNR_TIMER_001_92 = 15,
1170 IB_RNR_TIMER_002_56 = 16,
1171 IB_RNR_TIMER_003_84 = 17,
1172 IB_RNR_TIMER_005_12 = 18,
1173 IB_RNR_TIMER_007_68 = 19,
1174 IB_RNR_TIMER_010_24 = 20,
1175 IB_RNR_TIMER_015_36 = 21,
1176 IB_RNR_TIMER_020_48 = 22,
1177 IB_RNR_TIMER_030_72 = 23,
1178 IB_RNR_TIMER_040_96 = 24,
1179 IB_RNR_TIMER_061_44 = 25,
1180 IB_RNR_TIMER_081_92 = 26,
1181 IB_RNR_TIMER_122_88 = 27,
1182 IB_RNR_TIMER_163_84 = 28,
1183 IB_RNR_TIMER_245_76 = 29,
1184 IB_RNR_TIMER_327_68 = 30,
1185 IB_RNR_TIMER_491_52 = 31
1186};
1187
1188enum ib_qp_attr_mask {
1189 IB_QP_STATE = 1,
1190 IB_QP_CUR_STATE = (1<<1),
1191 IB_QP_EN_SQD_ASYNC_NOTIFY = (1<<2),
1192 IB_QP_ACCESS_FLAGS = (1<<3),
1193 IB_QP_PKEY_INDEX = (1<<4),
1194 IB_QP_PORT = (1<<5),
1195 IB_QP_QKEY = (1<<6),
1196 IB_QP_AV = (1<<7),
1197 IB_QP_PATH_MTU = (1<<8),
1198 IB_QP_TIMEOUT = (1<<9),
1199 IB_QP_RETRY_CNT = (1<<10),
1200 IB_QP_RNR_RETRY = (1<<11),
1201 IB_QP_RQ_PSN = (1<<12),
1202 IB_QP_MAX_QP_RD_ATOMIC = (1<<13),
1203 IB_QP_ALT_PATH = (1<<14),
1204 IB_QP_MIN_RNR_TIMER = (1<<15),
1205 IB_QP_SQ_PSN = (1<<16),
1206 IB_QP_MAX_DEST_RD_ATOMIC = (1<<17),
1207 IB_QP_PATH_MIG_STATE = (1<<18),
1208 IB_QP_CAP = (1<<19),
Matan Barakdd5f03b2013-12-12 18:03:11 +02001209 IB_QP_DEST_QPN = (1<<20),
Matan Barakaa744cc2015-10-15 18:38:53 +03001210 IB_QP_RESERVED1 = (1<<21),
1211 IB_QP_RESERVED2 = (1<<22),
1212 IB_QP_RESERVED3 = (1<<23),
1213 IB_QP_RESERVED4 = (1<<24),
Bodong Wang528e5a12016-12-01 13:43:14 +02001214 IB_QP_RATE_LIMIT = (1<<25),
Linus Torvalds1da177e2005-04-16 15:20:36 -07001215};
1216
1217enum ib_qp_state {
1218 IB_QPS_RESET,
1219 IB_QPS_INIT,
1220 IB_QPS_RTR,
1221 IB_QPS_RTS,
1222 IB_QPS_SQD,
1223 IB_QPS_SQE,
1224 IB_QPS_ERR
1225};
1226
1227enum ib_mig_state {
1228 IB_MIG_MIGRATED,
1229 IB_MIG_REARM,
1230 IB_MIG_ARMED
1231};
1232
Shani Michaeli7083e422013-02-06 16:19:12 +00001233enum ib_mw_type {
1234 IB_MW_TYPE_1 = 1,
1235 IB_MW_TYPE_2 = 2
1236};
1237
Linus Torvalds1da177e2005-04-16 15:20:36 -07001238struct ib_qp_attr {
1239 enum ib_qp_state qp_state;
1240 enum ib_qp_state cur_qp_state;
1241 enum ib_mtu path_mtu;
1242 enum ib_mig_state path_mig_state;
1243 u32 qkey;
1244 u32 rq_psn;
1245 u32 sq_psn;
1246 u32 dest_qp_num;
1247 int qp_access_flags;
1248 struct ib_qp_cap cap;
Dasaratharaman Chandramouli90898852017-04-29 14:41:18 -04001249 struct rdma_ah_attr ah_attr;
1250 struct rdma_ah_attr alt_ah_attr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001251 u16 pkey_index;
1252 u16 alt_pkey_index;
1253 u8 en_sqd_async_notify;
1254 u8 sq_draining;
1255 u8 max_rd_atomic;
1256 u8 max_dest_rd_atomic;
1257 u8 min_rnr_timer;
1258 u8 port_num;
1259 u8 timeout;
1260 u8 retry_cnt;
1261 u8 rnr_retry;
1262 u8 alt_port_num;
1263 u8 alt_timeout;
Bodong Wang528e5a12016-12-01 13:43:14 +02001264 u32 rate_limit;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001265};
1266
1267enum ib_wr_opcode {
Jason Gunthorpe9a597392018-08-14 15:33:02 -07001268 /* These are shared with userspace */
1269 IB_WR_RDMA_WRITE = IB_UVERBS_WR_RDMA_WRITE,
1270 IB_WR_RDMA_WRITE_WITH_IMM = IB_UVERBS_WR_RDMA_WRITE_WITH_IMM,
1271 IB_WR_SEND = IB_UVERBS_WR_SEND,
1272 IB_WR_SEND_WITH_IMM = IB_UVERBS_WR_SEND_WITH_IMM,
1273 IB_WR_RDMA_READ = IB_UVERBS_WR_RDMA_READ,
1274 IB_WR_ATOMIC_CMP_AND_SWP = IB_UVERBS_WR_ATOMIC_CMP_AND_SWP,
1275 IB_WR_ATOMIC_FETCH_AND_ADD = IB_UVERBS_WR_ATOMIC_FETCH_AND_ADD,
1276 IB_WR_LSO = IB_UVERBS_WR_TSO,
1277 IB_WR_SEND_WITH_INV = IB_UVERBS_WR_SEND_WITH_INV,
1278 IB_WR_RDMA_READ_WITH_INV = IB_UVERBS_WR_RDMA_READ_WITH_INV,
1279 IB_WR_LOCAL_INV = IB_UVERBS_WR_LOCAL_INV,
1280 IB_WR_MASKED_ATOMIC_CMP_AND_SWP =
1281 IB_UVERBS_WR_MASKED_ATOMIC_CMP_AND_SWP,
1282 IB_WR_MASKED_ATOMIC_FETCH_AND_ADD =
1283 IB_UVERBS_WR_MASKED_ATOMIC_FETCH_AND_ADD,
1284
1285 /* These are kernel only and can not be issued by userspace */
1286 IB_WR_REG_MR = 0x20,
Max Gurtovoy38ca87c2019-06-11 18:52:46 +03001287 IB_WR_REG_MR_INTEGRITY,
Jason Gunthorpe9a597392018-08-14 15:33:02 -07001288
Jack Morgenstein0134f162013-07-07 17:25:52 +03001289 /* reserve values for low level drivers' internal use.
1290 * These values will not be used at all in the ib core layer.
1291 */
1292 IB_WR_RESERVED1 = 0xf0,
1293 IB_WR_RESERVED2,
1294 IB_WR_RESERVED3,
1295 IB_WR_RESERVED4,
1296 IB_WR_RESERVED5,
1297 IB_WR_RESERVED6,
1298 IB_WR_RESERVED7,
1299 IB_WR_RESERVED8,
1300 IB_WR_RESERVED9,
1301 IB_WR_RESERVED10,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001302};
1303
1304enum ib_send_flags {
1305 IB_SEND_FENCE = 1,
1306 IB_SEND_SIGNALED = (1<<1),
1307 IB_SEND_SOLICITED = (1<<2),
Eli Cohene0605d92008-01-30 18:30:57 +02001308 IB_SEND_INLINE = (1<<3),
Jack Morgenstein0134f162013-07-07 17:25:52 +03001309 IB_SEND_IP_CSUM = (1<<4),
1310
1311 /* reserve bits 26-31 for low level drivers' internal use */
1312 IB_SEND_RESERVED_START = (1 << 26),
1313 IB_SEND_RESERVED_END = (1 << 31),
Linus Torvalds1da177e2005-04-16 15:20:36 -07001314};
1315
1316struct ib_sge {
1317 u64 addr;
1318 u32 length;
1319 u32 lkey;
1320};
1321
Christoph Hellwig14d3a3b2015-12-11 11:53:03 -08001322struct ib_cqe {
1323 void (*done)(struct ib_cq *cq, struct ib_wc *wc);
1324};
1325
Linus Torvalds1da177e2005-04-16 15:20:36 -07001326struct ib_send_wr {
1327 struct ib_send_wr *next;
Christoph Hellwig14d3a3b2015-12-11 11:53:03 -08001328 union {
1329 u64 wr_id;
1330 struct ib_cqe *wr_cqe;
1331 };
Linus Torvalds1da177e2005-04-16 15:20:36 -07001332 struct ib_sge *sg_list;
1333 int num_sge;
1334 enum ib_wr_opcode opcode;
1335 int send_flags;
Roland Dreier0f39cf32008-04-16 21:09:32 -07001336 union {
1337 __be32 imm_data;
1338 u32 invalidate_rkey;
1339 } ex;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001340};
1341
Christoph Hellwige622f2f2015-10-08 09:16:33 +01001342struct ib_rdma_wr {
1343 struct ib_send_wr wr;
1344 u64 remote_addr;
1345 u32 rkey;
1346};
1347
Bart Van Asschef696bf62018-07-18 09:25:14 -07001348static inline const struct ib_rdma_wr *rdma_wr(const struct ib_send_wr *wr)
Christoph Hellwige622f2f2015-10-08 09:16:33 +01001349{
1350 return container_of(wr, struct ib_rdma_wr, wr);
1351}
1352
1353struct ib_atomic_wr {
1354 struct ib_send_wr wr;
1355 u64 remote_addr;
1356 u64 compare_add;
1357 u64 swap;
1358 u64 compare_add_mask;
1359 u64 swap_mask;
1360 u32 rkey;
1361};
1362
Bart Van Asschef696bf62018-07-18 09:25:14 -07001363static inline const struct ib_atomic_wr *atomic_wr(const struct ib_send_wr *wr)
Christoph Hellwige622f2f2015-10-08 09:16:33 +01001364{
1365 return container_of(wr, struct ib_atomic_wr, wr);
1366}
1367
1368struct ib_ud_wr {
1369 struct ib_send_wr wr;
1370 struct ib_ah *ah;
1371 void *header;
1372 int hlen;
1373 int mss;
1374 u32 remote_qpn;
1375 u32 remote_qkey;
1376 u16 pkey_index; /* valid for GSI only */
1377 u8 port_num; /* valid for DR SMPs on switch only */
1378};
1379
Bart Van Asschef696bf62018-07-18 09:25:14 -07001380static inline const struct ib_ud_wr *ud_wr(const struct ib_send_wr *wr)
Christoph Hellwige622f2f2015-10-08 09:16:33 +01001381{
1382 return container_of(wr, struct ib_ud_wr, wr);
1383}
1384
Sagi Grimberg4c67e2b2015-10-13 19:11:24 +03001385struct ib_reg_wr {
1386 struct ib_send_wr wr;
1387 struct ib_mr *mr;
1388 u32 key;
1389 int access;
1390};
1391
Bart Van Asschef696bf62018-07-18 09:25:14 -07001392static inline const struct ib_reg_wr *reg_wr(const struct ib_send_wr *wr)
Sagi Grimberg4c67e2b2015-10-13 19:11:24 +03001393{
1394 return container_of(wr, struct ib_reg_wr, wr);
1395}
1396
Linus Torvalds1da177e2005-04-16 15:20:36 -07001397struct ib_recv_wr {
1398 struct ib_recv_wr *next;
Christoph Hellwig14d3a3b2015-12-11 11:53:03 -08001399 union {
1400 u64 wr_id;
1401 struct ib_cqe *wr_cqe;
1402 };
Linus Torvalds1da177e2005-04-16 15:20:36 -07001403 struct ib_sge *sg_list;
1404 int num_sge;
1405};
1406
1407enum ib_access_flags {
Jason Gunthorpe4fca0372018-07-11 16:20:44 -06001408 IB_ACCESS_LOCAL_WRITE = IB_UVERBS_ACCESS_LOCAL_WRITE,
1409 IB_ACCESS_REMOTE_WRITE = IB_UVERBS_ACCESS_REMOTE_WRITE,
1410 IB_ACCESS_REMOTE_READ = IB_UVERBS_ACCESS_REMOTE_READ,
1411 IB_ACCESS_REMOTE_ATOMIC = IB_UVERBS_ACCESS_REMOTE_ATOMIC,
1412 IB_ACCESS_MW_BIND = IB_UVERBS_ACCESS_MW_BIND,
1413 IB_ZERO_BASED = IB_UVERBS_ACCESS_ZERO_BASED,
1414 IB_ACCESS_ON_DEMAND = IB_UVERBS_ACCESS_ON_DEMAND,
1415 IB_ACCESS_HUGETLB = IB_UVERBS_ACCESS_HUGETLB,
1416
1417 IB_ACCESS_SUPPORTED = ((IB_ACCESS_HUGETLB << 1) - 1)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001418};
1419
Christoph Hellwigb7d3e0a2015-12-23 19:12:47 +01001420/*
1421 * XXX: these are apparently used for ->rereg_user_mr, no idea why they
1422 * are hidden here instead of a uapi header!
1423 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001424enum ib_mr_rereg_flags {
1425 IB_MR_REREG_TRANS = 1,
1426 IB_MR_REREG_PD = (1<<1),
Matan Barak7e6edb92014-07-31 11:01:28 +03001427 IB_MR_REREG_ACCESS = (1<<2),
1428 IB_MR_REREG_SUPPORTED = ((IB_MR_REREG_ACCESS << 1) - 1)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001429};
1430
Linus Torvalds1da177e2005-04-16 15:20:36 -07001431struct ib_fmr_attr {
1432 int max_pages;
1433 int max_maps;
Or Gerlitzd36f34a2006-02-02 10:43:45 -08001434 u8 page_shift;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001435};
1436
Haggai Eran882214e2014-12-11 17:04:18 +02001437struct ib_umem;
1438
Matan Barak38321252017-04-04 13:31:42 +03001439enum rdma_remove_reason {
Yishai Hadas1c774832018-06-20 17:11:39 +03001440 /*
1441 * Userspace requested uobject deletion or initial try
1442 * to remove uobject via cleanup. Call could fail
1443 */
Matan Barak38321252017-04-04 13:31:42 +03001444 RDMA_REMOVE_DESTROY,
1445 /* Context deletion. This call should delete the actual object itself */
1446 RDMA_REMOVE_CLOSE,
1447 /* Driver is being hot-unplugged. This call should delete the actual object itself */
1448 RDMA_REMOVE_DRIVER_REMOVE,
Jason Gunthorpe87ad80a2018-07-25 21:40:12 -06001449 /* uobj is being cleaned-up before being committed */
1450 RDMA_REMOVE_ABORT,
Matan Barak38321252017-04-04 13:31:42 +03001451};
1452
Parav Pandit43579b52017-01-10 00:02:14 +00001453struct ib_rdmacg_object {
1454#ifdef CONFIG_CGROUP_RDMA
1455 struct rdma_cgroup *cg; /* owner rdma cgroup */
1456#endif
1457};
1458
Roland Dreiere2773c02005-07-07 17:57:10 -07001459struct ib_ucontext {
1460 struct ib_device *device;
Matan Barak771addf2017-04-04 13:31:41 +03001461 struct ib_uverbs_file *ufile;
Jason Gunthorpee9517472018-07-10 20:55:19 -06001462 /*
1463 * 'closing' can be read by the driver only during a destroy callback,
1464 * it is set when we are closing the file descriptor and indicates
1465 * that mm_sem may be locked.
1466 */
Leon Romanovsky6ceb6332018-09-03 20:18:03 +03001467 bool closing;
Shachar Raindel8ada2c12014-12-11 17:04:17 +02001468
Yishai Hadas1c774832018-06-20 17:11:39 +03001469 bool cleanup_retryable;
Matan Barak38321252017-04-04 13:31:42 +03001470
Parav Pandit43579b52017-01-10 00:02:14 +00001471 struct ib_rdmacg_object cg_obj;
Leon Romanovsky60615212018-11-28 13:16:43 +02001472 /*
1473 * Implementation details of the RDMA core, don't use in drivers:
1474 */
1475 struct rdma_restrack_entry res;
Michal Kalderon3411f9f2019-10-30 11:44:11 +02001476 struct xarray mmap_xa;
Roland Dreiere2773c02005-07-07 17:57:10 -07001477};
1478
1479struct ib_uobject {
1480 u64 user_handle; /* handle given to us by userspace */
Jason Gunthorpe6a5e9c82018-07-04 11:32:07 +03001481 /* ufile & ucontext owning this object */
1482 struct ib_uverbs_file *ufile;
1483 /* FIXME, save memory: ufile->context == context */
Roland Dreiere2773c02005-07-07 17:57:10 -07001484 struct ib_ucontext *context; /* associated user context */
Roland Dreier9ead1902006-06-17 20:44:49 -07001485 void *object; /* containing object */
Roland Dreiere2773c02005-07-07 17:57:10 -07001486 struct list_head list; /* link to context's list */
Parav Pandit43579b52017-01-10 00:02:14 +00001487 struct ib_rdmacg_object cg_obj; /* rdmacg object */
Roland Dreierb3d636b2008-04-16 21:01:06 -07001488 int id; /* index into kernel idr */
Roland Dreier9ead1902006-06-17 20:44:49 -07001489 struct kref ref;
Matan Barak38321252017-04-04 13:31:42 +03001490 atomic_t usecnt; /* protects exclusive access */
Mike Marciniszynd144da82015-11-02 12:13:25 -05001491 struct rcu_head rcu; /* kfree_rcu() overhead */
Matan Barak38321252017-04-04 13:31:42 +03001492
Jason Gunthorpe6b0d08f2018-08-09 20:14:37 -06001493 const struct uverbs_api_object *uapi_object;
Roland Dreiere2773c02005-07-07 17:57:10 -07001494};
1495
Roland Dreiere2773c02005-07-07 17:57:10 -07001496struct ib_udata {
Yann Droneaud309243e2013-12-11 23:01:44 +01001497 const void __user *inbuf;
Roland Dreiere2773c02005-07-07 17:57:10 -07001498 void __user *outbuf;
1499 size_t inlen;
1500 size_t outlen;
1501};
1502
Linus Torvalds1da177e2005-04-16 15:20:36 -07001503struct ib_pd {
Jason Gunthorpe96249d72015-08-05 14:14:45 -06001504 u32 local_dma_lkey;
Christoph Hellwiged082d32016-09-05 12:56:17 +02001505 u32 flags;
Roland Dreiere2773c02005-07-07 17:57:10 -07001506 struct ib_device *device;
1507 struct ib_uobject *uobject;
1508 atomic_t usecnt; /* count all resources */
Christoph Hellwig50d46332016-09-05 12:56:16 +02001509
Christoph Hellwiged082d32016-09-05 12:56:17 +02001510 u32 unsafe_global_rkey;
1511
Christoph Hellwig50d46332016-09-05 12:56:16 +02001512 /*
1513 * Implementation details of the RDMA core, don't use in drivers:
1514 */
1515 struct ib_mr *__internal_mr;
Leon Romanovsky02d88832018-01-28 11:17:20 +02001516 struct rdma_restrack_entry res;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001517};
1518
Sean Hefty59991f92011-05-23 17:52:46 -07001519struct ib_xrcd {
1520 struct ib_device *device;
Sean Heftyd3d72d92011-05-26 23:06:44 -07001521 atomic_t usecnt; /* count all exposed resources */
Sean Hefty53d0bd12011-05-24 08:33:46 -07001522 struct inode *inode;
Sean Heftyd3d72d92011-05-26 23:06:44 -07001523
1524 struct mutex tgt_qp_mutex;
1525 struct list_head tgt_qp_list;
Sean Hefty59991f92011-05-23 17:52:46 -07001526};
1527
Linus Torvalds1da177e2005-04-16 15:20:36 -07001528struct ib_ah {
1529 struct ib_device *device;
1530 struct ib_pd *pd;
Roland Dreiere2773c02005-07-07 17:57:10 -07001531 struct ib_uobject *uobject;
Jason Gunthorpe1a1f4602018-06-13 10:22:08 +03001532 const struct ib_gid_attr *sgid_attr;
Dasaratharaman Chandramouli44c58482017-04-29 14:41:29 -04001533 enum rdma_ah_attr_type type;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001534};
1535
1536typedef void (*ib_comp_handler)(struct ib_cq *cq, void *cq_context);
1537
Christoph Hellwig14d3a3b2015-12-11 11:53:03 -08001538enum ib_poll_context {
Jack Morgensteinf7948092018-08-27 08:35:55 +03001539 IB_POLL_DIRECT, /* caller context, no hw completions */
1540 IB_POLL_SOFTIRQ, /* poll from softirq context */
1541 IB_POLL_WORKQUEUE, /* poll from workqueue */
1542 IB_POLL_UNBOUND_WORKQUEUE, /* poll from unbound workqueue */
Christoph Hellwig14d3a3b2015-12-11 11:53:03 -08001543};
1544
Linus Torvalds1da177e2005-04-16 15:20:36 -07001545struct ib_cq {
Roland Dreiere2773c02005-07-07 17:57:10 -07001546 struct ib_device *device;
1547 struct ib_uobject *uobject;
1548 ib_comp_handler comp_handler;
1549 void (*event_handler)(struct ib_event *, void *);
Dotan Barak4deccd62008-07-14 23:48:44 -07001550 void *cq_context;
Roland Dreiere2773c02005-07-07 17:57:10 -07001551 int cqe;
1552 atomic_t usecnt; /* count number of work queues */
Christoph Hellwig14d3a3b2015-12-11 11:53:03 -08001553 enum ib_poll_context poll_ctx;
1554 struct ib_wc *wc;
1555 union {
1556 struct irq_poll iop;
1557 struct work_struct work;
1558 };
Jack Morgensteinf7948092018-08-27 08:35:55 +03001559 struct workqueue_struct *comp_wq;
Yamin Friedmanda662972019-07-08 13:59:03 +03001560 struct dim *dim;
Leon Romanovsky02d88832018-01-28 11:17:20 +02001561 /*
1562 * Implementation details of the RDMA core, don't use in drivers:
1563 */
1564 struct rdma_restrack_entry res;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001565};
1566
1567struct ib_srq {
Roland Dreierd41fcc62005-08-18 12:23:08 -07001568 struct ib_device *device;
1569 struct ib_pd *pd;
1570 struct ib_uobject *uobject;
1571 void (*event_handler)(struct ib_event *, void *);
1572 void *srq_context;
Sean Hefty96104ed2011-05-23 16:31:36 -07001573 enum ib_srq_type srq_type;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001574 atomic_t usecnt;
Sean Hefty418d5132011-05-23 19:42:29 -07001575
Artemy Kovalyov1a56ff62017-08-17 15:52:04 +03001576 struct {
1577 struct ib_cq *cq;
1578 union {
1579 struct {
1580 struct ib_xrcd *xrcd;
1581 u32 srq_num;
1582 } xrc;
1583 };
Sean Hefty418d5132011-05-23 19:42:29 -07001584 } ext;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001585};
1586
Noa Osherovichebaaee22017-01-18 15:39:54 +02001587enum ib_raw_packet_caps {
1588 /* Strip cvlan from incoming packet and report it in the matching work
1589 * completion is supported.
1590 */
1591 IB_RAW_PACKET_CAP_CVLAN_STRIPPING = (1 << 0),
1592 /* Scatter FCS field of an incoming packet to host memory is supported.
1593 */
1594 IB_RAW_PACKET_CAP_SCATTER_FCS = (1 << 1),
1595 /* Checksum offloads are supported (for both send and receive). */
1596 IB_RAW_PACKET_CAP_IP_CSUM = (1 << 2),
Maor Gottlieb7d9336d2017-05-30 10:29:10 +03001597 /* When a packet is received for an RQ with no receive WQEs, the
1598 * packet processing is delayed.
1599 */
1600 IB_RAW_PACKET_CAP_DELAY_DROP = (1 << 3),
Noa Osherovichebaaee22017-01-18 15:39:54 +02001601};
1602
Yishai Hadas5fd251c2016-05-23 15:20:48 +03001603enum ib_wq_type {
1604 IB_WQT_RQ
1605};
1606
1607enum ib_wq_state {
1608 IB_WQS_RESET,
1609 IB_WQS_RDY,
1610 IB_WQS_ERR
1611};
1612
1613struct ib_wq {
1614 struct ib_device *device;
1615 struct ib_uobject *uobject;
1616 void *wq_context;
1617 void (*event_handler)(struct ib_event *, void *);
1618 struct ib_pd *pd;
1619 struct ib_cq *cq;
1620 u32 wq_num;
1621 enum ib_wq_state state;
1622 enum ib_wq_type wq_type;
1623 atomic_t usecnt;
1624};
1625
Noa Osherovich10bac722017-01-18 15:39:55 +02001626enum ib_wq_flags {
1627 IB_WQ_FLAGS_CVLAN_STRIPPING = 1 << 0,
Noa Osherovich27b0df12017-01-18 15:39:57 +02001628 IB_WQ_FLAGS_SCATTER_FCS = 1 << 1,
Maor Gottlieb7d9336d2017-05-30 10:29:10 +03001629 IB_WQ_FLAGS_DELAY_DROP = 1 << 2,
Noa Osheroviche1d2e882017-10-29 13:59:44 +02001630 IB_WQ_FLAGS_PCI_WRITE_END_PADDING = 1 << 3,
Noa Osherovich10bac722017-01-18 15:39:55 +02001631};
1632
Yishai Hadas5fd251c2016-05-23 15:20:48 +03001633struct ib_wq_init_attr {
1634 void *wq_context;
1635 enum ib_wq_type wq_type;
1636 u32 max_wr;
1637 u32 max_sge;
1638 struct ib_cq *cq;
1639 void (*event_handler)(struct ib_event *, void *);
Noa Osherovich10bac722017-01-18 15:39:55 +02001640 u32 create_flags; /* Use enum ib_wq_flags */
Yishai Hadas5fd251c2016-05-23 15:20:48 +03001641};
1642
1643enum ib_wq_attr_mask {
Noa Osherovich10bac722017-01-18 15:39:55 +02001644 IB_WQ_STATE = 1 << 0,
1645 IB_WQ_CUR_STATE = 1 << 1,
1646 IB_WQ_FLAGS = 1 << 2,
Yishai Hadas5fd251c2016-05-23 15:20:48 +03001647};
1648
1649struct ib_wq_attr {
1650 enum ib_wq_state wq_state;
1651 enum ib_wq_state curr_wq_state;
Noa Osherovich10bac722017-01-18 15:39:55 +02001652 u32 flags; /* Use enum ib_wq_flags */
1653 u32 flags_mask; /* Use enum ib_wq_flags */
Yishai Hadas5fd251c2016-05-23 15:20:48 +03001654};
1655
Yishai Hadas6d397862016-05-23 15:20:51 +03001656struct ib_rwq_ind_table {
1657 struct ib_device *device;
1658 struct ib_uobject *uobject;
1659 atomic_t usecnt;
1660 u32 ind_tbl_num;
1661 u32 log_ind_tbl_size;
1662 struct ib_wq **ind_tbl;
1663};
1664
1665struct ib_rwq_ind_table_init_attr {
1666 u32 log_ind_tbl_size;
1667 /* Each entry is a pointer to Receive Work Queue */
1668 struct ib_wq **ind_tbl;
1669};
1670
Daniel Jurgensd291f1a2017-05-19 15:48:52 +03001671enum port_pkey_state {
1672 IB_PORT_PKEY_NOT_VALID = 0,
1673 IB_PORT_PKEY_VALID = 1,
1674 IB_PORT_PKEY_LISTED = 2,
1675};
1676
1677struct ib_qp_security;
1678
1679struct ib_port_pkey {
1680 enum port_pkey_state state;
1681 u16 pkey_index;
1682 u8 port_num;
1683 struct list_head qp_list;
1684 struct list_head to_error_list;
1685 struct ib_qp_security *sec;
1686};
1687
1688struct ib_ports_pkeys {
1689 struct ib_port_pkey main;
1690 struct ib_port_pkey alt;
1691};
1692
1693struct ib_qp_security {
1694 struct ib_qp *qp;
1695 struct ib_device *dev;
1696 /* Hold this mutex when changing port and pkey settings. */
1697 struct mutex mutex;
1698 struct ib_ports_pkeys *ports_pkeys;
1699 /* A list of all open shared QP handles. Required to enforce security
1700 * properly for all users of a shared QP.
1701 */
1702 struct list_head shared_qp_list;
1703 void *security;
1704 bool destroying;
1705 atomic_t error_list_count;
1706 struct completion error_complete;
1707 int error_comps_pending;
1708};
1709
Bart Van Assche632bc3f2016-07-21 13:03:30 -07001710/*
1711 * @max_write_sge: Maximum SGE elements per RDMA WRITE request.
1712 * @max_read_sge: Maximum SGE elements per RDMA READ request.
1713 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001714struct ib_qp {
1715 struct ib_device *device;
1716 struct ib_pd *pd;
1717 struct ib_cq *send_cq;
1718 struct ib_cq *recv_cq;
Christoph Hellwigfffb0382016-05-03 18:01:07 +02001719 spinlock_t mr_lock;
1720 int mrs_used;
Christoph Hellwiga060b562016-05-03 18:01:09 +02001721 struct list_head rdma_mrs;
Christoph Hellwig0e353e32016-05-03 18:01:12 +02001722 struct list_head sig_mrs;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001723 struct ib_srq *srq;
Sean Heftyb42b63c2011-05-23 19:59:25 -07001724 struct ib_xrcd *xrcd; /* XRC TGT QPs only */
Sean Heftyd3d72d92011-05-26 23:06:44 -07001725 struct list_head xrcd_list;
Christoph Hellwigfffb0382016-05-03 18:01:07 +02001726
Hadar Hen Zion319a4412013-08-07 14:01:59 +03001727 /* count times opened, mcast attaches, flow attaches */
1728 atomic_t usecnt;
Sean Hefty0e0ec7e2011-08-08 15:31:51 -07001729 struct list_head open_list;
1730 struct ib_qp *real_qp;
Roland Dreiere2773c02005-07-07 17:57:10 -07001731 struct ib_uobject *uobject;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001732 void (*event_handler)(struct ib_event *, void *);
1733 void *qp_context;
Jason Gunthorpe1a1f4602018-06-13 10:22:08 +03001734 /* sgid_attrs associated with the AV's */
1735 const struct ib_gid_attr *av_sgid_attr;
1736 const struct ib_gid_attr *alt_path_sgid_attr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001737 u32 qp_num;
Bart Van Assche632bc3f2016-07-21 13:03:30 -07001738 u32 max_write_sge;
1739 u32 max_read_sge;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001740 enum ib_qp_type qp_type;
Yishai Hadasa9017e22016-05-23 15:20:54 +03001741 struct ib_rwq_ind_table *rwq_ind_tbl;
Daniel Jurgensd291f1a2017-05-19 15:48:52 +03001742 struct ib_qp_security *qp_sec;
Noa Osherovich498ca3c2017-08-23 08:35:40 +03001743 u8 port;
Leon Romanovsky02d88832018-01-28 11:17:20 +02001744
Max Gurtovoy185eddc2019-06-11 18:52:51 +03001745 bool integrity_en;
Leon Romanovsky02d88832018-01-28 11:17:20 +02001746 /*
1747 * Implementation details of the RDMA core, don't use in drivers:
1748 */
1749 struct rdma_restrack_entry res;
Mark Zhang99fa3312019-07-02 13:02:35 +03001750
1751 /* The counter the qp is bind to */
1752 struct rdma_counter *counter;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001753};
1754
Ariel Levkovichbee76d72018-04-05 18:53:24 +03001755struct ib_dm {
1756 struct ib_device *device;
1757 u32 length;
1758 u32 flags;
1759 struct ib_uobject *uobject;
1760 atomic_t usecnt;
1761};
1762
Linus Torvalds1da177e2005-04-16 15:20:36 -07001763struct ib_mr {
Roland Dreiere2773c02005-07-07 17:57:10 -07001764 struct ib_device *device;
1765 struct ib_pd *pd;
Roland Dreiere2773c02005-07-07 17:57:10 -07001766 u32 lkey;
1767 u32 rkey;
Sagi Grimberg4c67e2b2015-10-13 19:11:24 +03001768 u64 iova;
Parav Panditedd31552017-09-24 21:46:31 +03001769 u64 length;
Sagi Grimberg4c67e2b2015-10-13 19:11:24 +03001770 unsigned int page_size;
Max Gurtovoya0bc0992019-06-11 18:52:38 +03001771 enum ib_mr_type type;
Steve Wised4a85c32016-05-03 18:01:08 +02001772 bool need_inval;
Christoph Hellwigfffb0382016-05-03 18:01:07 +02001773 union {
1774 struct ib_uobject *uobject; /* user */
1775 struct list_head qp_entry; /* FR */
1776 };
Steve Wisefccec5b2018-03-01 13:58:13 -08001777
Ariel Levkovichbe934cc2018-04-05 18:53:25 +03001778 struct ib_dm *dm;
Max Gurtovoy7c717d32019-06-11 18:52:41 +03001779 struct ib_sig_attrs *sig_attrs; /* only for IB_MR_TYPE_INTEGRITY MRs */
Steve Wisefccec5b2018-03-01 13:58:13 -08001780 /*
1781 * Implementation details of the RDMA core, don't use in drivers:
1782 */
1783 struct rdma_restrack_entry res;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001784};
1785
1786struct ib_mw {
1787 struct ib_device *device;
1788 struct ib_pd *pd;
Roland Dreiere2773c02005-07-07 17:57:10 -07001789 struct ib_uobject *uobject;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001790 u32 rkey;
Shani Michaeli7083e422013-02-06 16:19:12 +00001791 enum ib_mw_type type;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001792};
1793
1794struct ib_fmr {
1795 struct ib_device *device;
1796 struct ib_pd *pd;
1797 struct list_head list;
1798 u32 lkey;
1799 u32 rkey;
1800};
1801
Hadar Hen Zion319a4412013-08-07 14:01:59 +03001802/* Supported steering options */
1803enum ib_flow_attr_type {
1804 /* steering according to rule specifications */
1805 IB_FLOW_ATTR_NORMAL = 0x0,
1806 /* default unicast and multicast rule -
1807 * receive all Eth traffic which isn't steered to any QP
1808 */
1809 IB_FLOW_ATTR_ALL_DEFAULT = 0x1,
1810 /* default multicast rule -
1811 * receive all Eth multicast traffic which isn't steered to any QP
1812 */
1813 IB_FLOW_ATTR_MC_DEFAULT = 0x2,
1814 /* sniffer rule - receive all port traffic */
1815 IB_FLOW_ATTR_SNIFFER = 0x3
1816};
1817
1818/* Supported steering header types */
1819enum ib_flow_spec_type {
1820 /* L2 headers*/
Moses Reuben76bd23b2016-11-14 19:04:48 +02001821 IB_FLOW_SPEC_ETH = 0x20,
1822 IB_FLOW_SPEC_IB = 0x22,
Hadar Hen Zion319a4412013-08-07 14:01:59 +03001823 /* L3 header*/
Moses Reuben76bd23b2016-11-14 19:04:48 +02001824 IB_FLOW_SPEC_IPV4 = 0x30,
1825 IB_FLOW_SPEC_IPV6 = 0x31,
Matan Barak56ab0b32018-03-28 09:27:49 +03001826 IB_FLOW_SPEC_ESP = 0x34,
Hadar Hen Zion319a4412013-08-07 14:01:59 +03001827 /* L4 headers*/
Moses Reuben76bd23b2016-11-14 19:04:48 +02001828 IB_FLOW_SPEC_TCP = 0x40,
1829 IB_FLOW_SPEC_UDP = 0x41,
Moses Reuben0dbf3332016-11-14 19:04:47 +02001830 IB_FLOW_SPEC_VXLAN_TUNNEL = 0x50,
Ariel Levkovichd90e5e52018-05-13 14:33:30 +03001831 IB_FLOW_SPEC_GRE = 0x51,
Ariel Levkovichb04f0f02018-05-13 14:33:32 +03001832 IB_FLOW_SPEC_MPLS = 0x60,
Moses Reubenfbf46862016-11-14 19:04:51 +02001833 IB_FLOW_SPEC_INNER = 0x100,
Moses Reuben460d0192017-01-18 14:59:48 +02001834 /* Actions */
1835 IB_FLOW_SPEC_ACTION_TAG = 0x1000,
Slava Shwartsman483a3962017-04-03 13:13:51 +03001836 IB_FLOW_SPEC_ACTION_DROP = 0x1001,
Matan Barak9b828442018-03-28 09:27:46 +03001837 IB_FLOW_SPEC_ACTION_HANDLE = 0x1002,
Raed Salem7eea23a2018-05-31 16:43:36 +03001838 IB_FLOW_SPEC_ACTION_COUNT = 0x1003,
Hadar Hen Zion319a4412013-08-07 14:01:59 +03001839};
Matan Barak240ae002013-11-07 15:25:13 +02001840#define IB_FLOW_SPEC_LAYER_MASK 0xF0
Raed Salem7eea23a2018-05-31 16:43:36 +03001841#define IB_FLOW_SPEC_SUPPORT_LAYERS 10
Matan Barak22878db2013-09-01 18:39:52 +03001842
Hadar Hen Zion319a4412013-08-07 14:01:59 +03001843/* Flow steering rule priority is set according to it's domain.
1844 * Lower domain value means higher priority.
1845 */
1846enum ib_flow_domain {
1847 IB_FLOW_DOMAIN_USER,
1848 IB_FLOW_DOMAIN_ETHTOOL,
1849 IB_FLOW_DOMAIN_RFS,
1850 IB_FLOW_DOMAIN_NIC,
1851 IB_FLOW_DOMAIN_NUM /* Must be last */
1852};
1853
Marina Varshavera3100a72016-02-18 18:31:05 +02001854enum ib_flow_flags {
1855 IB_FLOW_ATTR_FLAGS_DONT_TRAP = 1UL << 1, /* Continue match, no steal */
Boris Pismenny21e82d32018-03-28 09:27:47 +03001856 IB_FLOW_ATTR_FLAGS_EGRESS = 1UL << 2, /* Egress flow */
1857 IB_FLOW_ATTR_FLAGS_RESERVED = 1UL << 3 /* Must be last */
Marina Varshavera3100a72016-02-18 18:31:05 +02001858};
1859
Hadar Hen Zion319a4412013-08-07 14:01:59 +03001860struct ib_flow_eth_filter {
1861 u8 dst_mac[6];
1862 u8 src_mac[6];
1863 __be16 ether_type;
1864 __be16 vlan_tag;
Maor Gottlieb15dfbd62016-08-30 16:58:32 +03001865 /* Must be last */
1866 u8 real_sz[0];
Hadar Hen Zion319a4412013-08-07 14:01:59 +03001867};
1868
1869struct ib_flow_spec_eth {
Moses Reubenfbf46862016-11-14 19:04:51 +02001870 u32 type;
Hadar Hen Zion319a4412013-08-07 14:01:59 +03001871 u16 size;
1872 struct ib_flow_eth_filter val;
1873 struct ib_flow_eth_filter mask;
1874};
1875
Matan Barak240ae002013-11-07 15:25:13 +02001876struct ib_flow_ib_filter {
1877 __be16 dlid;
1878 __u8 sl;
Maor Gottlieb15dfbd62016-08-30 16:58:32 +03001879 /* Must be last */
1880 u8 real_sz[0];
Matan Barak240ae002013-11-07 15:25:13 +02001881};
1882
1883struct ib_flow_spec_ib {
Moses Reubenfbf46862016-11-14 19:04:51 +02001884 u32 type;
Matan Barak240ae002013-11-07 15:25:13 +02001885 u16 size;
1886 struct ib_flow_ib_filter val;
1887 struct ib_flow_ib_filter mask;
1888};
1889
Maor Gottlieb989a3a82016-08-30 16:58:33 +03001890/* IPv4 header flags */
1891enum ib_ipv4_flags {
1892 IB_IPV4_DONT_FRAG = 0x2, /* Don't enable packet fragmentation */
1893 IB_IPV4_MORE_FRAG = 0X4 /* For All fragmented packets except the
1894 last have this flag set */
1895};
1896
Hadar Hen Zion319a4412013-08-07 14:01:59 +03001897struct ib_flow_ipv4_filter {
1898 __be32 src_ip;
1899 __be32 dst_ip;
Maor Gottlieb989a3a82016-08-30 16:58:33 +03001900 u8 proto;
1901 u8 tos;
1902 u8 ttl;
1903 u8 flags;
Maor Gottlieb15dfbd62016-08-30 16:58:32 +03001904 /* Must be last */
1905 u8 real_sz[0];
Hadar Hen Zion319a4412013-08-07 14:01:59 +03001906};
1907
1908struct ib_flow_spec_ipv4 {
Moses Reubenfbf46862016-11-14 19:04:51 +02001909 u32 type;
Hadar Hen Zion319a4412013-08-07 14:01:59 +03001910 u16 size;
1911 struct ib_flow_ipv4_filter val;
1912 struct ib_flow_ipv4_filter mask;
1913};
1914
Maor Gottlieb4c2aae72016-06-17 15:14:50 +03001915struct ib_flow_ipv6_filter {
1916 u8 src_ip[16];
1917 u8 dst_ip[16];
Maor Gottlieba72c6a22016-08-30 16:58:34 +03001918 __be32 flow_label;
1919 u8 next_hdr;
1920 u8 traffic_class;
1921 u8 hop_limit;
Maor Gottlieb15dfbd62016-08-30 16:58:32 +03001922 /* Must be last */
1923 u8 real_sz[0];
Maor Gottlieb4c2aae72016-06-17 15:14:50 +03001924};
1925
1926struct ib_flow_spec_ipv6 {
Moses Reubenfbf46862016-11-14 19:04:51 +02001927 u32 type;
Maor Gottlieb4c2aae72016-06-17 15:14:50 +03001928 u16 size;
1929 struct ib_flow_ipv6_filter val;
1930 struct ib_flow_ipv6_filter mask;
1931};
1932
Hadar Hen Zion319a4412013-08-07 14:01:59 +03001933struct ib_flow_tcp_udp_filter {
1934 __be16 dst_port;
1935 __be16 src_port;
Maor Gottlieb15dfbd62016-08-30 16:58:32 +03001936 /* Must be last */
1937 u8 real_sz[0];
Hadar Hen Zion319a4412013-08-07 14:01:59 +03001938};
1939
1940struct ib_flow_spec_tcp_udp {
Moses Reubenfbf46862016-11-14 19:04:51 +02001941 u32 type;
Hadar Hen Zion319a4412013-08-07 14:01:59 +03001942 u16 size;
1943 struct ib_flow_tcp_udp_filter val;
1944 struct ib_flow_tcp_udp_filter mask;
1945};
1946
Moses Reuben0dbf3332016-11-14 19:04:47 +02001947struct ib_flow_tunnel_filter {
1948 __be32 tunnel_id;
1949 u8 real_sz[0];
1950};
1951
1952/* ib_flow_spec_tunnel describes the Vxlan tunnel
1953 * the tunnel_id from val has the vni value
1954 */
1955struct ib_flow_spec_tunnel {
Moses Reubenfbf46862016-11-14 19:04:51 +02001956 u32 type;
Moses Reuben0dbf3332016-11-14 19:04:47 +02001957 u16 size;
1958 struct ib_flow_tunnel_filter val;
1959 struct ib_flow_tunnel_filter mask;
1960};
1961
Matan Barak56ab0b32018-03-28 09:27:49 +03001962struct ib_flow_esp_filter {
1963 __be32 spi;
1964 __be32 seq;
1965 /* Must be last */
1966 u8 real_sz[0];
1967};
1968
1969struct ib_flow_spec_esp {
1970 u32 type;
1971 u16 size;
1972 struct ib_flow_esp_filter val;
1973 struct ib_flow_esp_filter mask;
1974};
1975
Ariel Levkovichd90e5e52018-05-13 14:33:30 +03001976struct ib_flow_gre_filter {
1977 __be16 c_ks_res0_ver;
1978 __be16 protocol;
1979 __be32 key;
1980 /* Must be last */
1981 u8 real_sz[0];
1982};
1983
1984struct ib_flow_spec_gre {
1985 u32 type;
1986 u16 size;
1987 struct ib_flow_gre_filter val;
1988 struct ib_flow_gre_filter mask;
1989};
1990
Ariel Levkovichb04f0f02018-05-13 14:33:32 +03001991struct ib_flow_mpls_filter {
1992 __be32 tag;
1993 /* Must be last */
1994 u8 real_sz[0];
1995};
1996
1997struct ib_flow_spec_mpls {
1998 u32 type;
1999 u16 size;
2000 struct ib_flow_mpls_filter val;
2001 struct ib_flow_mpls_filter mask;
2002};
2003
Moses Reuben460d0192017-01-18 14:59:48 +02002004struct ib_flow_spec_action_tag {
2005 enum ib_flow_spec_type type;
2006 u16 size;
2007 u32 tag_id;
2008};
2009
Slava Shwartsman483a3962017-04-03 13:13:51 +03002010struct ib_flow_spec_action_drop {
2011 enum ib_flow_spec_type type;
2012 u16 size;
2013};
2014
Matan Barak9b828442018-03-28 09:27:46 +03002015struct ib_flow_spec_action_handle {
2016 enum ib_flow_spec_type type;
2017 u16 size;
2018 struct ib_flow_action *act;
2019};
2020
Raed Salem7eea23a2018-05-31 16:43:36 +03002021enum ib_counters_description {
2022 IB_COUNTER_PACKETS,
2023 IB_COUNTER_BYTES,
2024};
2025
2026struct ib_flow_spec_action_count {
2027 enum ib_flow_spec_type type;
2028 u16 size;
2029 struct ib_counters *counters;
2030};
2031
Hadar Hen Zion319a4412013-08-07 14:01:59 +03002032union ib_flow_spec {
2033 struct {
Moses Reubenfbf46862016-11-14 19:04:51 +02002034 u32 type;
Hadar Hen Zion319a4412013-08-07 14:01:59 +03002035 u16 size;
2036 };
2037 struct ib_flow_spec_eth eth;
Matan Barak240ae002013-11-07 15:25:13 +02002038 struct ib_flow_spec_ib ib;
Hadar Hen Zion319a4412013-08-07 14:01:59 +03002039 struct ib_flow_spec_ipv4 ipv4;
2040 struct ib_flow_spec_tcp_udp tcp_udp;
Maor Gottlieb4c2aae72016-06-17 15:14:50 +03002041 struct ib_flow_spec_ipv6 ipv6;
Moses Reuben0dbf3332016-11-14 19:04:47 +02002042 struct ib_flow_spec_tunnel tunnel;
Matan Barak56ab0b32018-03-28 09:27:49 +03002043 struct ib_flow_spec_esp esp;
Ariel Levkovichd90e5e52018-05-13 14:33:30 +03002044 struct ib_flow_spec_gre gre;
Ariel Levkovichb04f0f02018-05-13 14:33:32 +03002045 struct ib_flow_spec_mpls mpls;
Moses Reuben460d0192017-01-18 14:59:48 +02002046 struct ib_flow_spec_action_tag flow_tag;
Slava Shwartsman483a3962017-04-03 13:13:51 +03002047 struct ib_flow_spec_action_drop drop;
Matan Barak9b828442018-03-28 09:27:46 +03002048 struct ib_flow_spec_action_handle action;
Raed Salem7eea23a2018-05-31 16:43:36 +03002049 struct ib_flow_spec_action_count flow_count;
Hadar Hen Zion319a4412013-08-07 14:01:59 +03002050};
2051
2052struct ib_flow_attr {
2053 enum ib_flow_attr_type type;
2054 u16 size;
2055 u16 priority;
2056 u32 flags;
2057 u8 num_of_specs;
2058 u8 port;
Matthew Wilcox7654cb12018-06-07 07:57:16 -07002059 union ib_flow_spec flows[];
Hadar Hen Zion319a4412013-08-07 14:01:59 +03002060};
2061
2062struct ib_flow {
2063 struct ib_qp *qp;
Yishai Hadas6cd080a2018-07-23 15:25:08 +03002064 struct ib_device *device;
Hadar Hen Zion319a4412013-08-07 14:01:59 +03002065 struct ib_uobject *uobject;
2066};
2067
Matan Barak2eb9bea2018-03-28 09:27:45 +03002068enum ib_flow_action_type {
2069 IB_FLOW_ACTION_UNSPECIFIED,
2070 IB_FLOW_ACTION_ESP = 1,
2071};
2072
2073struct ib_flow_action_attrs_esp_keymats {
2074 enum ib_uverbs_flow_action_esp_keymat protocol;
2075 union {
2076 struct ib_uverbs_flow_action_esp_keymat_aes_gcm aes_gcm;
2077 } keymat;
2078};
2079
2080struct ib_flow_action_attrs_esp_replays {
2081 enum ib_uverbs_flow_action_esp_replay protocol;
2082 union {
2083 struct ib_uverbs_flow_action_esp_replay_bmp bmp;
2084 } replay;
2085};
2086
2087enum ib_flow_action_attrs_esp_flags {
2088 /* All user-space flags at the top: Use enum ib_uverbs_flow_action_esp_flags
2089 * This is done in order to share the same flags between user-space and
2090 * kernel and spare an unnecessary translation.
2091 */
2092
2093 /* Kernel flags */
2094 IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED = 1ULL << 32,
Matan Barak7d12f8d2018-03-28 09:27:48 +03002095 IB_FLOW_ACTION_ESP_FLAGS_MOD_ESP_ATTRS = 1ULL << 33,
Matan Barak2eb9bea2018-03-28 09:27:45 +03002096};
2097
2098struct ib_flow_spec_list {
2099 struct ib_flow_spec_list *next;
2100 union ib_flow_spec spec;
2101};
2102
2103struct ib_flow_action_attrs_esp {
2104 struct ib_flow_action_attrs_esp_keymats *keymat;
2105 struct ib_flow_action_attrs_esp_replays *replay;
2106 struct ib_flow_spec_list *encap;
2107 /* Used only if IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED is enabled.
2108 * Value of 0 is a valid value.
2109 */
2110 u32 esn;
2111 u32 spi;
2112 u32 seq;
2113 u32 tfc_pad;
2114 /* Use enum ib_flow_action_attrs_esp_flags */
2115 u64 flags;
2116 u64 hard_limit_pkts;
2117};
2118
2119struct ib_flow_action {
2120 struct ib_device *device;
2121 struct ib_uobject *uobject;
2122 enum ib_flow_action_type type;
2123 atomic_t usecnt;
2124};
2125
Leon Romanovskye26e7b82019-10-29 08:27:45 +02002126struct ib_mad;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002127struct ib_grh;
2128
2129enum ib_process_mad_flags {
2130 IB_MAD_IGNORE_MKEY = 1,
2131 IB_MAD_IGNORE_BKEY = 2,
2132 IB_MAD_IGNORE_ALL = IB_MAD_IGNORE_MKEY | IB_MAD_IGNORE_BKEY
2133};
2134
2135enum ib_mad_result {
2136 IB_MAD_RESULT_FAILURE = 0, /* (!SUCCESS is the important flag) */
2137 IB_MAD_RESULT_SUCCESS = 1 << 0, /* MAD was successfully processed */
2138 IB_MAD_RESULT_REPLY = 1 << 1, /* Reply packet needs to be sent */
2139 IB_MAD_RESULT_CONSUMED = 1 << 2 /* Packet consumed: stop processing */
2140};
2141
Jack Wang21d64542017-01-17 10:11:12 +01002142struct ib_port_cache {
Daniel Jurgens883c71f2017-05-19 15:48:51 +03002143 u64 subnet_prefix;
Jack Wang21d64542017-01-17 10:11:12 +01002144 struct ib_pkey_cache *pkey;
2145 struct ib_gid_table *gid;
2146 u8 lmc;
2147 enum ib_port_state port_state;
2148};
2149
Linus Torvalds1da177e2005-04-16 15:20:36 -07002150struct ib_cache {
2151 rwlock_t lock;
2152 struct ib_event_handler event_handler;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002153};
2154
Ira Weiny77386132015-05-13 20:02:58 -04002155struct ib_port_immutable {
2156 int pkey_tbl_len;
2157 int gid_tbl_len;
Ira Weinyf9b22e32015-05-13 20:02:59 -04002158 u32 core_cap_flags;
Ira Weiny337877a2015-06-06 14:38:29 -04002159 u32 max_mad_size;
Ira Weiny77386132015-05-13 20:02:58 -04002160};
2161
Jason Gunthorpe8ceb1352019-02-12 21:12:48 -07002162struct ib_port_data {
Jason Gunthorpe324e2272019-02-12 21:12:51 -07002163 struct ib_device *ib_dev;
2164
Jason Gunthorpe8ceb1352019-02-12 21:12:48 -07002165 struct ib_port_immutable immutable;
2166
2167 spinlock_t pkey_list_lock;
2168 struct list_head pkey_list;
Jason Gunthorpe8faea9f2019-02-12 21:12:49 -07002169
2170 struct ib_port_cache cache;
Jason Gunthorpec2261dd2019-02-12 21:12:50 -07002171
2172 spinlock_t netdev_lock;
Jason Gunthorpe324e2272019-02-12 21:12:51 -07002173 struct net_device __rcu *netdev;
2174 struct hlist_node ndev_hash_link;
Mark Zhang413d3342019-07-02 13:02:34 +03002175 struct rdma_port_counter port_counter;
Mark Zhang6e7be472019-07-02 13:02:46 +03002176 struct rdma_hw_stats *hw_stats;
Jason Gunthorpe8ceb1352019-02-12 21:12:48 -07002177};
2178
Vishwanathapura, Niranjana2fc77572017-04-12 20:29:20 -07002179/* rdma netdev type - specifies protocol type */
2180enum rdma_netdev_t {
Niranjana Vishwanathapuraf0ad83ac2017-04-10 11:22:25 +03002181 RDMA_NETDEV_OPA_VNIC,
2182 RDMA_NETDEV_IPOIB,
Vishwanathapura, Niranjana2fc77572017-04-12 20:29:20 -07002183};
2184
2185/**
2186 * struct rdma_netdev - rdma netdev
2187 * For cases where netstack interfacing is required.
2188 */
2189struct rdma_netdev {
2190 void *clnt_priv;
2191 struct ib_device *hca;
2192 u8 port_num;
2193
Jason Gunthorpe9f49a5b2018-07-29 11:34:56 +03002194 /*
2195 * cleanup function must be specified.
2196 * FIXME: This is only used for OPA_VNIC and that usage should be
2197 * removed too.
2198 */
Niranjana Vishwanathapura8e959602017-06-30 13:14:46 -07002199 void (*free_rdma_netdev)(struct net_device *netdev);
2200
Vishwanathapura, Niranjana2fc77572017-04-12 20:29:20 -07002201 /* control functions */
2202 void (*set_id)(struct net_device *netdev, int id);
Niranjana Vishwanathapuraf0ad83ac2017-04-10 11:22:25 +03002203 /* send packet */
2204 int (*send)(struct net_device *dev, struct sk_buff *skb,
2205 struct ib_ah *address, u32 dqpn);
2206 /* multicast */
2207 int (*attach_mcast)(struct net_device *dev, struct ib_device *hca,
2208 union ib_gid *gid, u16 mlid,
2209 int set_qkey, u32 qkey);
2210 int (*detach_mcast)(struct net_device *dev, struct ib_device *hca,
2211 union ib_gid *gid, u16 mlid);
Vishwanathapura, Niranjana2fc77572017-04-12 20:29:20 -07002212};
2213
Denis Drozdovf6a8a192018-08-14 14:08:51 +03002214struct rdma_netdev_alloc_params {
2215 size_t sizeof_priv;
2216 unsigned int txqs;
2217 unsigned int rxqs;
2218 void *param;
2219
2220 int (*initialize_rdma_netdev)(struct ib_device *device, u8 port_num,
2221 struct net_device *netdev, void *param);
2222};
2223
Erez Alfasia3de94e2019-10-16 09:23:05 +03002224struct ib_odp_counters {
2225 atomic64_t faults;
2226 atomic64_t invalidations;
2227};
2228
Raed Salemfa9b1802018-05-31 16:43:31 +03002229struct ib_counters {
2230 struct ib_device *device;
2231 struct ib_uobject *uobject;
2232 /* num of objects attached */
2233 atomic_t usecnt;
2234};
2235
Raed Salem51d7a532018-05-31 16:43:33 +03002236struct ib_counters_read_attr {
2237 u64 *counters_buff;
2238 u32 ncounters;
2239 u32 flags; /* use enum ib_read_counters_flags */
2240};
2241
Matan Barak2eb9bea2018-03-28 09:27:45 +03002242struct uverbs_attr_bundle;
Kamal Heibdd05cb82019-04-29 14:59:06 +03002243struct iw_cm_id;
2244struct iw_cm_conn_param;
Matan Barak2eb9bea2018-03-28 09:27:45 +03002245
Leon Romanovsky30471d42019-02-03 14:55:50 +02002246#define INIT_RDMA_OBJ_SIZE(ib_struct, drv_struct, member) \
2247 .size_##ib_struct = \
2248 (sizeof(struct drv_struct) + \
2249 BUILD_BUG_ON_ZERO(offsetof(struct drv_struct, member)) + \
2250 BUILD_BUG_ON_ZERO( \
2251 !__same_type(((struct drv_struct *)NULL)->member, \
2252 struct ib_struct)))
2253
Leon Romanovskyf6316032019-03-28 15:12:58 +02002254#define rdma_zalloc_drv_obj_gfp(ib_dev, ib_type, gfp) \
2255 ((struct ib_type *)kzalloc(ib_dev->ops.size_##ib_type, gfp))
2256
Leon Romanovsky30471d42019-02-03 14:55:50 +02002257#define rdma_zalloc_drv_obj(ib_dev, ib_type) \
Leon Romanovskyf6316032019-03-28 15:12:58 +02002258 rdma_zalloc_drv_obj_gfp(ib_dev, ib_type, GFP_KERNEL)
Leon Romanovsky30471d42019-02-03 14:55:50 +02002259
2260#define DECLARE_RDMA_OBJ_SIZE(ib_struct) size_t size_##ib_struct
2261
Michal Kalderon3411f9f2019-10-30 11:44:11 +02002262struct rdma_user_mmap_entry {
2263 struct kref ref;
2264 struct ib_ucontext *ucontext;
2265 unsigned long start_pgoff;
2266 size_t npages;
2267 bool driver_removed;
2268};
2269
2270/* Return the offset (in bytes) the user should pass to libc's mmap() */
2271static inline u64
2272rdma_user_mmap_get_offset(const struct rdma_user_mmap_entry *entry)
2273{
2274 return (u64)entry->start_pgoff << PAGE_SHIFT;
2275}
2276
Kamal Heib521ed0d2018-12-10 21:09:30 +02002277/**
2278 * struct ib_device_ops - InfiniBand device operations
2279 * This structure defines all the InfiniBand device operations, providers will
2280 * need to define the supported operations, otherwise they will be set to null.
2281 */
2282struct ib_device_ops {
Jason Gunthorpe7a154142019-06-05 14:39:26 -03002283 struct module *owner;
Jason Gunthorpeb9560a42019-06-05 14:39:24 -03002284 enum rdma_driver_id driver_id;
Jason Gunthorpe72c6ec12019-06-05 14:39:25 -03002285 u32 uverbs_abi_ver;
Jason Gunthorpe8f71bb02019-06-13 21:38:19 -03002286 unsigned int uverbs_no_driver_id_binding:1;
Jason Gunthorpeb9560a42019-06-05 14:39:24 -03002287
Kamal Heib521ed0d2018-12-10 21:09:30 +02002288 int (*post_send)(struct ib_qp *qp, const struct ib_send_wr *send_wr,
2289 const struct ib_send_wr **bad_send_wr);
2290 int (*post_recv)(struct ib_qp *qp, const struct ib_recv_wr *recv_wr,
2291 const struct ib_recv_wr **bad_recv_wr);
2292 void (*drain_rq)(struct ib_qp *qp);
2293 void (*drain_sq)(struct ib_qp *qp);
2294 int (*poll_cq)(struct ib_cq *cq, int num_entries, struct ib_wc *wc);
2295 int (*peek_cq)(struct ib_cq *cq, int wc_cnt);
2296 int (*req_notify_cq)(struct ib_cq *cq, enum ib_cq_notify_flags flags);
2297 int (*req_ncomp_notif)(struct ib_cq *cq, int wc_cnt);
2298 int (*post_srq_recv)(struct ib_srq *srq,
2299 const struct ib_recv_wr *recv_wr,
2300 const struct ib_recv_wr **bad_recv_wr);
2301 int (*process_mad)(struct ib_device *device, int process_mad_flags,
2302 u8 port_num, const struct ib_wc *in_wc,
2303 const struct ib_grh *in_grh,
Leon Romanovskye26e7b82019-10-29 08:27:45 +02002304 const struct ib_mad *in_mad, struct ib_mad *out_mad,
2305 size_t *out_mad_size, u16 *out_mad_pkey_index);
Kamal Heib521ed0d2018-12-10 21:09:30 +02002306 int (*query_device)(struct ib_device *device,
2307 struct ib_device_attr *device_attr,
2308 struct ib_udata *udata);
2309 int (*modify_device)(struct ib_device *device, int device_modify_mask,
2310 struct ib_device_modify *device_modify);
2311 void (*get_dev_fw_str)(struct ib_device *device, char *str);
2312 const struct cpumask *(*get_vector_affinity)(struct ib_device *ibdev,
2313 int comp_vector);
2314 int (*query_port)(struct ib_device *device, u8 port_num,
2315 struct ib_port_attr *port_attr);
2316 int (*modify_port)(struct ib_device *device, u8 port_num,
2317 int port_modify_mask,
2318 struct ib_port_modify *port_modify);
2319 /**
2320 * The following mandatory functions are used only at device
2321 * registration. Keep functions such as these at the end of this
2322 * structure to avoid cache line misses when accessing struct ib_device
2323 * in fast paths.
2324 */
2325 int (*get_port_immutable)(struct ib_device *device, u8 port_num,
2326 struct ib_port_immutable *immutable);
2327 enum rdma_link_layer (*get_link_layer)(struct ib_device *device,
2328 u8 port_num);
2329 /**
2330 * When calling get_netdev, the HW vendor's driver should return the
2331 * net device of device @device at port @port_num or NULL if such
2332 * a net device doesn't exist. The vendor driver should call dev_hold
2333 * on this net device. The HW vendor's device driver must guarantee
2334 * that this function returns NULL before the net device has finished
2335 * NETDEV_UNREGISTER state.
2336 */
2337 struct net_device *(*get_netdev)(struct ib_device *device, u8 port_num);
2338 /**
2339 * rdma netdev operation
2340 *
2341 * Driver implementing alloc_rdma_netdev or rdma_netdev_get_params
2342 * must return -EOPNOTSUPP if it doesn't support the specified type.
2343 */
2344 struct net_device *(*alloc_rdma_netdev)(
2345 struct ib_device *device, u8 port_num, enum rdma_netdev_t type,
2346 const char *name, unsigned char name_assign_type,
2347 void (*setup)(struct net_device *));
2348
2349 int (*rdma_netdev_get_params)(struct ib_device *device, u8 port_num,
2350 enum rdma_netdev_t type,
2351 struct rdma_netdev_alloc_params *params);
2352 /**
2353 * query_gid should be return GID value for @device, when @port_num
2354 * link layer is either IB or iWarp. It is no-op if @port_num port
2355 * is RoCE link layer.
2356 */
2357 int (*query_gid)(struct ib_device *device, u8 port_num, int index,
2358 union ib_gid *gid);
2359 /**
2360 * When calling add_gid, the HW vendor's driver should add the gid
2361 * of device of port at gid index available at @attr. Meta-info of
2362 * that gid (for example, the network device related to this gid) is
2363 * available at @attr. @context allows the HW vendor driver to store
2364 * extra information together with a GID entry. The HW vendor driver may
2365 * allocate memory to contain this information and store it in @context
2366 * when a new GID entry is written to. Params are consistent until the
2367 * next call of add_gid or delete_gid. The function should return 0 on
2368 * success or error otherwise. The function could be called
2369 * concurrently for different ports. This function is only called when
2370 * roce_gid_table is used.
2371 */
2372 int (*add_gid)(const struct ib_gid_attr *attr, void **context);
2373 /**
2374 * When calling del_gid, the HW vendor's driver should delete the
2375 * gid of device @device at gid index gid_index of port port_num
2376 * available in @attr.
2377 * Upon the deletion of a GID entry, the HW vendor must free any
2378 * allocated memory. The caller will clear @context afterwards.
2379 * This function is only called when roce_gid_table is used.
2380 */
2381 int (*del_gid)(const struct ib_gid_attr *attr, void **context);
2382 int (*query_pkey)(struct ib_device *device, u8 port_num, u16 index,
2383 u16 *pkey);
Leon Romanovskya2a074e2019-02-12 20:39:16 +02002384 int (*alloc_ucontext)(struct ib_ucontext *context,
2385 struct ib_udata *udata);
2386 void (*dealloc_ucontext)(struct ib_ucontext *context);
Kamal Heib521ed0d2018-12-10 21:09:30 +02002387 int (*mmap)(struct ib_ucontext *context, struct vm_area_struct *vma);
Michal Kalderon3411f9f2019-10-30 11:44:11 +02002388 /**
2389 * This will be called once refcount of an entry in mmap_xa reaches
2390 * zero. The type of the memory that was mapped may differ between
2391 * entries and is opaque to the rdma_user_mmap interface.
2392 * Therefore needs to be implemented by the driver in mmap_free.
2393 */
2394 void (*mmap_free)(struct rdma_user_mmap_entry *entry);
Kamal Heib521ed0d2018-12-10 21:09:30 +02002395 void (*disassociate_ucontext)(struct ib_ucontext *ibcontext);
Shamir Rabinovitchff23dfa2019-03-31 19:10:07 +03002396 int (*alloc_pd)(struct ib_pd *pd, struct ib_udata *udata);
Shamir Rabinovitchc4367a22019-03-31 19:10:05 +03002397 void (*dealloc_pd)(struct ib_pd *pd, struct ib_udata *udata);
Leon Romanovskyd3456912019-04-03 16:42:42 +03002398 int (*create_ah)(struct ib_ah *ah, struct rdma_ah_attr *ah_attr,
2399 u32 flags, struct ib_udata *udata);
Kamal Heib521ed0d2018-12-10 21:09:30 +02002400 int (*modify_ah)(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
2401 int (*query_ah)(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
Leon Romanovskyd3456912019-04-03 16:42:42 +03002402 void (*destroy_ah)(struct ib_ah *ah, u32 flags);
Leon Romanovsky68e326d2019-04-03 16:42:43 +03002403 int (*create_srq)(struct ib_srq *srq,
2404 struct ib_srq_init_attr *srq_init_attr,
2405 struct ib_udata *udata);
Kamal Heib521ed0d2018-12-10 21:09:30 +02002406 int (*modify_srq)(struct ib_srq *srq, struct ib_srq_attr *srq_attr,
2407 enum ib_srq_attr_mask srq_attr_mask,
2408 struct ib_udata *udata);
2409 int (*query_srq)(struct ib_srq *srq, struct ib_srq_attr *srq_attr);
Leon Romanovsky68e326d2019-04-03 16:42:43 +03002410 void (*destroy_srq)(struct ib_srq *srq, struct ib_udata *udata);
Kamal Heib521ed0d2018-12-10 21:09:30 +02002411 struct ib_qp *(*create_qp)(struct ib_pd *pd,
2412 struct ib_qp_init_attr *qp_init_attr,
2413 struct ib_udata *udata);
2414 int (*modify_qp)(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
2415 int qp_attr_mask, struct ib_udata *udata);
2416 int (*query_qp)(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
2417 int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr);
Shamir Rabinovitchc4367a22019-03-31 19:10:05 +03002418 int (*destroy_qp)(struct ib_qp *qp, struct ib_udata *udata);
Leon Romanovskye39afe32019-05-28 14:37:29 +03002419 int (*create_cq)(struct ib_cq *cq, const struct ib_cq_init_attr *attr,
2420 struct ib_udata *udata);
Kamal Heib521ed0d2018-12-10 21:09:30 +02002421 int (*modify_cq)(struct ib_cq *cq, u16 cq_count, u16 cq_period);
Leon Romanovskya52c8e22019-05-28 14:37:28 +03002422 void (*destroy_cq)(struct ib_cq *cq, struct ib_udata *udata);
Kamal Heib521ed0d2018-12-10 21:09:30 +02002423 int (*resize_cq)(struct ib_cq *cq, int cqe, struct ib_udata *udata);
2424 struct ib_mr *(*get_dma_mr)(struct ib_pd *pd, int mr_access_flags);
2425 struct ib_mr *(*reg_user_mr)(struct ib_pd *pd, u64 start, u64 length,
2426 u64 virt_addr, int mr_access_flags,
2427 struct ib_udata *udata);
2428 int (*rereg_user_mr)(struct ib_mr *mr, int flags, u64 start, u64 length,
2429 u64 virt_addr, int mr_access_flags,
2430 struct ib_pd *pd, struct ib_udata *udata);
Shamir Rabinovitchc4367a22019-03-31 19:10:05 +03002431 int (*dereg_mr)(struct ib_mr *mr, struct ib_udata *udata);
Kamal Heib521ed0d2018-12-10 21:09:30 +02002432 struct ib_mr *(*alloc_mr)(struct ib_pd *pd, enum ib_mr_type mr_type,
Shamir Rabinovitchc4367a22019-03-31 19:10:05 +03002433 u32 max_num_sg, struct ib_udata *udata);
Israel Rukshin26bc7ea2019-06-11 18:52:39 +03002434 struct ib_mr *(*alloc_mr_integrity)(struct ib_pd *pd,
2435 u32 max_num_data_sg,
2436 u32 max_num_meta_sg);
Moni Shouaad8a4492018-12-11 13:37:52 +02002437 int (*advise_mr)(struct ib_pd *pd,
2438 enum ib_uverbs_advise_mr_advice advice, u32 flags,
2439 struct ib_sge *sg_list, u32 num_sge,
2440 struct uverbs_attr_bundle *attrs);
Kamal Heib521ed0d2018-12-10 21:09:30 +02002441 int (*map_mr_sg)(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
2442 unsigned int *sg_offset);
2443 int (*check_mr_status)(struct ib_mr *mr, u32 check_mask,
2444 struct ib_mr_status *mr_status);
2445 struct ib_mw *(*alloc_mw)(struct ib_pd *pd, enum ib_mw_type type,
2446 struct ib_udata *udata);
2447 int (*dealloc_mw)(struct ib_mw *mw);
2448 struct ib_fmr *(*alloc_fmr)(struct ib_pd *pd, int mr_access_flags,
2449 struct ib_fmr_attr *fmr_attr);
2450 int (*map_phys_fmr)(struct ib_fmr *fmr, u64 *page_list, int list_len,
2451 u64 iova);
2452 int (*unmap_fmr)(struct list_head *fmr_list);
2453 int (*dealloc_fmr)(struct ib_fmr *fmr);
2454 int (*attach_mcast)(struct ib_qp *qp, union ib_gid *gid, u16 lid);
2455 int (*detach_mcast)(struct ib_qp *qp, union ib_gid *gid, u16 lid);
2456 struct ib_xrcd *(*alloc_xrcd)(struct ib_device *device,
Kamal Heib521ed0d2018-12-10 21:09:30 +02002457 struct ib_udata *udata);
Shamir Rabinovitchc4367a22019-03-31 19:10:05 +03002458 int (*dealloc_xrcd)(struct ib_xrcd *xrcd, struct ib_udata *udata);
Kamal Heib521ed0d2018-12-10 21:09:30 +02002459 struct ib_flow *(*create_flow)(struct ib_qp *qp,
2460 struct ib_flow_attr *flow_attr,
2461 int domain, struct ib_udata *udata);
2462 int (*destroy_flow)(struct ib_flow *flow_id);
2463 struct ib_flow_action *(*create_flow_action_esp)(
2464 struct ib_device *device,
2465 const struct ib_flow_action_attrs_esp *attr,
2466 struct uverbs_attr_bundle *attrs);
2467 int (*destroy_flow_action)(struct ib_flow_action *action);
2468 int (*modify_flow_action_esp)(
2469 struct ib_flow_action *action,
2470 const struct ib_flow_action_attrs_esp *attr,
2471 struct uverbs_attr_bundle *attrs);
2472 int (*set_vf_link_state)(struct ib_device *device, int vf, u8 port,
2473 int state);
2474 int (*get_vf_config)(struct ib_device *device, int vf, u8 port,
2475 struct ifla_vf_info *ivf);
2476 int (*get_vf_stats)(struct ib_device *device, int vf, u8 port,
2477 struct ifla_vf_stats *stats);
Danit Goldbergbfcb3c5d2019-11-06 15:08:32 +02002478 int (*get_vf_guid)(struct ib_device *device, int vf, u8 port,
2479 struct ifla_vf_guid *node_guid,
2480 struct ifla_vf_guid *port_guid);
Kamal Heib521ed0d2018-12-10 21:09:30 +02002481 int (*set_vf_guid)(struct ib_device *device, int vf, u8 port, u64 guid,
2482 int type);
2483 struct ib_wq *(*create_wq)(struct ib_pd *pd,
2484 struct ib_wq_init_attr *init_attr,
2485 struct ib_udata *udata);
Leon Romanovskya49b1dc2019-06-12 15:27:41 +03002486 void (*destroy_wq)(struct ib_wq *wq, struct ib_udata *udata);
Kamal Heib521ed0d2018-12-10 21:09:30 +02002487 int (*modify_wq)(struct ib_wq *wq, struct ib_wq_attr *attr,
2488 u32 wq_attr_mask, struct ib_udata *udata);
2489 struct ib_rwq_ind_table *(*create_rwq_ind_table)(
2490 struct ib_device *device,
2491 struct ib_rwq_ind_table_init_attr *init_attr,
2492 struct ib_udata *udata);
2493 int (*destroy_rwq_ind_table)(struct ib_rwq_ind_table *wq_ind_table);
2494 struct ib_dm *(*alloc_dm)(struct ib_device *device,
2495 struct ib_ucontext *context,
2496 struct ib_dm_alloc_attr *attr,
2497 struct uverbs_attr_bundle *attrs);
Shamir Rabinovitchc4367a22019-03-31 19:10:05 +03002498 int (*dealloc_dm)(struct ib_dm *dm, struct uverbs_attr_bundle *attrs);
Kamal Heib521ed0d2018-12-10 21:09:30 +02002499 struct ib_mr *(*reg_dm_mr)(struct ib_pd *pd, struct ib_dm *dm,
2500 struct ib_dm_mr_attr *attr,
2501 struct uverbs_attr_bundle *attrs);
2502 struct ib_counters *(*create_counters)(
2503 struct ib_device *device, struct uverbs_attr_bundle *attrs);
2504 int (*destroy_counters)(struct ib_counters *counters);
2505 int (*read_counters)(struct ib_counters *counters,
2506 struct ib_counters_read_attr *counters_read_attr,
2507 struct uverbs_attr_bundle *attrs);
Max Gurtovoy2cdfcdd2019-06-11 18:52:40 +03002508 int (*map_mr_sg_pi)(struct ib_mr *mr, struct scatterlist *data_sg,
2509 int data_sg_nents, unsigned int *data_sg_offset,
2510 struct scatterlist *meta_sg, int meta_sg_nents,
2511 unsigned int *meta_sg_offset);
2512
Kamal Heib521ed0d2018-12-10 21:09:30 +02002513 /**
2514 * alloc_hw_stats - Allocate a struct rdma_hw_stats and fill in the
2515 * driver initialized data. The struct is kfree()'ed by the sysfs
2516 * core when the device is removed. A lifespan of -1 in the return
2517 * struct tells the core to set a default lifespan.
2518 */
2519 struct rdma_hw_stats *(*alloc_hw_stats)(struct ib_device *device,
2520 u8 port_num);
2521 /**
2522 * get_hw_stats - Fill in the counter value(s) in the stats struct.
2523 * @index - The index in the value array we wish to have updated, or
2524 * num_counters if we want all stats updated
2525 * Return codes -
2526 * < 0 - Error, no counters updated
2527 * index - Updated the single counter pointed to by index
2528 * num_counters - Updated all counters (will reset the timestamp
2529 * and prevent further calls for lifespan milliseconds)
2530 * Drivers are allowed to update all counters in leiu of just the
2531 * one given in index at their option
2532 */
2533 int (*get_hw_stats)(struct ib_device *device,
2534 struct rdma_hw_stats *stats, u8 port, int index);
Parav Panditea4baf72018-12-18 14:28:30 +02002535 /*
2536 * This function is called once for each port when a ib device is
2537 * registered.
2538 */
2539 int (*init_port)(struct ib_device *device, u8 port_num,
2540 struct kobject *port_sysfs);
Leon Romanovsky02da3752019-01-30 12:49:02 +02002541 /**
2542 * Allows rdma drivers to add their own restrack attributes.
2543 */
2544 int (*fill_res_entry)(struct sk_buff *msg,
2545 struct rdma_restrack_entry *entry);
Leon Romanovsky21a428a2019-02-03 14:55:51 +02002546
Jason Gunthorped0899892019-02-12 21:12:53 -07002547 /* Device lifecycle callbacks */
2548 /*
Jason Gunthorpeca223542019-02-12 21:12:56 -07002549 * Called after the device becomes registered, before clients are
2550 * attached
2551 */
2552 int (*enable_driver)(struct ib_device *dev);
2553 /*
Jason Gunthorped0899892019-02-12 21:12:53 -07002554 * This is called as part of ib_dealloc_device().
2555 */
2556 void (*dealloc_driver)(struct ib_device *dev);
2557
Kamal Heibdd05cb82019-04-29 14:59:06 +03002558 /* iWarp CM callbacks */
2559 void (*iw_add_ref)(struct ib_qp *qp);
2560 void (*iw_rem_ref)(struct ib_qp *qp);
2561 struct ib_qp *(*iw_get_qp)(struct ib_device *device, int qpn);
2562 int (*iw_connect)(struct iw_cm_id *cm_id,
2563 struct iw_cm_conn_param *conn_param);
2564 int (*iw_accept)(struct iw_cm_id *cm_id,
2565 struct iw_cm_conn_param *conn_param);
2566 int (*iw_reject)(struct iw_cm_id *cm_id, const void *pdata,
2567 u8 pdata_len);
2568 int (*iw_create_listen)(struct iw_cm_id *cm_id, int backlog);
2569 int (*iw_destroy_listen)(struct iw_cm_id *cm_id);
Mark Zhang99fa3312019-07-02 13:02:35 +03002570 /**
2571 * counter_bind_qp - Bind a QP to a counter.
2572 * @counter - The counter to be bound. If counter->id is zero then
2573 * the driver needs to allocate a new counter and set counter->id
2574 */
2575 int (*counter_bind_qp)(struct rdma_counter *counter, struct ib_qp *qp);
2576 /**
2577 * counter_unbind_qp - Unbind the qp from the dynamically-allocated
2578 * counter and bind it onto the default one
2579 */
2580 int (*counter_unbind_qp)(struct ib_qp *qp);
2581 /**
2582 * counter_dealloc -De-allocate the hw counter
2583 */
2584 int (*counter_dealloc)(struct rdma_counter *counter);
Mark Zhangc4ffee72019-07-02 13:02:40 +03002585 /**
2586 * counter_alloc_stats - Allocate a struct rdma_hw_stats and fill in
2587 * the driver initialized data.
2588 */
2589 struct rdma_hw_stats *(*counter_alloc_stats)(
2590 struct rdma_counter *counter);
2591 /**
2592 * counter_update_stats - Query the stats value of this counter
2593 */
2594 int (*counter_update_stats)(struct rdma_counter *counter);
Kamal Heibdd05cb82019-04-29 14:59:06 +03002595
Erez Alfasi4061ff72019-10-16 09:23:08 +03002596 /**
2597 * Allows rdma drivers to add their own restrack attributes
2598 * dumped via 'rdma stat' iproute2 command.
2599 */
2600 int (*fill_stat_entry)(struct sk_buff *msg,
2601 struct rdma_restrack_entry *entry);
2602
Leon Romanovskyd3456912019-04-03 16:42:42 +03002603 DECLARE_RDMA_OBJ_SIZE(ib_ah);
Leon Romanovskye39afe32019-05-28 14:37:29 +03002604 DECLARE_RDMA_OBJ_SIZE(ib_cq);
Leon Romanovsky21a428a2019-02-03 14:55:51 +02002605 DECLARE_RDMA_OBJ_SIZE(ib_pd);
Leon Romanovsky68e326d2019-04-03 16:42:43 +03002606 DECLARE_RDMA_OBJ_SIZE(ib_srq);
Leon Romanovskya2a074e2019-02-12 20:39:16 +02002607 DECLARE_RDMA_OBJ_SIZE(ib_ucontext);
Kamal Heib521ed0d2018-12-10 21:09:30 +02002608};
2609
Parav Panditcebe5562019-02-26 13:56:11 +02002610struct ib_core_device {
2611 /* device must be the first element in structure until,
2612 * union of ib_core_device and device exists in ib_device.
2613 */
2614 struct device dev;
Parav Pandit4e0f7b92019-02-26 13:56:13 +02002615 possible_net_t rdma_net;
Parav Panditcebe5562019-02-26 13:56:11 +02002616 struct kobject *ports_kobj;
2617 struct list_head port_list;
2618 struct ib_device *owner; /* reach back to owner ib_device */
2619};
Leon Romanovsky41eda652019-02-18 22:25:47 +02002620
Parav Panditcebe5562019-02-26 13:56:11 +02002621struct rdma_restrack_root;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002622struct ib_device {
Bart Van Assche0957c292017-03-07 22:56:53 +00002623 /* Do not access @dma_device directly from ULP nor from HW drivers. */
2624 struct device *dma_device;
Kamal Heib3023a1e2018-12-10 21:09:48 +02002625 struct ib_device_ops ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002626 char name[IB_DEVICE_NAME_MAX];
Jason Gunthorpe324e2272019-02-12 21:12:51 -07002627 struct rcu_head rcu_head;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002628
2629 struct list_head event_handler_list;
2630 spinlock_t event_handler_lock;
2631
Jason Gunthorpe921eab12019-02-06 22:41:54 -07002632 struct rw_semaphore client_data_rwsem;
Jason Gunthorpe0df91bb2019-02-06 22:41:53 -07002633 struct xarray client_data;
Jason Gunthorped0899892019-02-12 21:12:53 -07002634 struct mutex unregistration_lock;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002635
2636 struct ib_cache cache;
Ira Weiny77386132015-05-13 20:02:58 -04002637 /**
Jason Gunthorpe8ceb1352019-02-12 21:12:48 -07002638 * port_data is indexed by port number
Ira Weiny77386132015-05-13 20:02:58 -04002639 */
Jason Gunthorpe8ceb1352019-02-12 21:12:48 -07002640 struct ib_port_data *port_data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002641
Michael S. Tsirkinf4fd0b22007-05-03 13:48:47 +03002642 int num_comp_vectors;
2643
Parav Panditcebe5562019-02-26 13:56:11 +02002644 union {
2645 struct device dev;
2646 struct ib_core_device coredev;
2647 };
2648
Parav Panditd4122f52018-10-11 22:31:53 +03002649 /* First group for device attributes,
2650 * Second group for driver provided attributes (optional).
2651 * It is NULL terminated array.
2652 */
2653 const struct attribute_group *groups[3];
Parav Panditadee9f32018-09-05 09:47:58 +03002654
Alexander Chiang17a55f72010-02-02 19:09:16 +00002655 u64 uverbs_cmd_mask;
Yann Droneaudf21519b2013-11-06 23:21:49 +01002656 u64 uverbs_ex_cmd_mask;
Roland Dreier274c0892005-09-29 14:17:48 -07002657
Yuval Shaiabd99fde2016-08-25 10:57:07 -07002658 char node_desc[IB_DEVICE_NODE_DESC_MAX];
Sean Heftycf311cd2006-01-10 07:39:34 -08002659 __be64 node_guid;
Steve Wise96f15c02008-07-14 23:48:53 -07002660 u32 local_dma_lkey;
Hal Rosenstock41390322015-06-29 09:57:00 -04002661 u16 is_switch:1;
Gal Pressman6780c4f2019-01-22 10:08:22 +02002662 /* Indicates kernel verbs support, should not be used in drivers */
2663 u16 kverbs_provider:1;
Yamin Friedmanda662972019-07-08 13:59:03 +03002664 /* CQ adaptive moderation (RDMA DIM) */
2665 u16 use_cq_dim:1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002666 u8 node_type;
2667 u8 phys_port_cnt;
Ira Weiny3e153a92015-12-18 10:59:44 +02002668 struct ib_device_attr attrs;
Christoph Lameterb40f4752016-05-16 12:49:33 -05002669 struct attribute_group *hw_stats_ag;
2670 struct rdma_hw_stats *hw_stats;
Ira Weiny77386132015-05-13 20:02:58 -04002671
Parav Pandit43579b52017-01-10 00:02:14 +00002672#ifdef CONFIG_CGROUP_RDMA
2673 struct rdmacg_device cg_device;
2674#endif
2675
Leon Romanovskyecc82c52017-06-18 14:39:59 +03002676 u32 index;
Leon Romanovsky41eda652019-02-18 22:25:47 +02002677 struct rdma_restrack_root *res;
Leon Romanovskyecc82c52017-06-18 14:39:59 +03002678
Jason Gunthorpe0cbf4322018-11-12 22:59:50 +02002679 const struct uapi_definition *driver_def;
Jason Gunthorped79af722019-01-10 14:02:24 -07002680
Parav Pandit01b67112018-11-16 03:50:57 +02002681 /*
Jason Gunthorped79af722019-01-10 14:02:24 -07002682 * Positive refcount indicates that the device is currently
2683 * registered and cannot be unregistered.
Parav Pandit01b67112018-11-16 03:50:57 +02002684 */
2685 refcount_t refcount;
2686 struct completion unreg_completion;
Jason Gunthorped0899892019-02-12 21:12:53 -07002687 struct work_struct unregistration_work;
Steve Wise3856ec42019-02-15 11:03:53 -08002688
2689 const struct rdma_link_ops *link_ops;
Parav Pandit4e0f7b92019-02-26 13:56:13 +02002690
2691 /* Protects compat_devs xarray modifications */
2692 struct mutex compat_devs_mutex;
2693 /* Maintains compat devices for each net namespace */
2694 struct xarray compat_devs;
Kamal Heibdd05cb82019-04-29 14:59:06 +03002695
2696 /* Used by iWarp CM */
2697 char iw_ifname[IFNAMSIZ];
2698 u32 iw_driver_flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002699};
2700
Jason Gunthorpe0e2d00e2019-06-13 21:38:18 -03002701struct ib_client_nl_info;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002702struct ib_client {
Jason Gunthorpee59178d2019-02-06 22:41:52 -07002703 const char *name;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002704 void (*add) (struct ib_device *);
Haggai Eran7c1eb452015-07-30 17:50:14 +03002705 void (*remove)(struct ib_device *, void *client_data);
Leon Romanovskydc1435c2019-05-17 15:43:10 +03002706 void (*rename)(struct ib_device *dev, void *client_data);
Jason Gunthorpe0e2d00e2019-06-13 21:38:18 -03002707 int (*get_nl_info)(struct ib_device *ibdev, void *client_data,
2708 struct ib_client_nl_info *res);
2709 int (*get_global_nl_info)(struct ib_client_nl_info *res);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002710
Yotam Kenneth9268f722015-07-30 17:50:15 +03002711 /* Returns the net_dev belonging to this ib_client and matching the
2712 * given parameters.
2713 * @dev: An RDMA device that the net_dev use for communication.
2714 * @port: A physical port number on the RDMA device.
2715 * @pkey: P_Key that the net_dev uses if applicable.
2716 * @gid: A GID that the net_dev uses to communicate.
2717 * @addr: An IP address the net_dev is configured with.
2718 * @client_data: The device's client data set by ib_set_client_data().
2719 *
2720 * An ib_client that implements a net_dev on top of RDMA devices
2721 * (such as IP over IB) should implement this callback, allowing the
2722 * rdma_cm module to find the right net_dev for a given request.
2723 *
2724 * The caller is responsible for calling dev_put on the returned
2725 * netdev. */
2726 struct net_device *(*get_net_dev_by_params)(
2727 struct ib_device *dev,
2728 u8 port,
2729 u16 pkey,
2730 const union ib_gid *gid,
2731 const struct sockaddr *addr,
2732 void *client_data);
Jason Gunthorpe621e55f2019-07-31 11:18:40 +03002733
2734 refcount_t uses;
2735 struct completion uses_zero;
Jason Gunthorpee59178d2019-02-06 22:41:52 -07002736 u32 client_id;
Gal Pressman6780c4f2019-01-22 10:08:22 +02002737
2738 /* kverbs are not required by the client */
2739 u8 no_kverbs_req:1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002740};
2741
Shiraz Saleema8082732019-05-06 08:53:33 -05002742/*
2743 * IB block DMA iterator
2744 *
2745 * Iterates the DMA-mapped SGL in contiguous memory blocks aligned
2746 * to a HW supported page size.
2747 */
2748struct ib_block_iter {
2749 /* internal states */
2750 struct scatterlist *__sg; /* sg holding the current aligned block */
2751 dma_addr_t __dma_addr; /* unaligned DMA address of this block */
2752 unsigned int __sg_nents; /* number of SG entries */
2753 unsigned int __sg_advance; /* number of bytes to advance in sg in next step */
2754 unsigned int __pg_bit; /* alignment of current block */
2755};
2756
Leon Romanovsky459cc692019-01-30 12:49:11 +02002757struct ib_device *_ib_alloc_device(size_t size);
2758#define ib_alloc_device(drv_struct, member) \
2759 container_of(_ib_alloc_device(sizeof(struct drv_struct) + \
2760 BUILD_BUG_ON_ZERO(offsetof( \
2761 struct drv_struct, member))), \
2762 struct drv_struct, member)
2763
Linus Torvalds1da177e2005-04-16 15:20:36 -07002764void ib_dealloc_device(struct ib_device *device);
2765
Leon Romanovsky9abb0d12017-06-27 16:49:53 +03002766void ib_get_device_fw_str(struct ib_device *device, char *str);
Ira Weiny5fa76c22016-06-15 02:21:56 -04002767
Parav Panditea4baf72018-12-18 14:28:30 +02002768int ib_register_device(struct ib_device *device, const char *name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002769void ib_unregister_device(struct ib_device *device);
Jason Gunthorped0899892019-02-12 21:12:53 -07002770void ib_unregister_driver(enum rdma_driver_id driver_id);
2771void ib_unregister_device_and_put(struct ib_device *device);
2772void ib_unregister_device_queued(struct ib_device *ib_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002773
2774int ib_register_client (struct ib_client *client);
2775void ib_unregister_client(struct ib_client *client);
2776
Shiraz Saleema8082732019-05-06 08:53:33 -05002777void __rdma_block_iter_start(struct ib_block_iter *biter,
2778 struct scatterlist *sglist,
2779 unsigned int nents,
2780 unsigned long pgsz);
2781bool __rdma_block_iter_next(struct ib_block_iter *biter);
2782
2783/**
2784 * rdma_block_iter_dma_address - get the aligned dma address of the current
2785 * block held by the block iterator.
2786 * @biter: block iterator holding the memory block
2787 */
2788static inline dma_addr_t
2789rdma_block_iter_dma_address(struct ib_block_iter *biter)
2790{
2791 return biter->__dma_addr & ~(BIT_ULL(biter->__pg_bit) - 1);
2792}
2793
2794/**
2795 * rdma_for_each_block - iterate over contiguous memory blocks of the sg list
2796 * @sglist: sglist to iterate over
2797 * @biter: block iterator holding the memory block
2798 * @nents: maximum number of sg entries to iterate over
2799 * @pgsz: best HW supported page size to use
2800 *
2801 * Callers may use rdma_block_iter_dma_address() to get each
2802 * blocks aligned DMA address.
2803 */
2804#define rdma_for_each_block(sglist, biter, nents, pgsz) \
2805 for (__rdma_block_iter_start(biter, sglist, nents, \
2806 pgsz); \
2807 __rdma_block_iter_next(biter);)
2808
Jason Gunthorpe0df91bb2019-02-06 22:41:53 -07002809/**
2810 * ib_get_client_data - Get IB client context
2811 * @device:Device to get context for
2812 * @client:Client to get context for
2813 *
2814 * ib_get_client_data() returns the client context data set with
2815 * ib_set_client_data(). This can only be called while the client is
2816 * registered to the device, once the ib_client remove() callback returns this
2817 * cannot be called.
2818 */
2819static inline void *ib_get_client_data(struct ib_device *device,
2820 struct ib_client *client)
2821{
2822 return xa_load(&device->client_data, client->client_id);
2823}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002824void ib_set_client_data(struct ib_device *device, struct ib_client *client,
2825 void *data);
Kamal Heib521ed0d2018-12-10 21:09:30 +02002826void ib_set_device_ops(struct ib_device *device,
2827 const struct ib_device_ops *ops);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002828
Jason Gunthorpe5f9794d2018-09-16 20:43:08 +03002829int rdma_user_mmap_io(struct ib_ucontext *ucontext, struct vm_area_struct *vma,
Michal Kalderonc043ff22019-10-30 11:44:12 +02002830 unsigned long pfn, unsigned long size, pgprot_t prot,
2831 struct rdma_user_mmap_entry *entry);
Michal Kalderon3411f9f2019-10-30 11:44:11 +02002832int rdma_user_mmap_entry_insert(struct ib_ucontext *ucontext,
2833 struct rdma_user_mmap_entry *entry,
2834 size_t length);
Yishai Hadas7a763d12019-12-12 12:02:36 +02002835int rdma_user_mmap_entry_insert_range(struct ib_ucontext *ucontext,
2836 struct rdma_user_mmap_entry *entry,
2837 size_t length, u32 min_pgoff,
2838 u32 max_pgoff);
2839
Michal Kalderon3411f9f2019-10-30 11:44:11 +02002840struct rdma_user_mmap_entry *
2841rdma_user_mmap_entry_get_pgoff(struct ib_ucontext *ucontext,
2842 unsigned long pgoff);
2843struct rdma_user_mmap_entry *
2844rdma_user_mmap_entry_get(struct ib_ucontext *ucontext,
2845 struct vm_area_struct *vma);
2846void rdma_user_mmap_entry_put(struct rdma_user_mmap_entry *entry);
2847
2848void rdma_user_mmap_entry_remove(struct rdma_user_mmap_entry *entry);
Jason Gunthorpe5f9794d2018-09-16 20:43:08 +03002849
Roland Dreiere2773c02005-07-07 17:57:10 -07002850static inline int ib_copy_from_udata(void *dest, struct ib_udata *udata, size_t len)
2851{
2852 return copy_from_user(dest, udata->inbuf, len) ? -EFAULT : 0;
2853}
2854
2855static inline int ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len)
2856{
Yann Droneaud43c611652015-02-05 22:10:18 +01002857 return copy_to_user(udata->outbuf, src, len) ? -EFAULT : 0;
Roland Dreiere2773c02005-07-07 17:57:10 -07002858}
2859
Matan Barakc66db312018-03-19 15:02:36 +02002860static inline bool ib_is_buffer_cleared(const void __user *p,
2861 size_t len)
Matan Barak301a7212015-12-15 20:30:10 +02002862{
Markus Elfring92d27ae2016-08-22 18:23:24 +02002863 bool ret;
Matan Barak301a7212015-12-15 20:30:10 +02002864 u8 *buf;
2865
2866 if (len > USHRT_MAX)
2867 return false;
2868
Markus Elfring92d27ae2016-08-22 18:23:24 +02002869 buf = memdup_user(p, len);
2870 if (IS_ERR(buf))
Matan Barak301a7212015-12-15 20:30:10 +02002871 return false;
2872
Matan Barak301a7212015-12-15 20:30:10 +02002873 ret = !memchr_inv(buf, 0, len);
Matan Barak301a7212015-12-15 20:30:10 +02002874 kfree(buf);
2875 return ret;
2876}
2877
Matan Barakc66db312018-03-19 15:02:36 +02002878static inline bool ib_is_udata_cleared(struct ib_udata *udata,
2879 size_t offset,
2880 size_t len)
2881{
2882 return ib_is_buffer_cleared(udata->inbuf + offset, len);
2883}
2884
Roland Dreier8a518662006-02-13 12:48:12 -08002885/**
Yishai Hadas1c774832018-06-20 17:11:39 +03002886 * ib_is_destroy_retryable - Check whether the uobject destruction
2887 * is retryable.
2888 * @ret: The initial destruction return code
2889 * @why: remove reason
2890 * @uobj: The uobject that is destroyed
2891 *
2892 * This function is a helper function that IB layer and low-level drivers
2893 * can use to consider whether the destruction of the given uobject is
2894 * retry-able.
2895 * It checks the original return code, if it wasn't success the destruction
2896 * is retryable according to the ucontext state (i.e. cleanup_retryable) and
2897 * the remove reason. (i.e. why).
2898 * Must be called with the object locked for destroy.
2899 */
2900static inline bool ib_is_destroy_retryable(int ret, enum rdma_remove_reason why,
2901 struct ib_uobject *uobj)
2902{
2903 return ret && (why == RDMA_REMOVE_DESTROY ||
2904 uobj->context->cleanup_retryable);
2905}
2906
2907/**
2908 * ib_destroy_usecnt - Called during destruction to check the usecnt
2909 * @usecnt: The usecnt atomic
2910 * @why: remove reason
2911 * @uobj: The uobject that is destroyed
2912 *
2913 * Non-zero usecnts will block destruction unless destruction was triggered by
2914 * a ucontext cleanup.
2915 */
2916static inline int ib_destroy_usecnt(atomic_t *usecnt,
2917 enum rdma_remove_reason why,
2918 struct ib_uobject *uobj)
2919{
2920 if (atomic_read(usecnt) && ib_is_destroy_retryable(-EBUSY, why, uobj))
2921 return -EBUSY;
2922 return 0;
2923}
2924
2925/**
Roland Dreier8a518662006-02-13 12:48:12 -08002926 * ib_modify_qp_is_ok - Check that the supplied attribute mask
2927 * contains all required attributes and no attributes not allowed for
2928 * the given QP state transition.
2929 * @cur_state: Current QP state
2930 * @next_state: Next QP state
2931 * @type: QP type
2932 * @mask: Mask of supplied QP attributes
2933 *
2934 * This function is a helper function that a low-level driver's
2935 * modify_qp method can use to validate the consumer's input. It
2936 * checks that cur_state and next_state are valid QP states, that a
2937 * transition from cur_state to next_state is allowed by the IB spec,
2938 * and that the attribute mask supplied is allowed for the transition.
2939 */
Leon Romanovsky19b1f542018-03-11 13:51:35 +02002940bool ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
Kamal Heibd31131b2018-10-02 16:11:21 +03002941 enum ib_qp_type type, enum ib_qp_attr_mask mask);
Roland Dreier8a518662006-02-13 12:48:12 -08002942
Leon Romanovskydcc98812017-08-17 15:50:36 +03002943void ib_register_event_handler(struct ib_event_handler *event_handler);
2944void ib_unregister_event_handler(struct ib_event_handler *event_handler);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002945void ib_dispatch_event(struct ib_event *event);
2946
Linus Torvalds1da177e2005-04-16 15:20:36 -07002947int ib_query_port(struct ib_device *device,
2948 u8 port_num, struct ib_port_attr *port_attr);
2949
Eli Cohena3f5ada2010-09-27 17:51:10 -07002950enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device,
2951 u8 port_num);
2952
Ira Weiny0cf18d72015-05-13 20:02:55 -04002953/**
Hal Rosenstock41390322015-06-29 09:57:00 -04002954 * rdma_cap_ib_switch - Check if the device is IB switch
2955 * @device: Device to check
2956 *
2957 * Device driver is responsible for setting is_switch bit on
2958 * in ib_device structure at init time.
2959 *
2960 * Return: true if the device is IB switch.
2961 */
2962static inline bool rdma_cap_ib_switch(const struct ib_device *device)
2963{
2964 return device->is_switch;
2965}
2966
2967/**
Ira Weiny0cf18d72015-05-13 20:02:55 -04002968 * rdma_start_port - Return the first valid port number for the device
2969 * specified
2970 *
2971 * @device: Device to be checked
2972 *
2973 * Return start port number
2974 */
2975static inline u8 rdma_start_port(const struct ib_device *device)
2976{
Hal Rosenstock41390322015-06-29 09:57:00 -04002977 return rdma_cap_ib_switch(device) ? 0 : 1;
Ira Weiny0cf18d72015-05-13 20:02:55 -04002978}
2979
2980/**
Jason Gunthorpeea1075e2019-02-12 21:12:47 -07002981 * rdma_for_each_port - Iterate over all valid port numbers of the IB device
2982 * @device - The struct ib_device * to iterate over
2983 * @iter - The unsigned int to store the port number
2984 */
2985#define rdma_for_each_port(device, iter) \
2986 for (iter = rdma_start_port(device + BUILD_BUG_ON_ZERO(!__same_type( \
2987 unsigned int, iter))); \
2988 iter <= rdma_end_port(device); (iter)++)
2989
2990/**
Ira Weiny0cf18d72015-05-13 20:02:55 -04002991 * rdma_end_port - Return the last valid port number for the device
2992 * specified
2993 *
2994 * @device: Device to be checked
2995 *
2996 * Return last port number
2997 */
2998static inline u8 rdma_end_port(const struct ib_device *device)
2999{
Hal Rosenstock41390322015-06-29 09:57:00 -04003000 return rdma_cap_ib_switch(device) ? 0 : device->phys_port_cnt;
Ira Weiny0cf18d72015-05-13 20:02:55 -04003001}
3002
Yuval Shaia24dc8312017-01-25 18:41:37 +02003003static inline int rdma_is_port_valid(const struct ib_device *device,
3004 unsigned int port)
3005{
3006 return (port >= rdma_start_port(device) &&
3007 port <= rdma_end_port(device));
3008}
3009
Artemy Kovalyovb02289b2018-07-04 15:57:50 +03003010static inline bool rdma_is_grh_required(const struct ib_device *device,
3011 u8 port_num)
3012{
Jason Gunthorpe8ceb1352019-02-12 21:12:48 -07003013 return device->port_data[port_num].immutable.core_cap_flags &
3014 RDMA_CORE_PORT_IB_GRH_REQUIRED;
Artemy Kovalyovb02289b2018-07-04 15:57:50 +03003015}
3016
Ira Weiny5ede9282015-05-31 17:15:29 -04003017static inline bool rdma_protocol_ib(const struct ib_device *device, u8 port_num)
Michael Wangde66be92015-05-05 14:50:19 +02003018{
Jason Gunthorpe8ceb1352019-02-12 21:12:48 -07003019 return device->port_data[port_num].immutable.core_cap_flags &
3020 RDMA_CORE_CAP_PROT_IB;
Michael Wangde66be92015-05-05 14:50:19 +02003021}
3022
Ira Weiny5ede9282015-05-31 17:15:29 -04003023static inline bool rdma_protocol_roce(const struct ib_device *device, u8 port_num)
Michael Wangde66be92015-05-05 14:50:19 +02003024{
Jason Gunthorpe8ceb1352019-02-12 21:12:48 -07003025 return device->port_data[port_num].immutable.core_cap_flags &
3026 (RDMA_CORE_CAP_PROT_ROCE | RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP);
Matan Barak7766a992015-12-23 14:56:50 +02003027}
3028
3029static inline bool rdma_protocol_roce_udp_encap(const struct ib_device *device, u8 port_num)
3030{
Jason Gunthorpe8ceb1352019-02-12 21:12:48 -07003031 return device->port_data[port_num].immutable.core_cap_flags &
3032 RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP;
Matan Barak7766a992015-12-23 14:56:50 +02003033}
3034
3035static inline bool rdma_protocol_roce_eth_encap(const struct ib_device *device, u8 port_num)
3036{
Jason Gunthorpe8ceb1352019-02-12 21:12:48 -07003037 return device->port_data[port_num].immutable.core_cap_flags &
3038 RDMA_CORE_CAP_PROT_ROCE;
Michael Wangde66be92015-05-05 14:50:19 +02003039}
3040
Ira Weiny5ede9282015-05-31 17:15:29 -04003041static inline bool rdma_protocol_iwarp(const struct ib_device *device, u8 port_num)
Michael Wangde66be92015-05-05 14:50:19 +02003042{
Jason Gunthorpe8ceb1352019-02-12 21:12:48 -07003043 return device->port_data[port_num].immutable.core_cap_flags &
3044 RDMA_CORE_CAP_PROT_IWARP;
Michael Wangde66be92015-05-05 14:50:19 +02003045}
3046
Ira Weiny5ede9282015-05-31 17:15:29 -04003047static inline bool rdma_ib_or_roce(const struct ib_device *device, u8 port_num)
Michael Wangde66be92015-05-05 14:50:19 +02003048{
Matan Barak7766a992015-12-23 14:56:50 +02003049 return rdma_protocol_ib(device, port_num) ||
3050 rdma_protocol_roce(device, port_num);
Michael Wangde66be92015-05-05 14:50:19 +02003051}
3052
Or Gerlitzaa773bd2017-01-24 13:02:35 +02003053static inline bool rdma_protocol_raw_packet(const struct ib_device *device, u8 port_num)
3054{
Jason Gunthorpe8ceb1352019-02-12 21:12:48 -07003055 return device->port_data[port_num].immutable.core_cap_flags &
3056 RDMA_CORE_CAP_PROT_RAW_PACKET;
Or Gerlitzaa773bd2017-01-24 13:02:35 +02003057}
3058
Or Gerlitzce1e0552017-01-24 13:02:38 +02003059static inline bool rdma_protocol_usnic(const struct ib_device *device, u8 port_num)
3060{
Jason Gunthorpe8ceb1352019-02-12 21:12:48 -07003061 return device->port_data[port_num].immutable.core_cap_flags &
3062 RDMA_CORE_CAP_PROT_USNIC;
Or Gerlitzce1e0552017-01-24 13:02:38 +02003063}
3064
Michael Wangc757dea2015-05-05 14:50:32 +02003065/**
Michael Wang296ec002015-05-18 10:41:45 +02003066 * rdma_cap_ib_mad - Check if the port of a device supports Infiniband
Michael Wangc757dea2015-05-05 14:50:32 +02003067 * Management Datagrams.
Michael Wang296ec002015-05-18 10:41:45 +02003068 * @device: Device to check
3069 * @port_num: Port number to check
Michael Wangc757dea2015-05-05 14:50:32 +02003070 *
Michael Wang296ec002015-05-18 10:41:45 +02003071 * Management Datagrams (MAD) are a required part of the InfiniBand
3072 * specification and are supported on all InfiniBand devices. A slightly
3073 * extended version are also supported on OPA interfaces.
Michael Wangc757dea2015-05-05 14:50:32 +02003074 *
Michael Wang296ec002015-05-18 10:41:45 +02003075 * Return: true if the port supports sending/receiving of MAD packets.
Michael Wangc757dea2015-05-05 14:50:32 +02003076 */
Ira Weiny5ede9282015-05-31 17:15:29 -04003077static inline bool rdma_cap_ib_mad(const struct ib_device *device, u8 port_num)
Michael Wangc757dea2015-05-05 14:50:32 +02003078{
Jason Gunthorpe8ceb1352019-02-12 21:12:48 -07003079 return device->port_data[port_num].immutable.core_cap_flags &
3080 RDMA_CORE_CAP_IB_MAD;
Michael Wangc757dea2015-05-05 14:50:32 +02003081}
3082
Michael Wang29541e32015-05-05 14:50:33 +02003083/**
Ira Weiny65995fe2015-06-06 14:38:32 -04003084 * rdma_cap_opa_mad - Check if the port of device provides support for OPA
3085 * Management Datagrams.
3086 * @device: Device to check
3087 * @port_num: Port number to check
3088 *
3089 * Intel OmniPath devices extend and/or replace the InfiniBand Management
3090 * datagrams with their own versions. These OPA MADs share many but not all of
3091 * the characteristics of InfiniBand MADs.
3092 *
3093 * OPA MADs differ in the following ways:
3094 *
3095 * 1) MADs are variable size up to 2K
3096 * IBTA defined MADs remain fixed at 256 bytes
3097 * 2) OPA SMPs must carry valid PKeys
3098 * 3) OPA SMP packets are a different format
3099 *
3100 * Return: true if the port supports OPA MAD packet formats.
3101 */
3102static inline bool rdma_cap_opa_mad(struct ib_device *device, u8 port_num)
3103{
Leon Romanovskyd3243da2019-03-10 17:27:46 +02003104 return device->port_data[port_num].immutable.core_cap_flags &
3105 RDMA_CORE_CAP_OPA_MAD;
Ira Weiny65995fe2015-06-06 14:38:32 -04003106}
3107
3108/**
Michael Wang296ec002015-05-18 10:41:45 +02003109 * rdma_cap_ib_smi - Check if the port of a device provides an Infiniband
3110 * Subnet Management Agent (SMA) on the Subnet Management Interface (SMI).
3111 * @device: Device to check
3112 * @port_num: Port number to check
Michael Wang29541e32015-05-05 14:50:33 +02003113 *
Michael Wang296ec002015-05-18 10:41:45 +02003114 * Each InfiniBand node is required to provide a Subnet Management Agent
3115 * that the subnet manager can access. Prior to the fabric being fully
3116 * configured by the subnet manager, the SMA is accessed via a well known
3117 * interface called the Subnet Management Interface (SMI). This interface
3118 * uses directed route packets to communicate with the SM to get around the
3119 * chicken and egg problem of the SM needing to know what's on the fabric
3120 * in order to configure the fabric, and needing to configure the fabric in
3121 * order to send packets to the devices on the fabric. These directed
3122 * route packets do not need the fabric fully configured in order to reach
3123 * their destination. The SMI is the only method allowed to send
3124 * directed route packets on an InfiniBand fabric.
Michael Wang29541e32015-05-05 14:50:33 +02003125 *
Michael Wang296ec002015-05-18 10:41:45 +02003126 * Return: true if the port provides an SMI.
Michael Wang29541e32015-05-05 14:50:33 +02003127 */
Ira Weiny5ede9282015-05-31 17:15:29 -04003128static inline bool rdma_cap_ib_smi(const struct ib_device *device, u8 port_num)
Michael Wang29541e32015-05-05 14:50:33 +02003129{
Jason Gunthorpe8ceb1352019-02-12 21:12:48 -07003130 return device->port_data[port_num].immutable.core_cap_flags &
3131 RDMA_CORE_CAP_IB_SMI;
Michael Wang29541e32015-05-05 14:50:33 +02003132}
3133
Michael Wang72219cea2015-05-05 14:50:34 +02003134/**
3135 * rdma_cap_ib_cm - Check if the port of device has the capability Infiniband
3136 * Communication Manager.
Michael Wang296ec002015-05-18 10:41:45 +02003137 * @device: Device to check
3138 * @port_num: Port number to check
Michael Wang72219cea2015-05-05 14:50:34 +02003139 *
Michael Wang296ec002015-05-18 10:41:45 +02003140 * The InfiniBand Communication Manager is one of many pre-defined General
3141 * Service Agents (GSA) that are accessed via the General Service
3142 * Interface (GSI). It's role is to facilitate establishment of connections
3143 * between nodes as well as other management related tasks for established
3144 * connections.
Michael Wang72219cea2015-05-05 14:50:34 +02003145 *
Michael Wang296ec002015-05-18 10:41:45 +02003146 * Return: true if the port supports an IB CM (this does not guarantee that
3147 * a CM is actually running however).
Michael Wang72219cea2015-05-05 14:50:34 +02003148 */
Ira Weiny5ede9282015-05-31 17:15:29 -04003149static inline bool rdma_cap_ib_cm(const struct ib_device *device, u8 port_num)
Michael Wang72219cea2015-05-05 14:50:34 +02003150{
Jason Gunthorpe8ceb1352019-02-12 21:12:48 -07003151 return device->port_data[port_num].immutable.core_cap_flags &
3152 RDMA_CORE_CAP_IB_CM;
Michael Wang72219cea2015-05-05 14:50:34 +02003153}
3154
Michael Wang04215332015-05-05 14:50:35 +02003155/**
3156 * rdma_cap_iw_cm - Check if the port of device has the capability IWARP
3157 * Communication Manager.
Michael Wang296ec002015-05-18 10:41:45 +02003158 * @device: Device to check
3159 * @port_num: Port number to check
Michael Wang04215332015-05-05 14:50:35 +02003160 *
Michael Wang296ec002015-05-18 10:41:45 +02003161 * Similar to above, but specific to iWARP connections which have a different
3162 * managment protocol than InfiniBand.
Michael Wang04215332015-05-05 14:50:35 +02003163 *
Michael Wang296ec002015-05-18 10:41:45 +02003164 * Return: true if the port supports an iWARP CM (this does not guarantee that
3165 * a CM is actually running however).
Michael Wang04215332015-05-05 14:50:35 +02003166 */
Ira Weiny5ede9282015-05-31 17:15:29 -04003167static inline bool rdma_cap_iw_cm(const struct ib_device *device, u8 port_num)
Michael Wang04215332015-05-05 14:50:35 +02003168{
Jason Gunthorpe8ceb1352019-02-12 21:12:48 -07003169 return device->port_data[port_num].immutable.core_cap_flags &
3170 RDMA_CORE_CAP_IW_CM;
Michael Wang04215332015-05-05 14:50:35 +02003171}
3172
Michael Wangfe53ba22015-05-05 14:50:36 +02003173/**
3174 * rdma_cap_ib_sa - Check if the port of device has the capability Infiniband
3175 * Subnet Administration.
Michael Wang296ec002015-05-18 10:41:45 +02003176 * @device: Device to check
3177 * @port_num: Port number to check
Michael Wangfe53ba22015-05-05 14:50:36 +02003178 *
Michael Wang296ec002015-05-18 10:41:45 +02003179 * An InfiniBand Subnet Administration (SA) service is a pre-defined General
3180 * Service Agent (GSA) provided by the Subnet Manager (SM). On InfiniBand
3181 * fabrics, devices should resolve routes to other hosts by contacting the
3182 * SA to query the proper route.
Michael Wangfe53ba22015-05-05 14:50:36 +02003183 *
Michael Wang296ec002015-05-18 10:41:45 +02003184 * Return: true if the port should act as a client to the fabric Subnet
3185 * Administration interface. This does not imply that the SA service is
3186 * running locally.
Michael Wangfe53ba22015-05-05 14:50:36 +02003187 */
Ira Weiny5ede9282015-05-31 17:15:29 -04003188static inline bool rdma_cap_ib_sa(const struct ib_device *device, u8 port_num)
Michael Wangfe53ba22015-05-05 14:50:36 +02003189{
Jason Gunthorpe8ceb1352019-02-12 21:12:48 -07003190 return device->port_data[port_num].immutable.core_cap_flags &
3191 RDMA_CORE_CAP_IB_SA;
Michael Wangfe53ba22015-05-05 14:50:36 +02003192}
3193
Michael Wanga31ad3b2015-05-05 14:50:37 +02003194/**
3195 * rdma_cap_ib_mcast - Check if the port of device has the capability Infiniband
3196 * Multicast.
Michael Wang296ec002015-05-18 10:41:45 +02003197 * @device: Device to check
3198 * @port_num: Port number to check
Michael Wanga31ad3b2015-05-05 14:50:37 +02003199 *
Michael Wang296ec002015-05-18 10:41:45 +02003200 * InfiniBand multicast registration is more complex than normal IPv4 or
3201 * IPv6 multicast registration. Each Host Channel Adapter must register
3202 * with the Subnet Manager when it wishes to join a multicast group. It
3203 * should do so only once regardless of how many queue pairs it subscribes
3204 * to this group. And it should leave the group only after all queue pairs
3205 * attached to the group have been detached.
Michael Wanga31ad3b2015-05-05 14:50:37 +02003206 *
Michael Wang296ec002015-05-18 10:41:45 +02003207 * Return: true if the port must undertake the additional adminstrative
3208 * overhead of registering/unregistering with the SM and tracking of the
3209 * total number of queue pairs attached to the multicast group.
Michael Wanga31ad3b2015-05-05 14:50:37 +02003210 */
Ira Weiny5ede9282015-05-31 17:15:29 -04003211static inline bool rdma_cap_ib_mcast(const struct ib_device *device, u8 port_num)
Michael Wanga31ad3b2015-05-05 14:50:37 +02003212{
3213 return rdma_cap_ib_sa(device, port_num);
3214}
3215
Michael Wangbc0f1d72015-05-05 14:50:38 +02003216/**
Michael Wang30a74ef2015-05-05 14:50:39 +02003217 * rdma_cap_af_ib - Check if the port of device has the capability
3218 * Native Infiniband Address.
Michael Wang296ec002015-05-18 10:41:45 +02003219 * @device: Device to check
3220 * @port_num: Port number to check
Michael Wang30a74ef2015-05-05 14:50:39 +02003221 *
Michael Wang296ec002015-05-18 10:41:45 +02003222 * InfiniBand addressing uses a port's GUID + Subnet Prefix to make a default
3223 * GID. RoCE uses a different mechanism, but still generates a GID via
3224 * a prescribed mechanism and port specific data.
Michael Wang30a74ef2015-05-05 14:50:39 +02003225 *
Michael Wang296ec002015-05-18 10:41:45 +02003226 * Return: true if the port uses a GID address to identify devices on the
3227 * network.
Michael Wang30a74ef2015-05-05 14:50:39 +02003228 */
Ira Weiny5ede9282015-05-31 17:15:29 -04003229static inline bool rdma_cap_af_ib(const struct ib_device *device, u8 port_num)
Michael Wang30a74ef2015-05-05 14:50:39 +02003230{
Jason Gunthorpe8ceb1352019-02-12 21:12:48 -07003231 return device->port_data[port_num].immutable.core_cap_flags &
3232 RDMA_CORE_CAP_AF_IB;
Michael Wang30a74ef2015-05-05 14:50:39 +02003233}
3234
3235/**
Michael Wang227128f2015-05-05 14:50:40 +02003236 * rdma_cap_eth_ah - Check if the port of device has the capability
Michael Wang296ec002015-05-18 10:41:45 +02003237 * Ethernet Address Handle.
3238 * @device: Device to check
3239 * @port_num: Port number to check
Michael Wang227128f2015-05-05 14:50:40 +02003240 *
Michael Wang296ec002015-05-18 10:41:45 +02003241 * RoCE is InfiniBand over Ethernet, and it uses a well defined technique
3242 * to fabricate GIDs over Ethernet/IP specific addresses native to the
3243 * port. Normally, packet headers are generated by the sending host
3244 * adapter, but when sending connectionless datagrams, we must manually
3245 * inject the proper headers for the fabric we are communicating over.
Michael Wang227128f2015-05-05 14:50:40 +02003246 *
Michael Wang296ec002015-05-18 10:41:45 +02003247 * Return: true if we are running as a RoCE port and must force the
3248 * addition of a Global Route Header built from our Ethernet Address
3249 * Handle into our header list for connectionless packets.
Michael Wang227128f2015-05-05 14:50:40 +02003250 */
Ira Weiny5ede9282015-05-31 17:15:29 -04003251static inline bool rdma_cap_eth_ah(const struct ib_device *device, u8 port_num)
Michael Wang227128f2015-05-05 14:50:40 +02003252{
Jason Gunthorpe8ceb1352019-02-12 21:12:48 -07003253 return device->port_data[port_num].immutable.core_cap_flags &
3254 RDMA_CORE_CAP_ETH_AH;
Michael Wang227128f2015-05-05 14:50:40 +02003255}
3256
3257/**
Dasaratharaman Chandramouli94d595c2017-03-20 19:38:09 -04003258 * rdma_cap_opa_ah - Check if the port of device supports
3259 * OPA Address handles
3260 * @device: Device to check
3261 * @port_num: Port number to check
3262 *
3263 * Return: true if we are running on an OPA device which supports
3264 * the extended OPA addressing.
3265 */
3266static inline bool rdma_cap_opa_ah(struct ib_device *device, u8 port_num)
3267{
Jason Gunthorpe8ceb1352019-02-12 21:12:48 -07003268 return (device->port_data[port_num].immutable.core_cap_flags &
Dasaratharaman Chandramouli94d595c2017-03-20 19:38:09 -04003269 RDMA_CORE_CAP_OPA_AH) == RDMA_CORE_CAP_OPA_AH;
3270}
3271
3272/**
Ira Weiny337877a2015-06-06 14:38:29 -04003273 * rdma_max_mad_size - Return the max MAD size required by this RDMA Port.
3274 *
3275 * @device: Device
3276 * @port_num: Port number
3277 *
3278 * This MAD size includes the MAD headers and MAD payload. No other headers
3279 * are included.
3280 *
3281 * Return the max MAD size required by the Port. Will return 0 if the port
3282 * does not support MADs
3283 */
3284static inline size_t rdma_max_mad_size(const struct ib_device *device, u8 port_num)
3285{
Jason Gunthorpe8ceb1352019-02-12 21:12:48 -07003286 return device->port_data[port_num].immutable.max_mad_size;
Ira Weiny337877a2015-06-06 14:38:29 -04003287}
3288
Matan Barak03db3a22015-07-30 18:33:26 +03003289/**
3290 * rdma_cap_roce_gid_table - Check if the port of device uses roce_gid_table
3291 * @device: Device to check
3292 * @port_num: Port number to check
3293 *
3294 * RoCE GID table mechanism manages the various GIDs for a device.
3295 *
3296 * NOTE: if allocating the port's GID table has failed, this call will still
3297 * return true, but any RoCE GID table API will fail.
3298 *
3299 * Return: true if the port uses RoCE GID table mechanism in order to manage
3300 * its GIDs.
3301 */
3302static inline bool rdma_cap_roce_gid_table(const struct ib_device *device,
3303 u8 port_num)
3304{
3305 return rdma_protocol_roce(device, port_num) &&
Kamal Heib3023a1e2018-12-10 21:09:48 +02003306 device->ops.add_gid && device->ops.del_gid;
Matan Barak03db3a22015-07-30 18:33:26 +03003307}
3308
Christoph Hellwig002516e2016-05-03 18:01:05 +02003309/*
3310 * Check if the device supports READ W/ INVALIDATE.
3311 */
3312static inline bool rdma_cap_read_inv(struct ib_device *dev, u32 port_num)
3313{
3314 /*
3315 * iWarp drivers must support READ W/ INVALIDATE. No other protocol
3316 * has support for it yet.
3317 */
3318 return rdma_protocol_iwarp(dev, port_num);
3319}
3320
Shiraz Saleem4a353392019-05-06 08:53:32 -05003321/**
3322 * rdma_find_pg_bit - Find page bit given address and HW supported page sizes
3323 *
3324 * @addr: address
3325 * @pgsz_bitmap: bitmap of HW supported page sizes
3326 */
3327static inline unsigned int rdma_find_pg_bit(unsigned long addr,
3328 unsigned long pgsz_bitmap)
3329{
3330 unsigned long align;
3331 unsigned long pgsz;
3332
3333 align = addr & -addr;
3334
3335 /* Find page bit such that addr is aligned to the highest supported
3336 * HW page size
3337 */
3338 pgsz = pgsz_bitmap & ~(-align << 1);
3339 if (!pgsz)
3340 return __ffs(pgsz_bitmap);
3341
3342 return __fls(pgsz);
3343}
3344
Eli Cohen50174a72016-03-11 22:58:38 +02003345int ib_set_vf_link_state(struct ib_device *device, int vf, u8 port,
3346 int state);
3347int ib_get_vf_config(struct ib_device *device, int vf, u8 port,
3348 struct ifla_vf_info *info);
3349int ib_get_vf_stats(struct ib_device *device, int vf, u8 port,
3350 struct ifla_vf_stats *stats);
Danit Goldbergbfcb3c5d2019-11-06 15:08:32 +02003351int ib_get_vf_guid(struct ib_device *device, int vf, u8 port,
3352 struct ifla_vf_guid *node_guid,
3353 struct ifla_vf_guid *port_guid);
Eli Cohen50174a72016-03-11 22:58:38 +02003354int ib_set_vf_guid(struct ib_device *device, int vf, u8 port, u64 guid,
3355 int type);
3356
Linus Torvalds1da177e2005-04-16 15:20:36 -07003357int ib_query_pkey(struct ib_device *device,
3358 u8 port_num, u16 index, u16 *pkey);
3359
3360int ib_modify_device(struct ib_device *device,
3361 int device_modify_mask,
3362 struct ib_device_modify *device_modify);
3363
3364int ib_modify_port(struct ib_device *device,
3365 u8 port_num, int port_modify_mask,
3366 struct ib_port_modify *port_modify);
3367
Yosef Etigin5eb620c2007-05-14 07:26:51 +03003368int ib_find_gid(struct ib_device *device, union ib_gid *gid,
Parav Panditb26c4a12018-03-13 16:06:12 +02003369 u8 *port_num, u16 *index);
Yosef Etigin5eb620c2007-05-14 07:26:51 +03003370
3371int ib_find_pkey(struct ib_device *device,
3372 u8 port_num, u16 pkey, u16 *index);
3373
Christoph Hellwiged082d32016-09-05 12:56:17 +02003374enum ib_pd_flags {
3375 /*
3376 * Create a memory registration for all memory in the system and place
3377 * the rkey for it into pd->unsafe_global_rkey. This can be used by
3378 * ULPs to avoid the overhead of dynamic MRs.
3379 *
3380 * This flag is generally considered unsafe and must only be used in
3381 * extremly trusted environments. Every use of it will log a warning
3382 * in the kernel log.
3383 */
3384 IB_PD_UNSAFE_GLOBAL_RKEY = 0x01,
3385};
Linus Torvalds1da177e2005-04-16 15:20:36 -07003386
Christoph Hellwiged082d32016-09-05 12:56:17 +02003387struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags,
3388 const char *caller);
Shamir Rabinovitchc4367a22019-03-31 19:10:05 +03003389
Christoph Hellwiged082d32016-09-05 12:56:17 +02003390#define ib_alloc_pd(device, flags) \
Leon Romanovskye4496442018-01-28 11:17:18 +02003391 __ib_alloc_pd((device), (flags), KBUILD_MODNAME)
Shamir Rabinovitchc4367a22019-03-31 19:10:05 +03003392
3393/**
3394 * ib_dealloc_pd_user - Deallocate kernel/user PD
3395 * @pd: The protection domain
3396 * @udata: Valid user data or NULL for kernel objects
3397 */
3398void ib_dealloc_pd_user(struct ib_pd *pd, struct ib_udata *udata);
3399
3400/**
3401 * ib_dealloc_pd - Deallocate kernel PD
3402 * @pd: The protection domain
3403 *
3404 * NOTE: for user PD use ib_dealloc_pd_user with valid udata!
3405 */
3406static inline void ib_dealloc_pd(struct ib_pd *pd)
3407{
3408 ib_dealloc_pd_user(pd, NULL);
3409}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003410
Gal Pressmanb090c4e2018-12-12 11:09:05 +02003411enum rdma_create_ah_flags {
3412 /* In a sleepable context */
3413 RDMA_CREATE_AH_SLEEPABLE = BIT(0),
3414};
3415
Linus Torvalds1da177e2005-04-16 15:20:36 -07003416/**
Dasaratharaman Chandramouli0a18cfe2017-04-29 14:41:19 -04003417 * rdma_create_ah - Creates an address handle for the given address vector.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003418 * @pd: The protection domain associated with the address handle.
3419 * @ah_attr: The attributes of the address vector.
Gal Pressmanb090c4e2018-12-12 11:09:05 +02003420 * @flags: Create address handle flags (see enum rdma_create_ah_flags).
Linus Torvalds1da177e2005-04-16 15:20:36 -07003421 *
3422 * The address handle is used to reference a local or global destination
3423 * in all UD QP post sends.
3424 */
Gal Pressmanb090c4e2018-12-12 11:09:05 +02003425struct ib_ah *rdma_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr,
3426 u32 flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003427
3428/**
Parav Pandit5cda6582017-10-16 08:45:12 +03003429 * rdma_create_user_ah - Creates an address handle for the given address vector.
3430 * It resolves destination mac address for ah attribute of RoCE type.
3431 * @pd: The protection domain associated with the address handle.
3432 * @ah_attr: The attributes of the address vector.
3433 * @udata: pointer to user's input output buffer information need by
3434 * provider driver.
3435 *
3436 * It returns 0 on success and returns appropriate error code on error.
3437 * The address handle is used to reference a local or global destination
3438 * in all UD QP post sends.
3439 */
3440struct ib_ah *rdma_create_user_ah(struct ib_pd *pd,
3441 struct rdma_ah_attr *ah_attr,
3442 struct ib_udata *udata);
3443/**
Moni Shoua850d8fd2016-11-10 11:30:56 +02003444 * ib_get_gids_from_rdma_hdr - Get sgid and dgid from GRH or IPv4 header
3445 * work completion.
3446 * @hdr: the L3 header to parse
3447 * @net_type: type of header to parse
3448 * @sgid: place to store source gid
3449 * @dgid: place to store destination gid
3450 */
3451int ib_get_gids_from_rdma_hdr(const union rdma_network_hdr *hdr,
3452 enum rdma_network_type net_type,
3453 union ib_gid *sgid, union ib_gid *dgid);
3454
3455/**
3456 * ib_get_rdma_header_version - Get the header version
3457 * @hdr: the L3 header to parse
3458 */
3459int ib_get_rdma_header_version(const union rdma_network_hdr *hdr);
3460
3461/**
Parav Panditf6bdb142017-11-14 14:52:17 +02003462 * ib_init_ah_attr_from_wc - Initializes address handle attributes from a
Sean Hefty4e00d692006-06-17 20:37:39 -07003463 * work completion.
3464 * @device: Device on which the received message arrived.
3465 * @port_num: Port on which the received message arrived.
3466 * @wc: Work completion associated with the received message.
3467 * @grh: References the received global route header. This parameter is
3468 * ignored unless the work completion indicates that the GRH is valid.
3469 * @ah_attr: Returned attributes that can be used when creating an address
3470 * handle for replying to the message.
Parav Panditb7403212018-06-19 10:59:14 +03003471 * When ib_init_ah_attr_from_wc() returns success,
3472 * (a) for IB link layer it optionally contains a reference to SGID attribute
3473 * when GRH is present for IB link layer.
3474 * (b) for RoCE link layer it contains a reference to SGID attribute.
3475 * User must invoke rdma_cleanup_ah_attr_gid_attr() to release reference to SGID
3476 * attributes which are initialized using ib_init_ah_attr_from_wc().
3477 *
Sean Hefty4e00d692006-06-17 20:37:39 -07003478 */
Parav Panditf6bdb142017-11-14 14:52:17 +02003479int ib_init_ah_attr_from_wc(struct ib_device *device, u8 port_num,
3480 const struct ib_wc *wc, const struct ib_grh *grh,
3481 struct rdma_ah_attr *ah_attr);
Sean Hefty4e00d692006-06-17 20:37:39 -07003482
3483/**
Hal Rosenstock513789e2005-07-27 11:45:34 -07003484 * ib_create_ah_from_wc - Creates an address handle associated with the
3485 * sender of the specified work completion.
3486 * @pd: The protection domain associated with the address handle.
3487 * @wc: Work completion information associated with a received message.
3488 * @grh: References the received global route header. This parameter is
3489 * ignored unless the work completion indicates that the GRH is valid.
3490 * @port_num: The outbound port number to associate with the address.
3491 *
3492 * The address handle is used to reference a local or global destination
3493 * in all UD QP post sends.
3494 */
Ira Weiny73cdaae2015-05-31 17:15:31 -04003495struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, const struct ib_wc *wc,
3496 const struct ib_grh *grh, u8 port_num);
Hal Rosenstock513789e2005-07-27 11:45:34 -07003497
3498/**
Dasaratharaman Chandramouli67b985b2017-04-29 14:41:20 -04003499 * rdma_modify_ah - Modifies the address vector associated with an address
Linus Torvalds1da177e2005-04-16 15:20:36 -07003500 * handle.
3501 * @ah: The address handle to modify.
3502 * @ah_attr: The new address vector attributes to associate with the
3503 * address handle.
3504 */
Dasaratharaman Chandramouli67b985b2017-04-29 14:41:20 -04003505int rdma_modify_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003506
3507/**
Dasaratharaman Chandramoulibfbfd662017-04-29 14:41:21 -04003508 * rdma_query_ah - Queries the address vector associated with an address
Linus Torvalds1da177e2005-04-16 15:20:36 -07003509 * handle.
3510 * @ah: The address handle to query.
3511 * @ah_attr: The address vector attributes associated with the address
3512 * handle.
3513 */
Dasaratharaman Chandramoulibfbfd662017-04-29 14:41:21 -04003514int rdma_query_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003515
Gal Pressman2553ba22018-12-12 11:09:06 +02003516enum rdma_destroy_ah_flags {
3517 /* In a sleepable context */
3518 RDMA_DESTROY_AH_SLEEPABLE = BIT(0),
3519};
3520
Linus Torvalds1da177e2005-04-16 15:20:36 -07003521/**
Shamir Rabinovitchc4367a22019-03-31 19:10:05 +03003522 * rdma_destroy_ah_user - Destroys an address handle.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003523 * @ah: The address handle to destroy.
Gal Pressman2553ba22018-12-12 11:09:06 +02003524 * @flags: Destroy address handle flags (see enum rdma_destroy_ah_flags).
Shamir Rabinovitchc4367a22019-03-31 19:10:05 +03003525 * @udata: Valid user data or NULL for kernel objects
Linus Torvalds1da177e2005-04-16 15:20:36 -07003526 */
Shamir Rabinovitchc4367a22019-03-31 19:10:05 +03003527int rdma_destroy_ah_user(struct ib_ah *ah, u32 flags, struct ib_udata *udata);
3528
3529/**
3530 * rdma_destroy_ah - Destroys an kernel address handle.
3531 * @ah: The address handle to destroy.
3532 * @flags: Destroy address handle flags (see enum rdma_destroy_ah_flags).
3533 *
3534 * NOTE: for user ah use rdma_destroy_ah_user with valid udata!
3535 */
3536static inline int rdma_destroy_ah(struct ib_ah *ah, u32 flags)
3537{
3538 return rdma_destroy_ah_user(ah, flags, NULL);
3539}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003540
3541/**
Roland Dreierd41fcc62005-08-18 12:23:08 -07003542 * ib_create_srq - Creates a SRQ associated with the specified protection
3543 * domain.
3544 * @pd: The protection domain associated with the SRQ.
Dotan Barakabb6e9b2006-02-23 12:13:51 -08003545 * @srq_init_attr: A list of initial attributes required to create the
3546 * SRQ. If SRQ creation succeeds, then the attributes are updated to
3547 * the actual capabilities of the created SRQ.
Roland Dreierd41fcc62005-08-18 12:23:08 -07003548 *
3549 * srq_attr->max_wr and srq_attr->max_sge are read the determine the
3550 * requested size of the SRQ, and set to the actual values allocated
3551 * on return. If ib_create_srq() succeeds, then max_wr and max_sge
3552 * will always be at least as large as the requested values.
3553 */
3554struct ib_srq *ib_create_srq(struct ib_pd *pd,
3555 struct ib_srq_init_attr *srq_init_attr);
3556
3557/**
3558 * ib_modify_srq - Modifies the attributes for the specified SRQ.
3559 * @srq: The SRQ to modify.
3560 * @srq_attr: On input, specifies the SRQ attributes to modify. On output,
3561 * the current values of selected SRQ attributes are returned.
3562 * @srq_attr_mask: A bit-mask used to specify which attributes of the SRQ
3563 * are being modified.
3564 *
3565 * The mask may contain IB_SRQ_MAX_WR to resize the SRQ and/or
3566 * IB_SRQ_LIMIT to set the SRQ's limit and request notification when
3567 * the number of receives queued drops below the limit.
3568 */
3569int ib_modify_srq(struct ib_srq *srq,
3570 struct ib_srq_attr *srq_attr,
3571 enum ib_srq_attr_mask srq_attr_mask);
3572
3573/**
3574 * ib_query_srq - Returns the attribute list and current values for the
3575 * specified SRQ.
3576 * @srq: The SRQ to query.
3577 * @srq_attr: The attributes of the specified SRQ.
3578 */
3579int ib_query_srq(struct ib_srq *srq,
3580 struct ib_srq_attr *srq_attr);
3581
3582/**
Shamir Rabinovitchc4367a22019-03-31 19:10:05 +03003583 * ib_destroy_srq_user - Destroys the specified SRQ.
Roland Dreierd41fcc62005-08-18 12:23:08 -07003584 * @srq: The SRQ to destroy.
Shamir Rabinovitchc4367a22019-03-31 19:10:05 +03003585 * @udata: Valid user data or NULL for kernel objects
Roland Dreierd41fcc62005-08-18 12:23:08 -07003586 */
Shamir Rabinovitchc4367a22019-03-31 19:10:05 +03003587int ib_destroy_srq_user(struct ib_srq *srq, struct ib_udata *udata);
3588
3589/**
3590 * ib_destroy_srq - Destroys the specified kernel SRQ.
3591 * @srq: The SRQ to destroy.
3592 *
3593 * NOTE: for user srq use ib_destroy_srq_user with valid udata!
3594 */
3595static inline int ib_destroy_srq(struct ib_srq *srq)
3596{
3597 return ib_destroy_srq_user(srq, NULL);
3598}
Roland Dreierd41fcc62005-08-18 12:23:08 -07003599
3600/**
3601 * ib_post_srq_recv - Posts a list of work requests to the specified SRQ.
3602 * @srq: The SRQ to post the work request on.
3603 * @recv_wr: A list of work requests to post on the receive queue.
3604 * @bad_recv_wr: On an immediate failure, this parameter will reference
3605 * the work request that failed to be posted on the QP.
3606 */
3607static inline int ib_post_srq_recv(struct ib_srq *srq,
Bart Van Assched34ac5c2018-07-18 09:25:32 -07003608 const struct ib_recv_wr *recv_wr,
3609 const struct ib_recv_wr **bad_recv_wr)
Roland Dreierd41fcc62005-08-18 12:23:08 -07003610{
Bart Van Assched34ac5c2018-07-18 09:25:32 -07003611 const struct ib_recv_wr *dummy;
Bart Van Asschebb039a82018-07-18 09:25:16 -07003612
Kamal Heib3023a1e2018-12-10 21:09:48 +02003613 return srq->device->ops.post_srq_recv(srq, recv_wr,
3614 bad_recv_wr ? : &dummy);
Roland Dreierd41fcc62005-08-18 12:23:08 -07003615}
3616
3617/**
Shamir Rabinovitchc4367a22019-03-31 19:10:05 +03003618 * ib_create_qp_user - Creates a QP associated with the specified protection
Linus Torvalds1da177e2005-04-16 15:20:36 -07003619 * domain.
3620 * @pd: The protection domain associated with the QP.
Dotan Barakabb6e9b2006-02-23 12:13:51 -08003621 * @qp_init_attr: A list of initial attributes required to create the
3622 * QP. If QP creation succeeds, then the attributes are updated to
3623 * the actual capabilities of the created QP.
Shamir Rabinovitchc4367a22019-03-31 19:10:05 +03003624 * @udata: Valid user data or NULL for kernel objects
Linus Torvalds1da177e2005-04-16 15:20:36 -07003625 */
Shamir Rabinovitchc4367a22019-03-31 19:10:05 +03003626struct ib_qp *ib_create_qp_user(struct ib_pd *pd,
3627 struct ib_qp_init_attr *qp_init_attr,
3628 struct ib_udata *udata);
3629
3630/**
3631 * ib_create_qp - Creates a kernel QP associated with the specified protection
3632 * domain.
3633 * @pd: The protection domain associated with the QP.
3634 * @qp_init_attr: A list of initial attributes required to create the
3635 * QP. If QP creation succeeds, then the attributes are updated to
3636 * the actual capabilities of the created QP.
3637 * @udata: Valid user data or NULL for kernel objects
3638 *
3639 * NOTE: for user qp use ib_create_qp_user with valid udata!
3640 */
3641static inline struct ib_qp *ib_create_qp(struct ib_pd *pd,
3642 struct ib_qp_init_attr *qp_init_attr)
3643{
3644 return ib_create_qp_user(pd, qp_init_attr, NULL);
3645}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003646
3647/**
Parav Pandita512c2f2017-05-23 11:26:08 +03003648 * ib_modify_qp_with_udata - Modifies the attributes for the specified QP.
3649 * @qp: The QP to modify.
3650 * @attr: On input, specifies the QP attributes to modify. On output,
3651 * the current values of selected QP attributes are returned.
3652 * @attr_mask: A bit-mask used to specify which attributes of the QP
3653 * are being modified.
3654 * @udata: pointer to user's input output buffer information
3655 * are being modified.
3656 * It returns 0 on success and returns appropriate error code on error.
3657 */
3658int ib_modify_qp_with_udata(struct ib_qp *qp,
3659 struct ib_qp_attr *attr,
3660 int attr_mask,
3661 struct ib_udata *udata);
3662
3663/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07003664 * ib_modify_qp - Modifies the attributes for the specified QP and then
3665 * transitions the QP to the given state.
3666 * @qp: The QP to modify.
3667 * @qp_attr: On input, specifies the QP attributes to modify. On output,
3668 * the current values of selected QP attributes are returned.
3669 * @qp_attr_mask: A bit-mask used to specify which attributes of the QP
3670 * are being modified.
3671 */
3672int ib_modify_qp(struct ib_qp *qp,
3673 struct ib_qp_attr *qp_attr,
3674 int qp_attr_mask);
3675
3676/**
3677 * ib_query_qp - Returns the attribute list and current values for the
3678 * specified QP.
3679 * @qp: The QP to query.
3680 * @qp_attr: The attributes of the specified QP.
3681 * @qp_attr_mask: A bit-mask used to select specific attributes to query.
3682 * @qp_init_attr: Additional attributes of the selected QP.
3683 *
3684 * The qp_attr_mask may be used to limit the query to gathering only the
3685 * selected attributes.
3686 */
3687int ib_query_qp(struct ib_qp *qp,
3688 struct ib_qp_attr *qp_attr,
3689 int qp_attr_mask,
3690 struct ib_qp_init_attr *qp_init_attr);
3691
3692/**
3693 * ib_destroy_qp - Destroys the specified QP.
3694 * @qp: The QP to destroy.
Shamir Rabinovitchc4367a22019-03-31 19:10:05 +03003695 * @udata: Valid udata or NULL for kernel objects
Linus Torvalds1da177e2005-04-16 15:20:36 -07003696 */
Shamir Rabinovitchc4367a22019-03-31 19:10:05 +03003697int ib_destroy_qp_user(struct ib_qp *qp, struct ib_udata *udata);
3698
3699/**
3700 * ib_destroy_qp - Destroys the specified kernel QP.
3701 * @qp: The QP to destroy.
3702 *
3703 * NOTE: for user qp use ib_destroy_qp_user with valid udata!
3704 */
3705static inline int ib_destroy_qp(struct ib_qp *qp)
3706{
3707 return ib_destroy_qp_user(qp, NULL);
3708}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003709
3710/**
Sean Hefty0e0ec7e2011-08-08 15:31:51 -07003711 * ib_open_qp - Obtain a reference to an existing sharable QP.
3712 * @xrcd - XRC domain
3713 * @qp_open_attr: Attributes identifying the QP to open.
3714 *
3715 * Returns a reference to a sharable QP.
3716 */
3717struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd,
3718 struct ib_qp_open_attr *qp_open_attr);
3719
3720/**
3721 * ib_close_qp - Release an external reference to a QP.
Sean Heftyd3d72d92011-05-26 23:06:44 -07003722 * @qp: The QP handle to release
3723 *
Sean Hefty0e0ec7e2011-08-08 15:31:51 -07003724 * The opened QP handle is released by the caller. The underlying
3725 * shared QP is not destroyed until all internal references are released.
Sean Heftyd3d72d92011-05-26 23:06:44 -07003726 */
Sean Hefty0e0ec7e2011-08-08 15:31:51 -07003727int ib_close_qp(struct ib_qp *qp);
Sean Heftyd3d72d92011-05-26 23:06:44 -07003728
3729/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07003730 * ib_post_send - Posts a list of work requests to the send queue of
3731 * the specified QP.
3732 * @qp: The QP to post the work request on.
3733 * @send_wr: A list of work requests to post on the send queue.
3734 * @bad_send_wr: On an immediate failure, this parameter will reference
3735 * the work request that failed to be posted on the QP.
Bart Van Assche55464d42009-12-09 14:20:04 -08003736 *
3737 * While IBA Vol. 1 section 11.4.1.1 specifies that if an immediate
3738 * error is returned, the QP state shall not be affected,
3739 * ib_post_send() will return an immediate error after queueing any
3740 * earlier work requests in the list.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003741 */
3742static inline int ib_post_send(struct ib_qp *qp,
Bart Van Assched34ac5c2018-07-18 09:25:32 -07003743 const struct ib_send_wr *send_wr,
3744 const struct ib_send_wr **bad_send_wr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003745{
Bart Van Assched34ac5c2018-07-18 09:25:32 -07003746 const struct ib_send_wr *dummy;
Bart Van Asschebb039a82018-07-18 09:25:16 -07003747
Kamal Heib3023a1e2018-12-10 21:09:48 +02003748 return qp->device->ops.post_send(qp, send_wr, bad_send_wr ? : &dummy);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003749}
3750
3751/**
3752 * ib_post_recv - Posts a list of work requests to the receive queue of
3753 * the specified QP.
3754 * @qp: The QP to post the work request on.
3755 * @recv_wr: A list of work requests to post on the receive queue.
3756 * @bad_recv_wr: On an immediate failure, this parameter will reference
3757 * the work request that failed to be posted on the QP.
3758 */
3759static inline int ib_post_recv(struct ib_qp *qp,
Bart Van Assched34ac5c2018-07-18 09:25:32 -07003760 const struct ib_recv_wr *recv_wr,
3761 const struct ib_recv_wr **bad_recv_wr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003762{
Bart Van Assched34ac5c2018-07-18 09:25:32 -07003763 const struct ib_recv_wr *dummy;
Bart Van Asschebb039a82018-07-18 09:25:16 -07003764
Kamal Heib3023a1e2018-12-10 21:09:48 +02003765 return qp->device->ops.post_recv(qp, recv_wr, bad_recv_wr ? : &dummy);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003766}
3767
Shamir Rabinovitchc4367a22019-03-31 19:10:05 +03003768struct ib_cq *__ib_alloc_cq_user(struct ib_device *dev, void *private,
3769 int nr_cqe, int comp_vector,
3770 enum ib_poll_context poll_ctx,
3771 const char *caller, struct ib_udata *udata);
Leon Romanovskyf66c8ba2018-01-28 11:17:19 +02003772
Shamir Rabinovitchc4367a22019-03-31 19:10:05 +03003773/**
3774 * ib_alloc_cq_user: Allocate kernel/user CQ
3775 * @dev: The IB device
3776 * @private: Private data attached to the CQE
3777 * @nr_cqe: Number of CQEs in the CQ
3778 * @comp_vector: Completion vector used for the IRQs
3779 * @poll_ctx: Context used for polling the CQ
3780 * @udata: Valid user data or NULL for kernel objects
3781 */
3782static inline struct ib_cq *ib_alloc_cq_user(struct ib_device *dev,
3783 void *private, int nr_cqe,
3784 int comp_vector,
3785 enum ib_poll_context poll_ctx,
3786 struct ib_udata *udata)
3787{
3788 return __ib_alloc_cq_user(dev, private, nr_cqe, comp_vector, poll_ctx,
3789 KBUILD_MODNAME, udata);
3790}
3791
3792/**
3793 * ib_alloc_cq: Allocate kernel CQ
3794 * @dev: The IB device
3795 * @private: Private data attached to the CQE
3796 * @nr_cqe: Number of CQEs in the CQ
3797 * @comp_vector: Completion vector used for the IRQs
3798 * @poll_ctx: Context used for polling the CQ
3799 *
3800 * NOTE: for user cq use ib_alloc_cq_user with valid udata!
3801 */
3802static inline struct ib_cq *ib_alloc_cq(struct ib_device *dev, void *private,
3803 int nr_cqe, int comp_vector,
3804 enum ib_poll_context poll_ctx)
3805{
3806 return ib_alloc_cq_user(dev, private, nr_cqe, comp_vector, poll_ctx,
3807 NULL);
3808}
3809
Chuck Lever20cf4e02019-07-29 13:22:09 -04003810struct ib_cq *__ib_alloc_cq_any(struct ib_device *dev, void *private,
3811 int nr_cqe, enum ib_poll_context poll_ctx,
3812 const char *caller);
3813
3814/**
3815 * ib_alloc_cq_any: Allocate kernel CQ
3816 * @dev: The IB device
3817 * @private: Private data attached to the CQE
3818 * @nr_cqe: Number of CQEs in the CQ
3819 * @poll_ctx: Context used for polling the CQ
3820 */
3821static inline struct ib_cq *ib_alloc_cq_any(struct ib_device *dev,
3822 void *private, int nr_cqe,
3823 enum ib_poll_context poll_ctx)
3824{
3825 return __ib_alloc_cq_any(dev, private, nr_cqe, poll_ctx,
3826 KBUILD_MODNAME);
3827}
3828
Shamir Rabinovitchc4367a22019-03-31 19:10:05 +03003829/**
3830 * ib_free_cq_user - Free kernel/user CQ
3831 * @cq: The CQ to free
3832 * @udata: Valid user data or NULL for kernel objects
3833 */
3834void ib_free_cq_user(struct ib_cq *cq, struct ib_udata *udata);
3835
3836/**
3837 * ib_free_cq - Free kernel CQ
3838 * @cq: The CQ to free
3839 *
3840 * NOTE: for user cq use ib_free_cq_user with valid udata!
3841 */
3842static inline void ib_free_cq(struct ib_cq *cq)
3843{
3844 ib_free_cq_user(cq, NULL);
3845}
3846
Christoph Hellwig14d3a3b2015-12-11 11:53:03 -08003847int ib_process_cq_direct(struct ib_cq *cq, int budget);
3848
Linus Torvalds1da177e2005-04-16 15:20:36 -07003849/**
3850 * ib_create_cq - Creates a CQ on the specified device.
3851 * @device: The device on which to create the CQ.
3852 * @comp_handler: A user-specified callback that is invoked when a
3853 * completion event occurs on the CQ.
3854 * @event_handler: A user-specified callback that is invoked when an
3855 * asynchronous event not associated with a completion occurs on the CQ.
3856 * @cq_context: Context associated with the CQ returned to the user via
3857 * the associated completion and event handlers.
Matan Barak8e372102015-06-11 16:35:21 +03003858 * @cq_attr: The attributes the CQ should be created upon.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003859 *
3860 * Users can examine the cq structure to determine the actual CQ size.
3861 */
Bharat Potnuri7350cdd2018-06-15 20:52:33 +05303862struct ib_cq *__ib_create_cq(struct ib_device *device,
3863 ib_comp_handler comp_handler,
3864 void (*event_handler)(struct ib_event *, void *),
3865 void *cq_context,
3866 const struct ib_cq_init_attr *cq_attr,
3867 const char *caller);
3868#define ib_create_cq(device, cmp_hndlr, evt_hndlr, cq_ctxt, cq_attr) \
3869 __ib_create_cq((device), (cmp_hndlr), (evt_hndlr), (cq_ctxt), (cq_attr), KBUILD_MODNAME)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003870
3871/**
3872 * ib_resize_cq - Modifies the capacity of the CQ.
3873 * @cq: The CQ to resize.
3874 * @cqe: The minimum size of the CQ.
3875 *
3876 * Users can examine the cq structure to determine the actual CQ size.
3877 */
3878int ib_resize_cq(struct ib_cq *cq, int cqe);
3879
3880/**
Leon Romanovsky4190b4e2017-11-13 10:51:19 +02003881 * rdma_set_cq_moderation - Modifies moderation params of the CQ
Eli Cohen2dd57162008-04-16 21:09:33 -07003882 * @cq: The CQ to modify.
3883 * @cq_count: number of CQEs that will trigger an event
3884 * @cq_period: max period of time in usec before triggering an event
3885 *
3886 */
Leon Romanovsky4190b4e2017-11-13 10:51:19 +02003887int rdma_set_cq_moderation(struct ib_cq *cq, u16 cq_count, u16 cq_period);
Eli Cohen2dd57162008-04-16 21:09:33 -07003888
3889/**
Shamir Rabinovitchc4367a22019-03-31 19:10:05 +03003890 * ib_destroy_cq_user - Destroys the specified CQ.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003891 * @cq: The CQ to destroy.
Shamir Rabinovitchc4367a22019-03-31 19:10:05 +03003892 * @udata: Valid user data or NULL for kernel objects
Linus Torvalds1da177e2005-04-16 15:20:36 -07003893 */
Shamir Rabinovitchc4367a22019-03-31 19:10:05 +03003894int ib_destroy_cq_user(struct ib_cq *cq, struct ib_udata *udata);
3895
3896/**
3897 * ib_destroy_cq - Destroys the specified kernel CQ.
3898 * @cq: The CQ to destroy.
3899 *
3900 * NOTE: for user cq use ib_destroy_cq_user with valid udata!
3901 */
Leon Romanovsky890ac8d2019-05-20 09:54:21 +03003902static inline void ib_destroy_cq(struct ib_cq *cq)
Shamir Rabinovitchc4367a22019-03-31 19:10:05 +03003903{
Leon Romanovsky890ac8d2019-05-20 09:54:21 +03003904 ib_destroy_cq_user(cq, NULL);
Shamir Rabinovitchc4367a22019-03-31 19:10:05 +03003905}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003906
3907/**
3908 * ib_poll_cq - poll a CQ for completion(s)
3909 * @cq:the CQ being polled
3910 * @num_entries:maximum number of completions to return
3911 * @wc:array of at least @num_entries &struct ib_wc where completions
3912 * will be returned
3913 *
3914 * Poll a CQ for (possibly multiple) completions. If the return value
3915 * is < 0, an error occurred. If the return value is >= 0, it is the
3916 * number of completions returned. If the return value is
3917 * non-negative and < num_entries, then the CQ was emptied.
3918 */
3919static inline int ib_poll_cq(struct ib_cq *cq, int num_entries,
3920 struct ib_wc *wc)
3921{
Kamal Heib3023a1e2018-12-10 21:09:48 +02003922 return cq->device->ops.poll_cq(cq, num_entries, wc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003923}
3924
3925/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07003926 * ib_req_notify_cq - Request completion notification on a CQ.
3927 * @cq: The CQ to generate an event for.
Roland Dreiered23a722007-05-06 21:02:48 -07003928 * @flags:
3929 * Must contain exactly one of %IB_CQ_SOLICITED or %IB_CQ_NEXT_COMP
3930 * to request an event on the next solicited event or next work
3931 * completion at any type, respectively. %IB_CQ_REPORT_MISSED_EVENTS
3932 * may also be |ed in to request a hint about missed events, as
3933 * described below.
3934 *
3935 * Return Value:
3936 * < 0 means an error occurred while requesting notification
3937 * == 0 means notification was requested successfully, and if
3938 * IB_CQ_REPORT_MISSED_EVENTS was passed in, then no events
3939 * were missed and it is safe to wait for another event. In
3940 * this case is it guaranteed that any work completions added
3941 * to the CQ since the last CQ poll will trigger a completion
3942 * notification event.
3943 * > 0 is only returned if IB_CQ_REPORT_MISSED_EVENTS was passed
3944 * in. It means that the consumer must poll the CQ again to
3945 * make sure it is empty to avoid missing an event because of a
3946 * race between requesting notification and an entry being
3947 * added to the CQ. This return value means it is possible
3948 * (but not guaranteed) that a work completion has been added
3949 * to the CQ since the last poll without triggering a
3950 * completion notification event.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003951 */
3952static inline int ib_req_notify_cq(struct ib_cq *cq,
Roland Dreiered23a722007-05-06 21:02:48 -07003953 enum ib_cq_notify_flags flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003954{
Kamal Heib3023a1e2018-12-10 21:09:48 +02003955 return cq->device->ops.req_notify_cq(cq, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003956}
3957
3958/**
3959 * ib_req_ncomp_notif - Request completion notification when there are
3960 * at least the specified number of unreaped completions on the CQ.
3961 * @cq: The CQ to generate an event for.
3962 * @wc_cnt: The number of unreaped completions that should be on the
3963 * CQ before an event is generated.
3964 */
3965static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt)
3966{
Kamal Heib3023a1e2018-12-10 21:09:48 +02003967 return cq->device->ops.req_ncomp_notif ?
3968 cq->device->ops.req_ncomp_notif(cq, wc_cnt) :
Linus Torvalds1da177e2005-04-16 15:20:36 -07003969 -ENOSYS;
3970}
3971
3972/**
Ralph Campbell9b513092006-12-12 14:27:41 -08003973 * ib_dma_mapping_error - check a DMA addr for error
3974 * @dev: The device for which the dma_addr was created
3975 * @dma_addr: The DMA address to check
3976 */
3977static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
3978{
Bart Van Assche0957c292017-03-07 22:56:53 +00003979 return dma_mapping_error(dev->dma_device, dma_addr);
Ralph Campbell9b513092006-12-12 14:27:41 -08003980}
3981
3982/**
3983 * ib_dma_map_single - Map a kernel virtual address to DMA address
3984 * @dev: The device for which the dma_addr is to be created
3985 * @cpu_addr: The kernel virtual address
3986 * @size: The size of the region in bytes
3987 * @direction: The direction of the DMA
3988 */
3989static inline u64 ib_dma_map_single(struct ib_device *dev,
3990 void *cpu_addr, size_t size,
3991 enum dma_data_direction direction)
3992{
Bart Van Assche0957c292017-03-07 22:56:53 +00003993 return dma_map_single(dev->dma_device, cpu_addr, size, direction);
Ralph Campbell9b513092006-12-12 14:27:41 -08003994}
3995
3996/**
3997 * ib_dma_unmap_single - Destroy a mapping created by ib_dma_map_single()
3998 * @dev: The device for which the DMA address was created
3999 * @addr: The DMA address
4000 * @size: The size of the region in bytes
4001 * @direction: The direction of the DMA
4002 */
4003static inline void ib_dma_unmap_single(struct ib_device *dev,
4004 u64 addr, size_t size,
4005 enum dma_data_direction direction)
4006{
Bart Van Assche0957c292017-03-07 22:56:53 +00004007 dma_unmap_single(dev->dma_device, addr, size, direction);
Arthur Kepnercb9fbc52008-04-29 01:00:34 -07004008}
4009
Ralph Campbell9b513092006-12-12 14:27:41 -08004010/**
4011 * ib_dma_map_page - Map a physical page to DMA address
4012 * @dev: The device for which the dma_addr is to be created
4013 * @page: The page to be mapped
4014 * @offset: The offset within the page
4015 * @size: The size of the region in bytes
4016 * @direction: The direction of the DMA
4017 */
4018static inline u64 ib_dma_map_page(struct ib_device *dev,
4019 struct page *page,
4020 unsigned long offset,
4021 size_t size,
4022 enum dma_data_direction direction)
4023{
Bart Van Assche0957c292017-03-07 22:56:53 +00004024 return dma_map_page(dev->dma_device, page, offset, size, direction);
Ralph Campbell9b513092006-12-12 14:27:41 -08004025}
4026
4027/**
4028 * ib_dma_unmap_page - Destroy a mapping created by ib_dma_map_page()
4029 * @dev: The device for which the DMA address was created
4030 * @addr: The DMA address
4031 * @size: The size of the region in bytes
4032 * @direction: The direction of the DMA
4033 */
4034static inline void ib_dma_unmap_page(struct ib_device *dev,
4035 u64 addr, size_t size,
4036 enum dma_data_direction direction)
4037{
Bart Van Assche0957c292017-03-07 22:56:53 +00004038 dma_unmap_page(dev->dma_device, addr, size, direction);
Ralph Campbell9b513092006-12-12 14:27:41 -08004039}
4040
4041/**
4042 * ib_dma_map_sg - Map a scatter/gather list to DMA addresses
4043 * @dev: The device for which the DMA addresses are to be created
4044 * @sg: The array of scatter/gather entries
4045 * @nents: The number of scatter/gather entries
4046 * @direction: The direction of the DMA
4047 */
4048static inline int ib_dma_map_sg(struct ib_device *dev,
4049 struct scatterlist *sg, int nents,
4050 enum dma_data_direction direction)
4051{
Bart Van Assche0957c292017-03-07 22:56:53 +00004052 return dma_map_sg(dev->dma_device, sg, nents, direction);
Ralph Campbell9b513092006-12-12 14:27:41 -08004053}
4054
4055/**
4056 * ib_dma_unmap_sg - Unmap a scatter/gather list of DMA addresses
4057 * @dev: The device for which the DMA addresses were created
4058 * @sg: The array of scatter/gather entries
4059 * @nents: The number of scatter/gather entries
4060 * @direction: The direction of the DMA
4061 */
4062static inline void ib_dma_unmap_sg(struct ib_device *dev,
4063 struct scatterlist *sg, int nents,
4064 enum dma_data_direction direction)
4065{
Bart Van Assche0957c292017-03-07 22:56:53 +00004066 dma_unmap_sg(dev->dma_device, sg, nents, direction);
Ralph Campbell9b513092006-12-12 14:27:41 -08004067}
4068
Arthur Kepnercb9fbc52008-04-29 01:00:34 -07004069static inline int ib_dma_map_sg_attrs(struct ib_device *dev,
4070 struct scatterlist *sg, int nents,
4071 enum dma_data_direction direction,
Krzysztof Kozlowski00085f12016-08-03 13:46:00 -07004072 unsigned long dma_attrs)
Arthur Kepnercb9fbc52008-04-29 01:00:34 -07004073{
Bart Van Assche0957c292017-03-07 22:56:53 +00004074 return dma_map_sg_attrs(dev->dma_device, sg, nents, direction,
4075 dma_attrs);
Arthur Kepnercb9fbc52008-04-29 01:00:34 -07004076}
4077
4078static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
4079 struct scatterlist *sg, int nents,
4080 enum dma_data_direction direction,
Krzysztof Kozlowski00085f12016-08-03 13:46:00 -07004081 unsigned long dma_attrs)
Arthur Kepnercb9fbc52008-04-29 01:00:34 -07004082{
Bart Van Assche0957c292017-03-07 22:56:53 +00004083 dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction, dma_attrs);
Arthur Kepnercb9fbc52008-04-29 01:00:34 -07004084}
Ralph Campbell9b513092006-12-12 14:27:41 -08004085
4086/**
Bart Van Assche0b5cb332019-01-22 10:25:20 -08004087 * ib_dma_max_seg_size - Return the size limit of a single DMA transfer
4088 * @dev: The device to query
4089 *
4090 * The returned value represents a size in bytes.
4091 */
4092static inline unsigned int ib_dma_max_seg_size(struct ib_device *dev)
4093{
Bart Van Asscheecdfdfd2019-10-25 15:58:27 -07004094 return dma_get_max_seg_size(dev->dma_device);
Bart Van Assche0b5cb332019-01-22 10:25:20 -08004095}
4096
4097/**
Ralph Campbell9b513092006-12-12 14:27:41 -08004098 * ib_dma_sync_single_for_cpu - Prepare DMA region to be accessed by CPU
4099 * @dev: The device for which the DMA address was created
4100 * @addr: The DMA address
4101 * @size: The size of the region in bytes
4102 * @dir: The direction of the DMA
4103 */
4104static inline void ib_dma_sync_single_for_cpu(struct ib_device *dev,
4105 u64 addr,
4106 size_t size,
4107 enum dma_data_direction dir)
4108{
Bart Van Assche0957c292017-03-07 22:56:53 +00004109 dma_sync_single_for_cpu(dev->dma_device, addr, size, dir);
Ralph Campbell9b513092006-12-12 14:27:41 -08004110}
4111
4112/**
4113 * ib_dma_sync_single_for_device - Prepare DMA region to be accessed by device
4114 * @dev: The device for which the DMA address was created
4115 * @addr: The DMA address
4116 * @size: The size of the region in bytes
4117 * @dir: The direction of the DMA
4118 */
4119static inline void ib_dma_sync_single_for_device(struct ib_device *dev,
4120 u64 addr,
4121 size_t size,
4122 enum dma_data_direction dir)
4123{
Bart Van Assche0957c292017-03-07 22:56:53 +00004124 dma_sync_single_for_device(dev->dma_device, addr, size, dir);
Ralph Campbell9b513092006-12-12 14:27:41 -08004125}
4126
4127/**
4128 * ib_dma_alloc_coherent - Allocate memory and map it for DMA
4129 * @dev: The device for which the DMA address is requested
4130 * @size: The size of the region to allocate in bytes
4131 * @dma_handle: A pointer for returning the DMA address of the region
4132 * @flag: memory allocator flags
4133 */
4134static inline void *ib_dma_alloc_coherent(struct ib_device *dev,
4135 size_t size,
Bart Van Assched43dbac2017-01-20 13:04:10 -08004136 dma_addr_t *dma_handle,
Ralph Campbell9b513092006-12-12 14:27:41 -08004137 gfp_t flag)
4138{
Bart Van Assche0957c292017-03-07 22:56:53 +00004139 return dma_alloc_coherent(dev->dma_device, size, dma_handle, flag);
Ralph Campbell9b513092006-12-12 14:27:41 -08004140}
4141
4142/**
4143 * ib_dma_free_coherent - Free memory allocated by ib_dma_alloc_coherent()
4144 * @dev: The device for which the DMA addresses were allocated
4145 * @size: The size of the region
4146 * @cpu_addr: the address returned by ib_dma_alloc_coherent()
4147 * @dma_handle: the DMA address returned by ib_dma_alloc_coherent()
4148 */
4149static inline void ib_dma_free_coherent(struct ib_device *dev,
4150 size_t size, void *cpu_addr,
Bart Van Assched43dbac2017-01-20 13:04:10 -08004151 dma_addr_t dma_handle)
Ralph Campbell9b513092006-12-12 14:27:41 -08004152{
Bart Van Assche0957c292017-03-07 22:56:53 +00004153 dma_free_coherent(dev->dma_device, size, cpu_addr, dma_handle);
Ralph Campbell9b513092006-12-12 14:27:41 -08004154}
4155
4156/**
Shamir Rabinovitchc4367a22019-03-31 19:10:05 +03004157 * ib_dereg_mr_user - Deregisters a memory region and removes it from the
4158 * HCA translation table.
4159 * @mr: The memory region to deregister.
4160 * @udata: Valid user data or NULL for kernel object
4161 *
4162 * This function can fail, if the memory region has memory windows bound to it.
4163 */
4164int ib_dereg_mr_user(struct ib_mr *mr, struct ib_udata *udata);
4165
4166/**
4167 * ib_dereg_mr - Deregisters a kernel memory region and removes it from the
Linus Torvalds1da177e2005-04-16 15:20:36 -07004168 * HCA translation table.
4169 * @mr: The memory region to deregister.
Shani Michaeli7083e422013-02-06 16:19:12 +00004170 *
4171 * This function can fail, if the memory region has memory windows bound to it.
Shamir Rabinovitchc4367a22019-03-31 19:10:05 +03004172 *
4173 * NOTE: for user mr use ib_dereg_mr_user with valid udata!
Linus Torvalds1da177e2005-04-16 15:20:36 -07004174 */
Shamir Rabinovitchc4367a22019-03-31 19:10:05 +03004175static inline int ib_dereg_mr(struct ib_mr *mr)
4176{
4177 return ib_dereg_mr_user(mr, NULL);
4178}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004179
Shamir Rabinovitchc4367a22019-03-31 19:10:05 +03004180struct ib_mr *ib_alloc_mr_user(struct ib_pd *pd, enum ib_mr_type mr_type,
4181 u32 max_num_sg, struct ib_udata *udata);
4182
4183static inline struct ib_mr *ib_alloc_mr(struct ib_pd *pd,
4184 enum ib_mr_type mr_type, u32 max_num_sg)
4185{
4186 return ib_alloc_mr_user(pd, mr_type, max_num_sg, NULL);
4187}
Steve Wise00f7ec32008-07-14 23:48:45 -07004188
Israel Rukshin26bc7ea2019-06-11 18:52:39 +03004189struct ib_mr *ib_alloc_mr_integrity(struct ib_pd *pd,
4190 u32 max_num_data_sg,
4191 u32 max_num_meta_sg);
4192
Steve Wise00f7ec32008-07-14 23:48:45 -07004193/**
Steve Wise00f7ec32008-07-14 23:48:45 -07004194 * ib_update_fast_reg_key - updates the key portion of the fast_reg MR
4195 * R_Key and L_Key.
4196 * @mr - struct ib_mr pointer to be updated.
4197 * @newkey - new key to be used.
4198 */
4199static inline void ib_update_fast_reg_key(struct ib_mr *mr, u8 newkey)
4200{
4201 mr->lkey = (mr->lkey & 0xffffff00) | newkey;
4202 mr->rkey = (mr->rkey & 0xffffff00) | newkey;
4203}
4204
4205/**
Shani Michaeli7083e422013-02-06 16:19:12 +00004206 * ib_inc_rkey - increments the key portion of the given rkey. Can be used
4207 * for calculating a new rkey for type 2 memory windows.
4208 * @rkey - the rkey to increment.
4209 */
4210static inline u32 ib_inc_rkey(u32 rkey)
4211{
4212 const u32 mask = 0x000000ff;
4213 return ((rkey + 1) & mask) | (rkey & ~mask);
4214}
4215
4216/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07004217 * ib_alloc_fmr - Allocates a unmapped fast memory region.
4218 * @pd: The protection domain associated with the unmapped region.
4219 * @mr_access_flags: Specifies the memory access rights.
4220 * @fmr_attr: Attributes of the unmapped region.
4221 *
4222 * A fast memory region must be mapped before it can be used as part of
4223 * a work request.
4224 */
4225struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
4226 int mr_access_flags,
4227 struct ib_fmr_attr *fmr_attr);
4228
4229/**
4230 * ib_map_phys_fmr - Maps a list of physical pages to a fast memory region.
4231 * @fmr: The fast memory region to associate with the pages.
4232 * @page_list: An array of physical pages to map to the fast memory region.
4233 * @list_len: The number of pages in page_list.
4234 * @iova: The I/O virtual address to use with the mapped region.
4235 */
4236static inline int ib_map_phys_fmr(struct ib_fmr *fmr,
4237 u64 *page_list, int list_len,
4238 u64 iova)
4239{
Kamal Heib3023a1e2018-12-10 21:09:48 +02004240 return fmr->device->ops.map_phys_fmr(fmr, page_list, list_len, iova);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004241}
4242
4243/**
4244 * ib_unmap_fmr - Removes the mapping from a list of fast memory regions.
4245 * @fmr_list: A linked list of fast memory regions to unmap.
4246 */
4247int ib_unmap_fmr(struct list_head *fmr_list);
4248
4249/**
4250 * ib_dealloc_fmr - Deallocates a fast memory region.
4251 * @fmr: The fast memory region to deallocate.
4252 */
4253int ib_dealloc_fmr(struct ib_fmr *fmr);
4254
4255/**
4256 * ib_attach_mcast - Attaches the specified QP to a multicast group.
4257 * @qp: QP to attach to the multicast group. The QP must be type
4258 * IB_QPT_UD.
4259 * @gid: Multicast group GID.
4260 * @lid: Multicast group LID in host byte order.
4261 *
4262 * In order to send and receive multicast packets, subnet
4263 * administration must have created the multicast group and configured
4264 * the fabric appropriately. The port associated with the specified
4265 * QP must also be a member of the multicast group.
4266 */
4267int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
4268
4269/**
4270 * ib_detach_mcast - Detaches the specified QP from a multicast group.
4271 * @qp: QP to detach from the multicast group.
4272 * @gid: Multicast group GID.
4273 * @lid: Multicast group LID in host byte order.
4274 */
4275int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
4276
Sean Hefty59991f92011-05-23 17:52:46 -07004277/**
4278 * ib_alloc_xrcd - Allocates an XRC domain.
4279 * @device: The device on which to allocate the XRC domain.
Leon Romanovskyf66c8ba2018-01-28 11:17:19 +02004280 * @caller: Module name for kernel consumers
Sean Hefty59991f92011-05-23 17:52:46 -07004281 */
Leon Romanovskyf66c8ba2018-01-28 11:17:19 +02004282struct ib_xrcd *__ib_alloc_xrcd(struct ib_device *device, const char *caller);
4283#define ib_alloc_xrcd(device) \
4284 __ib_alloc_xrcd((device), KBUILD_MODNAME)
Sean Hefty59991f92011-05-23 17:52:46 -07004285
4286/**
4287 * ib_dealloc_xrcd - Deallocates an XRC domain.
4288 * @xrcd: The XRC domain to deallocate.
Shamir Rabinovitchc4367a22019-03-31 19:10:05 +03004289 * @udata: Valid user data or NULL for kernel object
Sean Hefty59991f92011-05-23 17:52:46 -07004290 */
Shamir Rabinovitchc4367a22019-03-31 19:10:05 +03004291int ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata);
Sean Hefty59991f92011-05-23 17:52:46 -07004292
Eli Cohen1c636f82013-10-31 15:26:32 +02004293static inline int ib_check_mr_access(int flags)
4294{
4295 /*
4296 * Local write permission is required if remote write or
4297 * remote atomic permission is also requested.
4298 */
4299 if (flags & (IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_REMOTE_WRITE) &&
4300 !(flags & IB_ACCESS_LOCAL_WRITE))
4301 return -EINVAL;
4302
4303 return 0;
4304}
4305
Jack Morgenstein08bb5582018-05-23 15:30:30 +03004306static inline bool ib_access_writable(int access_flags)
4307{
4308 /*
4309 * We have writable memory backing the MR if any of the following
4310 * access flags are set. "Local write" and "remote write" obviously
4311 * require write access. "Remote atomic" can do things like fetch and
4312 * add, which will modify memory, and "MW bind" can change permissions
4313 * by binding a window.
4314 */
4315 return access_flags &
4316 (IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE |
4317 IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_MW_BIND);
4318}
4319
Sagi Grimberg1b01d332014-02-23 14:19:05 +02004320/**
4321 * ib_check_mr_status: lightweight check of MR status.
4322 * This routine may provide status checks on a selected
4323 * ib_mr. first use is for signature status check.
4324 *
4325 * @mr: A memory region.
4326 * @check_mask: Bitmask of which checks to perform from
4327 * ib_mr_status_check enumeration.
4328 * @mr_status: The container of relevant status checks.
4329 * failed checks will be indicated in the status bitmask
4330 * and the relevant info shall be in the error item.
4331 */
4332int ib_check_mr_status(struct ib_mr *mr, u32 check_mask,
4333 struct ib_mr_status *mr_status);
4334
Jason Gunthorped79af722019-01-10 14:02:24 -07004335/**
4336 * ib_device_try_get: Hold a registration lock
4337 * device: The device to lock
4338 *
4339 * A device under an active registration lock cannot become unregistered. It
4340 * is only possible to obtain a registration lock on a device that is fully
4341 * registered, otherwise this function returns false.
4342 *
4343 * The registration lock is only necessary for actions which require the
4344 * device to still be registered. Uses that only require the device pointer to
4345 * be valid should use get_device(&ibdev->dev) to hold the memory.
4346 *
4347 */
4348static inline bool ib_device_try_get(struct ib_device *dev)
4349{
4350 return refcount_inc_not_zero(&dev->refcount);
4351}
4352
4353void ib_device_put(struct ib_device *device);
Jason Gunthorpe324e2272019-02-12 21:12:51 -07004354struct ib_device *ib_device_get_by_netdev(struct net_device *ndev,
4355 enum rdma_driver_id driver_id);
4356struct ib_device *ib_device_get_by_name(const char *name,
4357 enum rdma_driver_id driver_id);
Yotam Kenneth9268f722015-07-30 17:50:15 +03004358struct net_device *ib_get_net_dev_by_params(struct ib_device *dev, u8 port,
4359 u16 pkey, const union ib_gid *gid,
4360 const struct sockaddr *addr);
Jason Gunthorpec2261dd2019-02-12 21:12:50 -07004361int ib_device_set_netdev(struct ib_device *ib_dev, struct net_device *ndev,
4362 unsigned int port);
4363struct net_device *ib_device_netdev(struct ib_device *dev, u8 port);
4364
Yishai Hadas5fd251c2016-05-23 15:20:48 +03004365struct ib_wq *ib_create_wq(struct ib_pd *pd,
4366 struct ib_wq_init_attr *init_attr);
Shamir Rabinovitchc4367a22019-03-31 19:10:05 +03004367int ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata);
Yishai Hadas5fd251c2016-05-23 15:20:48 +03004368int ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *attr,
4369 u32 wq_attr_mask);
Yishai Hadas6d397862016-05-23 15:20:51 +03004370struct ib_rwq_ind_table *ib_create_rwq_ind_table(struct ib_device *device,
4371 struct ib_rwq_ind_table_init_attr*
4372 wq_ind_table_init_attr);
4373int ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table);
Yotam Kenneth9268f722015-07-30 17:50:15 +03004374
Christoph Hellwigff2ba992016-05-03 18:01:04 +02004375int ib_map_mr_sg(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
Bart Van Assche9aa8b322016-05-12 10:49:15 -07004376 unsigned int *sg_offset, unsigned int page_size);
Max Gurtovoy2cdfcdd2019-06-11 18:52:40 +03004377int ib_map_mr_sg_pi(struct ib_mr *mr, struct scatterlist *data_sg,
4378 int data_sg_nents, unsigned int *data_sg_offset,
4379 struct scatterlist *meta_sg, int meta_sg_nents,
4380 unsigned int *meta_sg_offset, unsigned int page_size);
Sagi Grimberg4c67e2b2015-10-13 19:11:24 +03004381
4382static inline int
Christoph Hellwigff2ba992016-05-03 18:01:04 +02004383ib_map_mr_sg_zbva(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
Bart Van Assche9aa8b322016-05-12 10:49:15 -07004384 unsigned int *sg_offset, unsigned int page_size)
Sagi Grimberg4c67e2b2015-10-13 19:11:24 +03004385{
4386 int n;
4387
Christoph Hellwigff2ba992016-05-03 18:01:04 +02004388 n = ib_map_mr_sg(mr, sg, sg_nents, sg_offset, page_size);
Sagi Grimberg4c67e2b2015-10-13 19:11:24 +03004389 mr->iova = 0;
4390
4391 return n;
4392}
4393
Christoph Hellwigff2ba992016-05-03 18:01:04 +02004394int ib_sg_to_pages(struct ib_mr *mr, struct scatterlist *sgl, int sg_nents,
Bart Van Assche9aa8b322016-05-12 10:49:15 -07004395 unsigned int *sg_offset, int (*set_page)(struct ib_mr *, u64));
Sagi Grimberg4c67e2b2015-10-13 19:11:24 +03004396
Steve Wise765d6772016-02-17 08:15:41 -08004397void ib_drain_rq(struct ib_qp *qp);
4398void ib_drain_sq(struct ib_qp *qp);
4399void ib_drain_qp(struct ib_qp *qp);
Moni Shoua850d8fd2016-11-10 11:30:56 +02004400
Yuval Shaiad4186192017-06-14 23:13:34 +03004401int ib_get_eth_speed(struct ib_device *dev, u8 port_num, u8 *speed, u8 *width);
Dasaratharaman Chandramouli2224c472017-04-29 14:41:27 -04004402
4403static inline u8 *rdma_ah_retrieve_dmac(struct rdma_ah_attr *attr)
4404{
Dasaratharaman Chandramouli44c58482017-04-29 14:41:29 -04004405 if (attr->type == RDMA_AH_ATTR_TYPE_ROCE)
4406 return attr->roce.dmac;
4407 return NULL;
Dasaratharaman Chandramouli2224c472017-04-29 14:41:27 -04004408}
4409
Dasaratharaman Chandramouli64b46462017-04-29 14:41:30 -04004410static inline void rdma_ah_set_dlid(struct rdma_ah_attr *attr, u32 dlid)
Dasaratharaman Chandramouli2224c472017-04-29 14:41:27 -04004411{
Dasaratharaman Chandramouli44c58482017-04-29 14:41:29 -04004412 if (attr->type == RDMA_AH_ATTR_TYPE_IB)
Dasaratharaman Chandramouli64b46462017-04-29 14:41:30 -04004413 attr->ib.dlid = (u16)dlid;
4414 else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
4415 attr->opa.dlid = dlid;
Dasaratharaman Chandramouli2224c472017-04-29 14:41:27 -04004416}
4417
Dasaratharaman Chandramouli64b46462017-04-29 14:41:30 -04004418static inline u32 rdma_ah_get_dlid(const struct rdma_ah_attr *attr)
Dasaratharaman Chandramouli2224c472017-04-29 14:41:27 -04004419{
Dasaratharaman Chandramouli44c58482017-04-29 14:41:29 -04004420 if (attr->type == RDMA_AH_ATTR_TYPE_IB)
4421 return attr->ib.dlid;
Dasaratharaman Chandramouli64b46462017-04-29 14:41:30 -04004422 else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
4423 return attr->opa.dlid;
Dasaratharaman Chandramouli44c58482017-04-29 14:41:29 -04004424 return 0;
Dasaratharaman Chandramouli2224c472017-04-29 14:41:27 -04004425}
4426
4427static inline void rdma_ah_set_sl(struct rdma_ah_attr *attr, u8 sl)
4428{
4429 attr->sl = sl;
4430}
4431
4432static inline u8 rdma_ah_get_sl(const struct rdma_ah_attr *attr)
4433{
4434 return attr->sl;
4435}
4436
4437static inline void rdma_ah_set_path_bits(struct rdma_ah_attr *attr,
4438 u8 src_path_bits)
4439{
Dasaratharaman Chandramouli44c58482017-04-29 14:41:29 -04004440 if (attr->type == RDMA_AH_ATTR_TYPE_IB)
4441 attr->ib.src_path_bits = src_path_bits;
Dasaratharaman Chandramouli64b46462017-04-29 14:41:30 -04004442 else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
4443 attr->opa.src_path_bits = src_path_bits;
Dasaratharaman Chandramouli2224c472017-04-29 14:41:27 -04004444}
4445
4446static inline u8 rdma_ah_get_path_bits(const struct rdma_ah_attr *attr)
4447{
Dasaratharaman Chandramouli44c58482017-04-29 14:41:29 -04004448 if (attr->type == RDMA_AH_ATTR_TYPE_IB)
4449 return attr->ib.src_path_bits;
Dasaratharaman Chandramouli64b46462017-04-29 14:41:30 -04004450 else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
4451 return attr->opa.src_path_bits;
Dasaratharaman Chandramouli44c58482017-04-29 14:41:29 -04004452 return 0;
Dasaratharaman Chandramouli2224c472017-04-29 14:41:27 -04004453}
4454
Don Hiattd98bb7f2017-08-04 13:54:16 -07004455static inline void rdma_ah_set_make_grd(struct rdma_ah_attr *attr,
4456 bool make_grd)
4457{
4458 if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
4459 attr->opa.make_grd = make_grd;
4460}
4461
4462static inline bool rdma_ah_get_make_grd(const struct rdma_ah_attr *attr)
4463{
4464 if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
4465 return attr->opa.make_grd;
4466 return false;
4467}
4468
Dasaratharaman Chandramouli2224c472017-04-29 14:41:27 -04004469static inline void rdma_ah_set_port_num(struct rdma_ah_attr *attr, u8 port_num)
4470{
4471 attr->port_num = port_num;
4472}
4473
4474static inline u8 rdma_ah_get_port_num(const struct rdma_ah_attr *attr)
4475{
4476 return attr->port_num;
4477}
4478
4479static inline void rdma_ah_set_static_rate(struct rdma_ah_attr *attr,
4480 u8 static_rate)
4481{
4482 attr->static_rate = static_rate;
4483}
4484
4485static inline u8 rdma_ah_get_static_rate(const struct rdma_ah_attr *attr)
4486{
4487 return attr->static_rate;
4488}
4489
4490static inline void rdma_ah_set_ah_flags(struct rdma_ah_attr *attr,
4491 enum ib_ah_flags flag)
4492{
4493 attr->ah_flags = flag;
4494}
4495
4496static inline enum ib_ah_flags
4497 rdma_ah_get_ah_flags(const struct rdma_ah_attr *attr)
4498{
4499 return attr->ah_flags;
4500}
4501
4502static inline const struct ib_global_route
4503 *rdma_ah_read_grh(const struct rdma_ah_attr *attr)
4504{
4505 return &attr->grh;
4506}
4507
4508/*To retrieve and modify the grh */
4509static inline struct ib_global_route
4510 *rdma_ah_retrieve_grh(struct rdma_ah_attr *attr)
4511{
4512 return &attr->grh;
4513}
4514
4515static inline void rdma_ah_set_dgid_raw(struct rdma_ah_attr *attr, void *dgid)
4516{
4517 struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);
4518
4519 memcpy(grh->dgid.raw, dgid, sizeof(grh->dgid));
4520}
4521
4522static inline void rdma_ah_set_subnet_prefix(struct rdma_ah_attr *attr,
4523 __be64 prefix)
4524{
4525 struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);
4526
4527 grh->dgid.global.subnet_prefix = prefix;
4528}
4529
4530static inline void rdma_ah_set_interface_id(struct rdma_ah_attr *attr,
4531 __be64 if_id)
4532{
4533 struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);
4534
4535 grh->dgid.global.interface_id = if_id;
4536}
4537
4538static inline void rdma_ah_set_grh(struct rdma_ah_attr *attr,
4539 union ib_gid *dgid, u32 flow_label,
4540 u8 sgid_index, u8 hop_limit,
4541 u8 traffic_class)
4542{
4543 struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);
4544
4545 attr->ah_flags = IB_AH_GRH;
4546 if (dgid)
4547 grh->dgid = *dgid;
4548 grh->flow_label = flow_label;
4549 grh->sgid_index = sgid_index;
4550 grh->hop_limit = hop_limit;
4551 grh->traffic_class = traffic_class;
Jason Gunthorpe8d9ec9a2018-06-13 10:22:03 +03004552 grh->sgid_attr = NULL;
Dasaratharaman Chandramouli2224c472017-04-29 14:41:27 -04004553}
Dasaratharaman Chandramouli44c58482017-04-29 14:41:29 -04004554
Jason Gunthorpe8d9ec9a2018-06-13 10:22:03 +03004555void rdma_destroy_ah_attr(struct rdma_ah_attr *ah_attr);
4556void rdma_move_grh_sgid_attr(struct rdma_ah_attr *attr, union ib_gid *dgid,
4557 u32 flow_label, u8 hop_limit, u8 traffic_class,
4558 const struct ib_gid_attr *sgid_attr);
Jason Gunthorped97099f2018-06-13 10:22:05 +03004559void rdma_copy_ah_attr(struct rdma_ah_attr *dest,
4560 const struct rdma_ah_attr *src);
4561void rdma_replace_ah_attr(struct rdma_ah_attr *old,
4562 const struct rdma_ah_attr *new);
4563void rdma_move_ah_attr(struct rdma_ah_attr *dest, struct rdma_ah_attr *src);
Jason Gunthorpe8d9ec9a2018-06-13 10:22:03 +03004564
Don Hiatt87daac62018-02-01 10:57:03 -08004565/**
4566 * rdma_ah_find_type - Return address handle type.
4567 *
4568 * @dev: Device to be checked
4569 * @port_num: Port number
4570 */
Dasaratharaman Chandramouli44c58482017-04-29 14:41:29 -04004571static inline enum rdma_ah_attr_type rdma_ah_find_type(struct ib_device *dev,
Don Hiatt87daac62018-02-01 10:57:03 -08004572 u8 port_num)
Dasaratharaman Chandramouli44c58482017-04-29 14:41:29 -04004573{
Parav Pandita6532e72018-01-12 07:58:42 +02004574 if (rdma_protocol_roce(dev, port_num))
Dasaratharaman Chandramouli44c58482017-04-29 14:41:29 -04004575 return RDMA_AH_ATTR_TYPE_ROCE;
Don Hiatt87daac62018-02-01 10:57:03 -08004576 if (rdma_protocol_ib(dev, port_num)) {
4577 if (rdma_cap_opa_ah(dev, port_num))
4578 return RDMA_AH_ATTR_TYPE_OPA;
Dasaratharaman Chandramouli44c58482017-04-29 14:41:29 -04004579 return RDMA_AH_ATTR_TYPE_IB;
Don Hiatt87daac62018-02-01 10:57:03 -08004580 }
4581
4582 return RDMA_AH_ATTR_TYPE_UNDEFINED;
Dasaratharaman Chandramouli44c58482017-04-29 14:41:29 -04004583}
Hiatt, Don7db20ec2017-06-08 13:37:49 -04004584
Hiatt, Don62ede772017-08-14 14:17:43 -04004585/**
4586 * ib_lid_cpu16 - Return lid in 16bit CPU encoding.
4587 * In the current implementation the only way to get
4588 * get the 32bit lid is from other sources for OPA.
4589 * For IB, lids will always be 16bits so cast the
4590 * value accordingly.
4591 *
4592 * @lid: A 32bit LID
4593 */
4594static inline u16 ib_lid_cpu16(u32 lid)
Hiatt, Don7db20ec2017-06-08 13:37:49 -04004595{
Hiatt, Don62ede772017-08-14 14:17:43 -04004596 WARN_ON_ONCE(lid & 0xFFFF0000);
4597 return (u16)lid;
Hiatt, Don7db20ec2017-06-08 13:37:49 -04004598}
4599
Hiatt, Don62ede772017-08-14 14:17:43 -04004600/**
4601 * ib_lid_be16 - Return lid in 16bit BE encoding.
4602 *
4603 * @lid: A 32bit LID
4604 */
4605static inline __be16 ib_lid_be16(u32 lid)
Hiatt, Don7db20ec2017-06-08 13:37:49 -04004606{
Hiatt, Don62ede772017-08-14 14:17:43 -04004607 WARN_ON_ONCE(lid & 0xFFFF0000);
4608 return cpu_to_be16((u16)lid);
Hiatt, Don7db20ec2017-06-08 13:37:49 -04004609}
Doug Ledford32043832017-08-10 14:31:29 -04004610
Sagi Grimbergc66cd352017-07-13 11:09:41 +03004611/**
4612 * ib_get_vector_affinity - Get the affinity mappings of a given completion
4613 * vector
4614 * @device: the rdma device
4615 * @comp_vector: index of completion vector
4616 *
4617 * Returns NULL on failure, otherwise a corresponding cpu map of the
4618 * completion vector (returns all-cpus map if the device driver doesn't
4619 * implement get_vector_affinity).
4620 */
4621static inline const struct cpumask *
4622ib_get_vector_affinity(struct ib_device *device, int comp_vector)
4623{
4624 if (comp_vector < 0 || comp_vector >= device->num_comp_vectors ||
Kamal Heib3023a1e2018-12-10 21:09:48 +02004625 !device->ops.get_vector_affinity)
Sagi Grimbergc66cd352017-07-13 11:09:41 +03004626 return NULL;
4627
Kamal Heib3023a1e2018-12-10 21:09:48 +02004628 return device->ops.get_vector_affinity(device, comp_vector);
Sagi Grimbergc66cd352017-07-13 11:09:41 +03004629
4630}
4631
Daniel Jurgens32f69e42018-01-04 17:25:36 +02004632/**
4633 * rdma_roce_rescan_device - Rescan all of the network devices in the system
4634 * and add their gids, as needed, to the relevant RoCE devices.
4635 *
4636 * @device: the rdma device
4637 */
4638void rdma_roce_rescan_device(struct ib_device *ibdev);
4639
Jason Gunthorpe8313c102018-11-25 20:51:13 +02004640struct ib_ucontext *ib_uverbs_get_ucontext_file(struct ib_uverbs_file *ufile);
Yishai Hadas7dc08dc2018-06-17 12:59:59 +03004641
Jason Gunthorpe15a1b4b2018-11-25 20:51:15 +02004642int uverbs_destroy_def_handler(struct uverbs_attr_bundle *attrs);
Denis Drozdovf6a8a192018-08-14 14:08:51 +03004643
4644struct net_device *rdma_alloc_netdev(struct ib_device *device, u8 port_num,
4645 enum rdma_netdev_t type, const char *name,
4646 unsigned char name_assign_type,
4647 void (*setup)(struct net_device *));
Denis Drozdov5d6b0cb2018-08-14 14:22:35 +03004648
4649int rdma_init_netdev(struct ib_device *device, u8 port_num,
4650 enum rdma_netdev_t type, const char *name,
4651 unsigned char name_assign_type,
4652 void (*setup)(struct net_device *),
4653 struct net_device *netdev);
4654
Parav Panditd4122f52018-10-11 22:31:53 +03004655/**
4656 * rdma_set_device_sysfs_group - Set device attributes group to have
4657 * driver specific sysfs entries at
4658 * for infiniband class.
4659 *
4660 * @device: device pointer for which attributes to be created
4661 * @group: Pointer to group which should be added when device
4662 * is registered with sysfs.
4663 * rdma_set_device_sysfs_group() allows existing drivers to expose one
4664 * group per device to have sysfs attributes.
4665 *
4666 * NOTE: New drivers should not make use of this API; instead new device
4667 * parameter should be exposed via netlink command. This API and mechanism
4668 * exist only for existing drivers.
4669 */
4670static inline void
4671rdma_set_device_sysfs_group(struct ib_device *dev,
4672 const struct attribute_group *group)
4673{
4674 dev->groups[1] = group;
4675}
4676
Parav Pandit54747232018-12-18 14:15:56 +02004677/**
4678 * rdma_device_to_ibdev - Get ib_device pointer from device pointer
4679 *
4680 * @device: device pointer for which ib_device pointer to retrieve
4681 *
4682 * rdma_device_to_ibdev() retrieves ib_device pointer from device.
4683 *
4684 */
4685static inline struct ib_device *rdma_device_to_ibdev(struct device *device)
4686{
Parav Panditcebe5562019-02-26 13:56:11 +02004687 struct ib_core_device *coredev =
4688 container_of(device, struct ib_core_device, dev);
4689
4690 return coredev->owner;
Parav Pandit54747232018-12-18 14:15:56 +02004691}
4692
4693/**
4694 * rdma_device_to_drv_device - Helper macro to reach back to driver's
4695 * ib_device holder structure from device pointer.
4696 *
4697 * NOTE: New drivers should not make use of this API; This API is only for
4698 * existing drivers who have exposed sysfs entries using
4699 * rdma_set_device_sysfs_group().
4700 */
4701#define rdma_device_to_drv_device(dev, drv_dev_struct, ibdev_member) \
4702 container_of(rdma_device_to_ibdev(dev), drv_dev_struct, ibdev_member)
Parav Pandit41c61402019-02-26 14:01:46 +02004703
4704bool rdma_dev_access_netns(const struct ib_device *device,
4705 const struct net *net);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004706#endif /* IB_VERBS_H */