/*
 * Copyright (c) 2004 Mellanox Technologies Ltd.  All rights reserved.
 * Copyright (c) 2004 Infinicon Corporation.  All rights reserved.
 * Copyright (c) 2004 Intel Corporation.  All rights reserved.
 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
 * Copyright (c) 2004 Voltaire Corporation.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#if !defined(IB_VERBS_H)
#define IB_VERBS_H

#include <linux/types.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/rwsem.h>
#include <linux/workqueue.h>
#include <linux/irq_poll.h>
#include <uapi/linux/if_ether.h>
#include <net/ipv6.h>
#include <net/ip.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/refcount.h>
#include <linux/if_link.h>
#include <linux/atomic.h>
#include <linux/mmu_notifier.h>
#include <linux/uaccess.h>
#include <linux/cgroup_rdma.h>
#include <uapi/rdma/ib_user_verbs.h>
#include <rdma/restrack.h>
#include <uapi/rdma/rdma_user_ioctl.h>
#include <uapi/rdma/ib_user_ioctl_verbs.h>

#define IB_FW_VERSION_NAME_MAX	ETHTOOL_FWVERS_LEN

struct ib_umem_odp;

extern struct workqueue_struct *ib_wq;
extern struct workqueue_struct *ib_comp_wq;
extern struct workqueue_struct *ib_comp_unbound_wq;

union ib_gid {
	u8	raw[16];
	struct {
		__be64	subnet_prefix;
		__be64	interface_id;
	} global;
};

extern union ib_gid zgid;

enum ib_gid_type {
	/* If link layer is Ethernet, this is RoCE V1 */
	IB_GID_TYPE_IB        = 0,
	IB_GID_TYPE_ROCE      = 0,
	IB_GID_TYPE_ROCE_UDP_ENCAP = 1,
	IB_GID_TYPE_SIZE
};

#define ROCE_V2_UDP_DPORT      4791
struct ib_gid_attr {
	struct net_device	*ndev;
	struct ib_device	*device;
	union ib_gid		gid;
	enum ib_gid_type	gid_type;
	u16			index;
	u8			port_num;
};

enum rdma_node_type {
	/* IB values map to NodeInfo:NodeType. */
	RDMA_NODE_IB_CA	= 1,
	RDMA_NODE_IB_SWITCH,
	RDMA_NODE_IB_ROUTER,
	RDMA_NODE_RNIC,
	RDMA_NODE_USNIC,
	RDMA_NODE_USNIC_UDP,
};

enum {
	/* set the local administered indication */
	IB_SA_WELL_KNOWN_GUID	= BIT_ULL(57) | 2,
};

enum rdma_transport_type {
	RDMA_TRANSPORT_IB,
	RDMA_TRANSPORT_IWARP,
	RDMA_TRANSPORT_USNIC,
	RDMA_TRANSPORT_USNIC_UDP
};

enum rdma_protocol_type {
	RDMA_PROTOCOL_IB,
	RDMA_PROTOCOL_IBOE,
	RDMA_PROTOCOL_IWARP,
	RDMA_PROTOCOL_USNIC_UDP
};

__attribute_const__ enum rdma_transport_type
rdma_node_get_transport(enum rdma_node_type node_type);

enum rdma_network_type {
	RDMA_NETWORK_IB,
	RDMA_NETWORK_ROCE_V1 = RDMA_NETWORK_IB,
	RDMA_NETWORK_IPV4,
	RDMA_NETWORK_IPV6
};

static inline enum ib_gid_type ib_network_to_gid_type(enum rdma_network_type network_type)
{
	if (network_type == RDMA_NETWORK_IPV4 ||
	    network_type == RDMA_NETWORK_IPV6)
		return IB_GID_TYPE_ROCE_UDP_ENCAP;

	/* IB_GID_TYPE_IB same as RDMA_NETWORK_ROCE_V1 */
	return IB_GID_TYPE_IB;
}

static inline enum rdma_network_type
rdma_gid_attr_network_type(const struct ib_gid_attr *attr)
{
	if (attr->gid_type == IB_GID_TYPE_IB)
		return RDMA_NETWORK_IB;

	if (ipv6_addr_v4mapped((struct in6_addr *)&attr->gid))
		return RDMA_NETWORK_IPV4;
	else
		return RDMA_NETWORK_IPV6;
}

enum rdma_link_layer {
	IB_LINK_LAYER_UNSPECIFIED,
	IB_LINK_LAYER_INFINIBAND,
	IB_LINK_LAYER_ETHERNET,
};

enum ib_device_cap_flags {
	IB_DEVICE_RESIZE_MAX_WR			= (1 << 0),
	IB_DEVICE_BAD_PKEY_CNTR		= (1 << 1),
	IB_DEVICE_BAD_QKEY_CNTR		= (1 << 2),
	IB_DEVICE_RAW_MULTI			= (1 << 3),
	IB_DEVICE_AUTO_PATH_MIG		= (1 << 4),
	IB_DEVICE_CHANGE_PHY_PORT		= (1 << 5),
	IB_DEVICE_UD_AV_PORT_ENFORCE		= (1 << 6),
	IB_DEVICE_CURR_QP_STATE_MOD		= (1 << 7),
	IB_DEVICE_SHUTDOWN_PORT		= (1 << 8),
	/* Not in use, former INIT_TYPE	= (1 << 9),*/
	IB_DEVICE_PORT_ACTIVE_EVENT		= (1 << 10),
	IB_DEVICE_SYS_IMAGE_GUID		= (1 << 11),
	IB_DEVICE_RC_RNR_NAK_GEN		= (1 << 12),
	IB_DEVICE_SRQ_RESIZE			= (1 << 13),
	IB_DEVICE_N_NOTIFY_CQ			= (1 << 14),

	/*
	 * This device supports a per-device lkey or stag that can be
	 * used without performing a memory registration for the local
	 * memory.  Note that ULPs should never check this flag, but
	 * instead use the local_dma_lkey flag in the ib_pd structure,
	 * which will always contain a usable lkey.
	 */
	IB_DEVICE_LOCAL_DMA_LKEY		= (1 << 15),
	/* Reserved, old SEND_W_INV		= (1 << 16),*/
	IB_DEVICE_MEM_WINDOW			= (1 << 17),
	/*
	 * Devices should set IB_DEVICE_UD_IP_SUM if they support
	 * insertion of UDP and TCP checksum on outgoing UD IPoIB
	 * messages and can verify the validity of checksum for
	 * incoming messages.  Setting this flag implies that the
	 * IPoIB driver may set NETIF_F_IP_CSUM for datagram mode.
	 */
	IB_DEVICE_UD_IP_CSUM			= (1 << 18),
	IB_DEVICE_UD_TSO			= (1 << 19),
	IB_DEVICE_XRC				= (1 << 20),

	/*
	 * This device supports the IB "base memory management extension",
	 * which includes support for fast registrations (IB_WR_REG_MR,
	 * IB_WR_LOCAL_INV and IB_WR_SEND_WITH_INV verbs).  This flag should
	 * also be set by any iWarp device which must support FRs to comply
	 * with the iWarp verbs spec.  iWarp devices also support the
	 * IB_WR_RDMA_READ_WITH_INV verb for RDMA READs that invalidate the
	 * stag.
	 */
	IB_DEVICE_MEM_MGT_EXTENSIONS		= (1 << 21),
	IB_DEVICE_BLOCK_MULTICAST_LOOPBACK	= (1 << 22),
	IB_DEVICE_MEM_WINDOW_TYPE_2A		= (1 << 23),
	IB_DEVICE_MEM_WINDOW_TYPE_2B		= (1 << 24),
	IB_DEVICE_RC_IP_CSUM			= (1 << 25),
	/* Deprecated. Please use IB_RAW_PACKET_CAP_IP_CSUM. */
	IB_DEVICE_RAW_IP_CSUM			= (1 << 26),
	/*
	 * Devices should set IB_DEVICE_CROSS_CHANNEL if they
	 * support execution of WQEs that involve synchronization
	 * of I/O operations with single completion queue managed
	 * by hardware.
	 */
	IB_DEVICE_CROSS_CHANNEL		= (1 << 27),
	IB_DEVICE_MANAGED_FLOW_STEERING	= (1 << 29),
	IB_DEVICE_SIGNATURE_HANDOVER		= (1 << 30),
	IB_DEVICE_ON_DEMAND_PAGING		= (1ULL << 31),
	IB_DEVICE_SG_GAPS_REG			= (1ULL << 32),
	IB_DEVICE_VIRTUAL_FUNCTION		= (1ULL << 33),
	/* Deprecated. Please use IB_RAW_PACKET_CAP_SCATTER_FCS. */
	IB_DEVICE_RAW_SCATTER_FCS		= (1ULL << 34),
	IB_DEVICE_RDMA_NETDEV_OPA_VNIC		= (1ULL << 35),
	/* The device supports padding incoming writes to cacheline. */
	IB_DEVICE_PCI_WRITE_END_PADDING	= (1ULL << 36),
};
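
/*
 * Illustrative sketch, not an in-tree API: the bits above are reported in
 * ib_device_attr.device_cap_flags (declared further down in this header),
 * and a ULP would test them before relying on an optional feature.  The one
 * exception is IB_DEVICE_LOCAL_DMA_LKEY: per the comment above, consumers
 * should use ib_pd.local_dma_lkey rather than test that bit.  The helper
 * name below is hypothetical.
 */
static inline bool example_has_mem_mgt_ext(u64 device_cap_flags)
{
	return !!(device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS);
}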

enum ib_signature_prot_cap {
	IB_PROT_T10DIF_TYPE_1 = 1,
	IB_PROT_T10DIF_TYPE_2 = 1 << 1,
	IB_PROT_T10DIF_TYPE_3 = 1 << 2,
};

enum ib_signature_guard_cap {
	IB_GUARD_T10DIF_CRC	= 1,
	IB_GUARD_T10DIF_CSUM	= 1 << 1,
};

enum ib_atomic_cap {
	IB_ATOMIC_NONE,
	IB_ATOMIC_HCA,
	IB_ATOMIC_GLOB
};

enum ib_odp_general_cap_bits {
	IB_ODP_SUPPORT		= 1 << 0,
	IB_ODP_SUPPORT_IMPLICIT = 1 << 1,
};

enum ib_odp_transport_cap_bits {
	IB_ODP_SUPPORT_SEND	= 1 << 0,
	IB_ODP_SUPPORT_RECV	= 1 << 1,
	IB_ODP_SUPPORT_WRITE	= 1 << 2,
	IB_ODP_SUPPORT_READ	= 1 << 3,
	IB_ODP_SUPPORT_ATOMIC	= 1 << 4,
};

struct ib_odp_caps {
	uint64_t general_caps;
	struct {
		uint32_t  rc_odp_caps;
		uint32_t  uc_odp_caps;
		uint32_t  ud_odp_caps;
	} per_transport_caps;
};

struct ib_rss_caps {
	/* Corresponding bit will be set if qp type from
	 * 'enum ib_qp_type' is supported, e.g.
	 * supported_qpts |= 1 << IB_QPT_UD
	 */
	u32 supported_qpts;
	u32 max_rwq_indirection_tables;
	u32 max_rwq_indirection_table_size;
};

enum ib_tm_cap_flags {
	/*  Support tag matching on RC transport */
	IB_TM_CAP_RC		    = 1 << 0,
};

struct ib_tm_caps {
	/* Max size of RNDV header */
	u32 max_rndv_hdr_size;
	/* Max number of entries in tag matching list */
	u32 max_num_tags;
	/* From enum ib_tm_cap_flags */
	u32 flags;
	/* Max number of outstanding list operations */
	u32 max_ops;
	/* Max number of SGE in tag matching entry */
	u32 max_sge;
};

struct ib_cq_init_attr {
	unsigned int	cqe;
	int		comp_vector;
	u32		flags;
};

enum ib_cq_attr_mask {
	IB_CQ_MODERATE = 1 << 0,
};

struct ib_cq_caps {
	u16     max_cq_moderation_count;
	u16     max_cq_moderation_period;
};

struct ib_dm_mr_attr {
	u64		length;
	u64		offset;
	u32		access_flags;
};

struct ib_dm_alloc_attr {
	u64	length;
	u32	alignment;
	u32	flags;
};

struct ib_device_attr {
	u64			fw_ver;
	__be64			sys_image_guid;
	u64			max_mr_size;
	u64			page_size_cap;
	u32			vendor_id;
	u32			vendor_part_id;
	u32			hw_ver;
	int			max_qp;
	int			max_qp_wr;
	u64			device_cap_flags;
	int			max_send_sge;
	int			max_recv_sge;
	int			max_sge_rd;
	int			max_cq;
	int			max_cqe;
	int			max_mr;
	int			max_pd;
	int			max_qp_rd_atom;
	int			max_ee_rd_atom;
	int			max_res_rd_atom;
	int			max_qp_init_rd_atom;
	int			max_ee_init_rd_atom;
	enum ib_atomic_cap	atomic_cap;
	enum ib_atomic_cap	masked_atomic_cap;
	int			max_ee;
	int			max_rdd;
	int			max_mw;
	int			max_raw_ipv6_qp;
	int			max_raw_ethy_qp;
	int			max_mcast_grp;
	int			max_mcast_qp_attach;
	int			max_total_mcast_qp_attach;
	int			max_ah;
	int			max_fmr;
	int			max_map_per_fmr;
	int			max_srq;
	int			max_srq_wr;
	int			max_srq_sge;
	unsigned int		max_fast_reg_page_list_len;
	u16			max_pkeys;
	u8			local_ca_ack_delay;
	int			sig_prot_cap;
	int			sig_guard_cap;
	struct ib_odp_caps	odp_caps;
	uint64_t		timestamp_mask;
	uint64_t		hca_core_clock; /* in KHZ */
	struct ib_rss_caps	rss_caps;
	u32			max_wq_type_rq;
	u32			raw_packet_caps; /* Use ib_raw_packet_caps enum */
	struct ib_tm_caps	tm_caps;
	struct ib_cq_caps       cq_caps;
	u64			max_dm_size;
};

enum ib_mtu {
	IB_MTU_256  = 1,
	IB_MTU_512  = 2,
	IB_MTU_1024 = 3,
	IB_MTU_2048 = 4,
	IB_MTU_4096 = 5
};

static inline int ib_mtu_enum_to_int(enum ib_mtu mtu)
{
	switch (mtu) {
	case IB_MTU_256:  return  256;
	case IB_MTU_512:  return  512;
	case IB_MTU_1024: return 1024;
	case IB_MTU_2048: return 2048;
	case IB_MTU_4096: return 4096;
	default: 	  return -1;
	}
}

static inline enum ib_mtu ib_mtu_int_to_enum(int mtu)
{
	if (mtu >= 4096)
		return IB_MTU_4096;
	else if (mtu >= 2048)
		return IB_MTU_2048;
	else if (mtu >= 1024)
		return IB_MTU_1024;
	else if (mtu >= 512)
		return IB_MTU_512;
	else
		return IB_MTU_256;
}

enum ib_port_state {
	IB_PORT_NOP		= 0,
	IB_PORT_DOWN		= 1,
	IB_PORT_INIT		= 2,
	IB_PORT_ARMED		= 3,
	IB_PORT_ACTIVE		= 4,
	IB_PORT_ACTIVE_DEFER	= 5
};

enum ib_port_width {
	IB_WIDTH_1X	= 1,
	IB_WIDTH_4X	= 2,
	IB_WIDTH_8X	= 4,
	IB_WIDTH_12X	= 8
};

static inline int ib_width_enum_to_int(enum ib_port_width width)
{
	switch (width) {
	case IB_WIDTH_1X:  return  1;
	case IB_WIDTH_4X:  return  4;
	case IB_WIDTH_8X:  return  8;
	case IB_WIDTH_12X: return 12;
	default: 	  return -1;
	}
}

enum ib_port_speed {
	IB_SPEED_SDR	= 1,
	IB_SPEED_DDR	= 2,
	IB_SPEED_QDR	= 4,
	IB_SPEED_FDR10	= 8,
	IB_SPEED_FDR	= 16,
	IB_SPEED_EDR	= 32,
	IB_SPEED_HDR	= 64
};

/**
 * struct rdma_hw_stats
 * @lock - Mutex to protect parallel write access to lifespan and values
 *   of counters, which are 64-bit and not guaranteed to be written
 *   atomically on 32-bit systems.
 * @timestamp - Used by the core code to track when the last update was
 * @lifespan - Used by the core code to determine how old the counters
 *   should be before being updated again.  Stored in jiffies, defaults
 *   to 10 milliseconds, drivers can override the default by specifying
 *   their own value during their allocation routine.
 * @name - Array of pointers to static names used for the counters in the
 *   sysfs directory.
 * @num_counters - How many hardware counters there are.  If name is
 *   shorter than this number, a kernel oops will result.  Driver authors
 *   are encouraged to leave BUILD_BUG_ON(ARRAY_SIZE(@name) < num_counters)
 *   in their code to prevent this.
 * @value - Array of u64 counters that are accessed by the sysfs code and
 *   filled in by the driver's get_stats routine
 */
struct rdma_hw_stats {
	struct mutex	lock; /* Protect lifespan and values[] */
	unsigned long	timestamp;
	unsigned long	lifespan;
	const char * const *names;
	int		num_counters;
	u64		value[];
};

#define RDMA_HW_STATS_DEFAULT_LIFESPAN 10
/**
 * rdma_alloc_hw_stats_struct - Helper function to allocate dynamic struct
 *   for drivers.
 * @names - Array of static const char *
 * @num_counters - How many elements in array
 * @lifespan - How many milliseconds between updates
 */
static inline struct rdma_hw_stats *rdma_alloc_hw_stats_struct(
		const char * const *names, int num_counters,
		unsigned long lifespan)
{
	struct rdma_hw_stats *stats;

	stats = kzalloc(sizeof(*stats) + num_counters * sizeof(u64),
			GFP_KERNEL);
	if (!stats)
		return NULL;
	stats->names = names;
	stats->num_counters = num_counters;
	stats->lifespan = msecs_to_jiffies(lifespan);

	return stats;
}

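/*
 * Usage sketch for rdma_alloc_hw_stats_struct(): a driver's alloc_hw_stats()
 * callback typically wraps it like this, passing a static array of counter
 * names.  The helper below is purely illustrative, not an in-tree API.
 */
static inline struct rdma_hw_stats *
example_alloc_port_stats(const char * const *names, int num_counters)
{
	/* refresh counters at most every RDMA_HW_STATS_DEFAULT_LIFESPAN msecs */
	return rdma_alloc_hw_stats_struct(names, num_counters,
					  RDMA_HW_STATS_DEFAULT_LIFESPAN);
}
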
/* Define bits for the various functionality this port needs to be supported by
 * the core.
 */
/* Management                           0x00000FFF */
#define RDMA_CORE_CAP_IB_MAD            0x00000001
#define RDMA_CORE_CAP_IB_SMI            0x00000002
#define RDMA_CORE_CAP_IB_CM             0x00000004
#define RDMA_CORE_CAP_IW_CM             0x00000008
#define RDMA_CORE_CAP_IB_SA             0x00000010
#define RDMA_CORE_CAP_OPA_MAD           0x00000020

/* Address format                       0x000FF000 */
#define RDMA_CORE_CAP_AF_IB             0x00001000
#define RDMA_CORE_CAP_ETH_AH            0x00002000
#define RDMA_CORE_CAP_OPA_AH            0x00004000
#define RDMA_CORE_CAP_IB_GRH_REQUIRED   0x00008000

/* Protocol                             0xFFF00000 */
#define RDMA_CORE_CAP_PROT_IB           0x00100000
#define RDMA_CORE_CAP_PROT_ROCE         0x00200000
#define RDMA_CORE_CAP_PROT_IWARP        0x00400000
#define RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP 0x00800000
#define RDMA_CORE_CAP_PROT_RAW_PACKET   0x01000000
#define RDMA_CORE_CAP_PROT_USNIC        0x02000000

#define RDMA_CORE_PORT_IB_GRH_REQUIRED (RDMA_CORE_CAP_IB_GRH_REQUIRED \
					| RDMA_CORE_CAP_PROT_ROCE     \
					| RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP)

#define RDMA_CORE_PORT_IBA_IB          (RDMA_CORE_CAP_PROT_IB  \
					| RDMA_CORE_CAP_IB_MAD \
					| RDMA_CORE_CAP_IB_SMI \
					| RDMA_CORE_CAP_IB_CM  \
					| RDMA_CORE_CAP_IB_SA  \
					| RDMA_CORE_CAP_AF_IB)
#define RDMA_CORE_PORT_IBA_ROCE        (RDMA_CORE_CAP_PROT_ROCE \
					| RDMA_CORE_CAP_IB_MAD  \
					| RDMA_CORE_CAP_IB_CM   \
					| RDMA_CORE_CAP_AF_IB   \
					| RDMA_CORE_CAP_ETH_AH)
#define RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP			\
					(RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP \
					| RDMA_CORE_CAP_IB_MAD  \
					| RDMA_CORE_CAP_IB_CM   \
					| RDMA_CORE_CAP_AF_IB   \
					| RDMA_CORE_CAP_ETH_AH)
#define RDMA_CORE_PORT_IWARP           (RDMA_CORE_CAP_PROT_IWARP \
					| RDMA_CORE_CAP_IW_CM)
#define RDMA_CORE_PORT_INTEL_OPA       (RDMA_CORE_PORT_IBA_IB  \
					| RDMA_CORE_CAP_OPA_MAD)

#define RDMA_CORE_PORT_RAW_PACKET      (RDMA_CORE_CAP_PROT_RAW_PACKET)

#define RDMA_CORE_PORT_USNIC           (RDMA_CORE_CAP_PROT_USNIC)

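/*
 * Illustrative sketch: a driver advertises one of the composite port
 * capability values above via its get_port_immutable() callback (struct
 * ib_port_immutable is declared further down in this header), and the core
 * derives the rdma_cap_*() / rdma_protocol_*() answers from these bits.
 * The helper below is hypothetical, not an in-tree API.
 */
static inline bool example_core_cap_is_roce(u32 core_cap_flags)
{
	return !!(core_cap_flags & (RDMA_CORE_CAP_PROT_ROCE |
				    RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP));
}
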
struct ib_port_attr {
	u64			subnet_prefix;
	enum ib_port_state	state;
	enum ib_mtu		max_mtu;
	enum ib_mtu		active_mtu;
	int			gid_tbl_len;
	unsigned int		ip_gids:1;
	/* This is the value from PortInfo CapabilityMask, defined by IBA */
	u32			port_cap_flags;
	u32			max_msg_sz;
	u32			bad_pkey_cntr;
	u32			qkey_viol_cntr;
	u16			pkey_tbl_len;
	u32			sm_lid;
	u32			lid;
	u8			lmc;
	u8			max_vl_num;
	u8			sm_sl;
	u8			subnet_timeout;
	u8			init_type_reply;
	u8			active_width;
	u8			active_speed;
	u8                      phys_state;
	u16			port_cap_flags2;
};

enum ib_device_modify_flags {
	IB_DEVICE_MODIFY_SYS_IMAGE_GUID	= 1 << 0,
	IB_DEVICE_MODIFY_NODE_DESC	= 1 << 1
};

#define IB_DEVICE_NODE_DESC_MAX 64

struct ib_device_modify {
	u64	sys_image_guid;
	char	node_desc[IB_DEVICE_NODE_DESC_MAX];
};

enum ib_port_modify_flags {
	IB_PORT_SHUTDOWN		= 1,
	IB_PORT_INIT_TYPE		= (1<<2),
	IB_PORT_RESET_QKEY_CNTR		= (1<<3),
	IB_PORT_OPA_MASK_CHG		= (1<<4)
};

struct ib_port_modify {
	u32	set_port_cap_mask;
	u32	clr_port_cap_mask;
	u8	init_type;
};

enum ib_event_type {
	IB_EVENT_CQ_ERR,
	IB_EVENT_QP_FATAL,
	IB_EVENT_QP_REQ_ERR,
	IB_EVENT_QP_ACCESS_ERR,
	IB_EVENT_COMM_EST,
	IB_EVENT_SQ_DRAINED,
	IB_EVENT_PATH_MIG,
	IB_EVENT_PATH_MIG_ERR,
	IB_EVENT_DEVICE_FATAL,
	IB_EVENT_PORT_ACTIVE,
	IB_EVENT_PORT_ERR,
	IB_EVENT_LID_CHANGE,
	IB_EVENT_PKEY_CHANGE,
	IB_EVENT_SM_CHANGE,
	IB_EVENT_SRQ_ERR,
	IB_EVENT_SRQ_LIMIT_REACHED,
	IB_EVENT_QP_LAST_WQE_REACHED,
	IB_EVENT_CLIENT_REREGISTER,
	IB_EVENT_GID_CHANGE,
	IB_EVENT_WQ_FATAL,
};

const char *__attribute_const__ ib_event_msg(enum ib_event_type event);

struct ib_event {
	struct ib_device	*device;
	union {
		struct ib_cq	*cq;
		struct ib_qp	*qp;
		struct ib_srq	*srq;
		struct ib_wq	*wq;
		u8		port_num;
	} element;
	enum ib_event_type	event;
};

struct ib_event_handler {
	struct ib_device *device;
	void            (*handler)(struct ib_event_handler *, struct ib_event *);
	struct list_head  list;
};

#define INIT_IB_EVENT_HANDLER(_ptr, _device, _handler)		\
	do {							\
		(_ptr)->device  = _device;			\
		(_ptr)->handler = _handler;			\
		INIT_LIST_HEAD(&(_ptr)->list);			\
	} while (0)

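/*
 * Usage sketch for INIT_IB_EVENT_HANDLER (illustrative; the handler below is
 * hypothetical).  After initialization, the handler would be registered with
 * ib_register_event_handler(), which is declared further down in this header:
 *
 *	INIT_IB_EVENT_HANDLER(&priv->event_handler, device, example_on_event);
 *	ib_register_event_handler(&priv->event_handler);
 */
static inline void example_on_event(struct ib_event_handler *handler,
				    struct ib_event *event)
{
	if (event->event == IB_EVENT_PORT_ACTIVE)
		pr_info("port %u became active\n", event->element.port_num);
}
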
struct ib_global_route {
	const struct ib_gid_attr *sgid_attr;
	union ib_gid	dgid;
	u32		flow_label;
	u8		sgid_index;
	u8		hop_limit;
	u8		traffic_class;
};

struct ib_grh {
	__be32		version_tclass_flow;
	__be16		paylen;
	u8		next_hdr;
	u8		hop_limit;
	union ib_gid	sgid;
	union ib_gid	dgid;
};

union rdma_network_hdr {
	struct ib_grh ibgrh;
	struct {
		/* The IB spec states that if it's IPv4, the header
		 * is located in the last 20 bytes of the header.
		 */
		u8		reserved[20];
		struct iphdr	roce4grh;
	};
};

#define IB_QPN_MASK		0xFFFFFF

enum {
	IB_MULTICAST_QPN = 0xffffff
};

#define IB_LID_PERMISSIVE	cpu_to_be16(0xFFFF)
#define IB_MULTICAST_LID_BASE	cpu_to_be16(0xC000)

enum ib_ah_flags {
	IB_AH_GRH	= 1
};

enum ib_rate {
	IB_RATE_PORT_CURRENT = 0,
	IB_RATE_2_5_GBPS = 2,
	IB_RATE_5_GBPS   = 5,
	IB_RATE_10_GBPS  = 3,
	IB_RATE_20_GBPS  = 6,
	IB_RATE_30_GBPS  = 4,
	IB_RATE_40_GBPS  = 7,
	IB_RATE_60_GBPS  = 8,
	IB_RATE_80_GBPS  = 9,
	IB_RATE_120_GBPS = 10,
	IB_RATE_14_GBPS  = 11,
	IB_RATE_56_GBPS  = 12,
	IB_RATE_112_GBPS = 13,
	IB_RATE_168_GBPS = 14,
	IB_RATE_25_GBPS  = 15,
	IB_RATE_100_GBPS = 16,
	IB_RATE_200_GBPS = 17,
	IB_RATE_300_GBPS = 18
};

/**
 * ib_rate_to_mult - Convert the IB rate enum to a multiple of the
 * base rate of 2.5 Gbit/sec.  For example, IB_RATE_5_GBPS will be
 * converted to 2, since 5 Gbit/sec is 2 * 2.5 Gbit/sec.
 * @rate: rate to convert.
 */
__attribute_const__ int ib_rate_to_mult(enum ib_rate rate);

/**
 * ib_rate_to_mbps - Convert the IB rate enum to Mbps.
 * For example, IB_RATE_2_5_GBPS will be converted to 2500.
 * @rate: rate to convert.
 */
__attribute_const__ int ib_rate_to_mbps(enum ib_rate rate);

/**
 * enum ib_mr_type - memory region type
 * @IB_MR_TYPE_MEM_REG:       memory region that is used for
 *                            normal registration
 * @IB_MR_TYPE_SIGNATURE:     memory region that is used for
 *                            signature operations (data-integrity
 *                            capable regions)
 * @IB_MR_TYPE_SG_GAPS:       memory region that is capable of registering
 *                            arbitrary SG lists (without the normal MR
 *                            constraints - see ib_map_mr_sg)
 */
enum ib_mr_type {
	IB_MR_TYPE_MEM_REG,
	IB_MR_TYPE_SIGNATURE,
	IB_MR_TYPE_SG_GAPS,
};

/**
 * Signature types
 * IB_SIG_TYPE_NONE: Unprotected.
 * IB_SIG_TYPE_T10_DIF: Type T10-DIF
 */
enum ib_signature_type {
	IB_SIG_TYPE_NONE,
	IB_SIG_TYPE_T10_DIF,
};

/**
 * Signature T10-DIF block-guard types
 * IB_T10DIF_CRC: Corresponds to T10-PI mandated CRC checksum rules.
 * IB_T10DIF_CSUM: Corresponds to IP checksum rules.
 */
enum ib_t10_dif_bg_type {
	IB_T10DIF_CRC,
	IB_T10DIF_CSUM
};

/**
 * struct ib_t10_dif_domain - Parameters specific for T10-DIF
 *     domain.
 * @bg_type: T10-DIF block guard type (CRC|CSUM)
 * @pi_interval: protection information interval.
 * @bg: seed of guard computation.
 * @app_tag: application tag of guard block
 * @ref_tag: initial guard block reference tag.
 * @ref_remap: Indicate whether the reftag increments each block
 * @app_escape: Indicate to skip block check if apptag=0xffff
 * @ref_escape: Indicate to skip block check if reftag=0xffffffff
 * @apptag_check_mask: check bitmask of application tag.
 */
struct ib_t10_dif_domain {
	enum ib_t10_dif_bg_type bg_type;
	u16			pi_interval;
	u16			bg;
	u16			app_tag;
	u32			ref_tag;
	bool			ref_remap;
	bool			app_escape;
	bool			ref_escape;
	u16			apptag_check_mask;
};

/**
 * struct ib_sig_domain - Parameters for signature domain
 * @sig_type: specific signature type
 * @sig: union of all signature domain attributes that may
 *     be used to set domain layout.
 */
struct ib_sig_domain {
	enum ib_signature_type sig_type;
	union {
		struct ib_t10_dif_domain dif;
	} sig;
};

/**
 * struct ib_sig_attrs - Parameters for signature handover operation
 * @check_mask: bitmask for signature byte check (8 bytes)
 * @mem: memory domain layout descriptor.
 * @wire: wire domain layout descriptor.
 */
struct ib_sig_attrs {
	u8			check_mask;
	struct ib_sig_domain	mem;
	struct ib_sig_domain	wire;
};

enum ib_sig_err_type {
	IB_SIG_BAD_GUARD,
	IB_SIG_BAD_REFTAG,
	IB_SIG_BAD_APPTAG,
};

/**
 * Signature check masks (8 bytes in total) according to the T10-PI standard:
 *  -------- -------- ------------
 * | GUARD  | APPTAG |   REFTAG   |
 * |  2B    |  2B    |    4B      |
 *  -------- -------- ------------
 */
enum {
	IB_SIG_CHECK_GUARD	= 0xc0,
	IB_SIG_CHECK_APPTAG	= 0x30,
	IB_SIG_CHECK_REFTAG	= 0x0f,
};

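/*
 * Sketch of configuring a signature handover with the structures above:
 * memory left unprotected, wire carrying T10-DIF type-1-style protection
 * (CRC guard, 512-byte interval, incrementing reference tag).  The helper
 * and all field choices are illustrative, not an in-tree API; storage ULPs
 * (e.g. iSER) set attributes like these on an IB_WR_REG_SIG_MR request.
 */
static inline void example_set_t10dif_wire(struct ib_sig_attrs *sig_attrs,
					   u32 first_lba)
{
	memset(sig_attrs, 0, sizeof(*sig_attrs));
	sig_attrs->mem.sig_type = IB_SIG_TYPE_NONE;	/* no protection in memory */
	sig_attrs->wire.sig_type = IB_SIG_TYPE_T10_DIF;
	sig_attrs->wire.sig.dif.bg_type = IB_T10DIF_CRC;
	sig_attrs->wire.sig.dif.pi_interval = 512;	/* PI every 512-byte block */
	sig_attrs->wire.sig.dif.ref_tag = first_lba;	/* initial reference tag */
	sig_attrs->wire.sig.dif.ref_remap = true;	/* reftag increments per block */
	sig_attrs->check_mask = IB_SIG_CHECK_GUARD | IB_SIG_CHECK_REFTAG;
}
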
/**
 * struct ib_sig_err - signature error descriptor
 */
struct ib_sig_err {
	enum ib_sig_err_type	err_type;
	u32			expected;
	u32			actual;
	u64			sig_err_offset;
	u32			key;
};

enum ib_mr_status_check {
	IB_MR_CHECK_SIG_STATUS = 1,
};

/**
 * struct ib_mr_status - Memory region status container
 *
 * @fail_status: Bitmask of MR checks status. For each
 *     failed check a corresponding status bit is set.
 * @sig_err: Additional info for IB_MR_CHECK_SIG_STATUS
 *     failure.
 */
struct ib_mr_status {
	u32		    fail_status;
	struct ib_sig_err   sig_err;
};

/**
 * mult_to_ib_rate - Convert a multiple of 2.5 Gbit/sec to an IB rate
 * enum.
 * @mult: multiple to convert.
 */
__attribute_const__ enum ib_rate mult_to_ib_rate(int mult);

enum rdma_ah_attr_type {
	RDMA_AH_ATTR_TYPE_UNDEFINED,
	RDMA_AH_ATTR_TYPE_IB,
	RDMA_AH_ATTR_TYPE_ROCE,
	RDMA_AH_ATTR_TYPE_OPA,
};

struct ib_ah_attr {
	u16			dlid;
	u8			src_path_bits;
};

struct roce_ah_attr {
	u8			dmac[ETH_ALEN];
};

struct opa_ah_attr {
	u32			dlid;
	u8			src_path_bits;
	bool			make_grd;
};

struct rdma_ah_attr {
	struct ib_global_route	grh;
	u8			sl;
	u8			static_rate;
	u8			port_num;
	u8			ah_flags;
	enum rdma_ah_attr_type type;
	union {
		struct ib_ah_attr ib;
		struct roce_ah_attr roce;
		struct opa_ah_attr opa;
	};
};

enum ib_wc_status {
	IB_WC_SUCCESS,
	IB_WC_LOC_LEN_ERR,
	IB_WC_LOC_QP_OP_ERR,
	IB_WC_LOC_EEC_OP_ERR,
	IB_WC_LOC_PROT_ERR,
	IB_WC_WR_FLUSH_ERR,
	IB_WC_MW_BIND_ERR,
	IB_WC_BAD_RESP_ERR,
	IB_WC_LOC_ACCESS_ERR,
	IB_WC_REM_INV_REQ_ERR,
	IB_WC_REM_ACCESS_ERR,
	IB_WC_REM_OP_ERR,
	IB_WC_RETRY_EXC_ERR,
	IB_WC_RNR_RETRY_EXC_ERR,
	IB_WC_LOC_RDD_VIOL_ERR,
	IB_WC_REM_INV_RD_REQ_ERR,
	IB_WC_REM_ABORT_ERR,
	IB_WC_INV_EECN_ERR,
	IB_WC_INV_EEC_STATE_ERR,
	IB_WC_FATAL_ERR,
	IB_WC_RESP_TIMEOUT_ERR,
	IB_WC_GENERAL_ERR
};

const char *__attribute_const__ ib_wc_status_msg(enum ib_wc_status status);

enum ib_wc_opcode {
	IB_WC_SEND,
	IB_WC_RDMA_WRITE,
	IB_WC_RDMA_READ,
	IB_WC_COMP_SWAP,
	IB_WC_FETCH_ADD,
	IB_WC_LSO,
	IB_WC_LOCAL_INV,
	IB_WC_REG_MR,
	IB_WC_MASKED_COMP_SWAP,
	IB_WC_MASKED_FETCH_ADD,
/*
 * Set value of IB_WC_RECV so consumers can test if a completion is a
 * receive by testing (opcode & IB_WC_RECV).
 */
	IB_WC_RECV			= 1 << 7,
	IB_WC_RECV_RDMA_WITH_IMM
};

enum ib_wc_flags {
	IB_WC_GRH		= 1,
	IB_WC_WITH_IMM		= (1<<1),
	IB_WC_WITH_INVALIDATE	= (1<<2),
	IB_WC_IP_CSUM_OK	= (1<<3),
	IB_WC_WITH_SMAC		= (1<<4),
	IB_WC_WITH_VLAN		= (1<<5),
	IB_WC_WITH_NETWORK_HDR_TYPE	= (1<<6),
};

struct ib_wc {
	union {
		u64		wr_id;
		struct ib_cqe	*wr_cqe;
	};
	enum ib_wc_status	status;
	enum ib_wc_opcode	opcode;
	u32			vendor_err;
	u32			byte_len;
	struct ib_qp	       *qp;
	union {
		__be32		imm_data;
		u32		invalidate_rkey;
	} ex;
	u32			src_qp;
	u32			slid;
	int			wc_flags;
	u16			pkey_index;
	u8			sl;
	u8			dlid_path_bits;
	u8			port_num;	/* valid only for DR SMPs on switches */
	u8			smac[ETH_ALEN];
	u16			vlan_id;
	u8			network_hdr_type;
};

enum ib_cq_notify_flags {
	IB_CQ_SOLICITED			= 1 << 0,
	IB_CQ_NEXT_COMP			= 1 << 1,
	IB_CQ_SOLICITED_MASK		= IB_CQ_SOLICITED | IB_CQ_NEXT_COMP,
	IB_CQ_REPORT_MISSED_EVENTS	= 1 << 2,
};

enum ib_srq_type {
	IB_SRQT_BASIC,
	IB_SRQT_XRC,
	IB_SRQT_TM,
};

static inline bool ib_srq_has_cq(enum ib_srq_type srq_type)
{
	return srq_type == IB_SRQT_XRC ||
	       srq_type == IB_SRQT_TM;
}

enum ib_srq_attr_mask {
	IB_SRQ_MAX_WR	= 1 << 0,
	IB_SRQ_LIMIT	= 1 << 1,
};

struct ib_srq_attr {
	u32	max_wr;
	u32	max_sge;
	u32	srq_limit;
};

struct ib_srq_init_attr {
	void		      (*event_handler)(struct ib_event *, void *);
	void		       *srq_context;
	struct ib_srq_attr	attr;
	enum ib_srq_type	srq_type;

	struct {
		struct ib_cq   *cq;
		union {
			struct {
				struct ib_xrcd *xrcd;
			} xrc;

			struct {
				u32		max_num_tags;
			} tag_matching;
		};
	} ext;
};

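/*
 * Sketch: filling struct ib_srq_init_attr for a basic (non-XRC, non-TM) SRQ.
 * The limits are arbitrary example values and the helper is hypothetical;
 * the SRQ itself would then be created with ib_create_srq(), declared
 * further down in this header.
 */
static inline void example_init_srq_attr(struct ib_srq_init_attr *init_attr,
					 void *srq_context)
{
	memset(init_attr, 0, sizeof(*init_attr));
	init_attr->srq_context = srq_context;
	init_attr->srq_type = IB_SRQT_BASIC;	/* ext.cq needed only if ib_srq_has_cq() */
	init_attr->attr.max_wr = 256;		/* example receive depth */
	init_attr->attr.max_sge = 1;
	init_attr->attr.srq_limit = 0;		/* no SRQ limit event */
}
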
struct ib_qp_cap {
	u32	max_send_wr;
	u32	max_recv_wr;
	u32	max_send_sge;
	u32	max_recv_sge;
	u32	max_inline_data;

	/*
	 * Maximum number of rdma_rw_ctx structures in flight at a time.
	 * ib_create_qp() will calculate the right amount of needed WRs
	 * and MRs based on this.
	 */
	u32	max_rdma_ctxs;
};

enum ib_sig_type {
	IB_SIGNAL_ALL_WR,
	IB_SIGNAL_REQ_WR
};

enum ib_qp_type {
	/*
	 * IB_QPT_SMI and IB_QPT_GSI have to be the first two entries
	 * here (and in that order) since the MAD layer uses them as
	 * indices into a 2-entry table.
	 */
	IB_QPT_SMI,
	IB_QPT_GSI,

	IB_QPT_RC,
	IB_QPT_UC,
	IB_QPT_UD,
	IB_QPT_RAW_IPV6,
	IB_QPT_RAW_ETHERTYPE,
	IB_QPT_RAW_PACKET = 8,
	IB_QPT_XRC_INI = 9,
	IB_QPT_XRC_TGT,
	IB_QPT_MAX,
	IB_QPT_DRIVER = 0xFF,
	/* Reserve a range for qp types internal to the low level driver.
	 * These qp types will not be visible at the IB core layer, so the
	 * IB_QPT_MAX usages should not be affected in the core layer
	 */
	IB_QPT_RESERVED1 = 0x1000,
	IB_QPT_RESERVED2,
	IB_QPT_RESERVED3,
	IB_QPT_RESERVED4,
	IB_QPT_RESERVED5,
	IB_QPT_RESERVED6,
	IB_QPT_RESERVED7,
	IB_QPT_RESERVED8,
	IB_QPT_RESERVED9,
	IB_QPT_RESERVED10,
};

enum ib_qp_create_flags {
	IB_QP_CREATE_IPOIB_UD_LSO		= 1 << 0,
	IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK	= 1 << 1,
	IB_QP_CREATE_CROSS_CHANNEL              = 1 << 2,
	IB_QP_CREATE_MANAGED_SEND               = 1 << 3,
	IB_QP_CREATE_MANAGED_RECV               = 1 << 4,
	IB_QP_CREATE_NETIF_QP			= 1 << 5,
	IB_QP_CREATE_SIGNATURE_EN		= 1 << 6,
	/* FREE					= 1 << 7, */
	IB_QP_CREATE_SCATTER_FCS		= 1 << 8,
	IB_QP_CREATE_CVLAN_STRIPPING		= 1 << 9,
	IB_QP_CREATE_SOURCE_QPN			= 1 << 10,
	IB_QP_CREATE_PCI_WRITE_END_PADDING	= 1 << 11,
	/* reserve bits 26-31 for low level drivers' internal use */
	IB_QP_CREATE_RESERVED_START		= 1 << 26,
	IB_QP_CREATE_RESERVED_END		= 1 << 31,
};

/*
 * Note: users may not call ib_close_qp or ib_destroy_qp from the event_handler
 * callback to destroy the passed in QP.
 */

struct ib_qp_init_attr {
	/* Consumer's event_handler callback must not block */
	void                  (*event_handler)(struct ib_event *, void *);

	void		       *qp_context;
	struct ib_cq	       *send_cq;
	struct ib_cq	       *recv_cq;
	struct ib_srq	       *srq;
	struct ib_xrcd	       *xrcd;     /* XRC TGT QPs only */
	struct ib_qp_cap	cap;
	enum ib_sig_type	sq_sig_type;
	enum ib_qp_type		qp_type;
	u32			create_flags;

	/*
	 * Only needed for special QP types, or when using the RW API.
	 */
	u8			port_num;
	struct ib_rwq_ind_table *rwq_ind_tbl;
	u32			source_qpn;
};

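/*
 * Sketch: minimal ib_qp_init_attr setup for an RC QP with a shared CQ.
 * Sizes are illustrative and the helper is hypothetical; the attribute
 * block would be passed to ib_create_qp(), declared further down in this
 * header.
 */
static inline void example_init_rc_qp_attr(struct ib_qp_init_attr *init_attr,
					   struct ib_cq *cq)
{
	memset(init_attr, 0, sizeof(*init_attr));
	init_attr->send_cq = cq;
	init_attr->recv_cq = cq;	/* send and receive may share one CQ */
	init_attr->qp_type = IB_QPT_RC;
	init_attr->sq_sig_type = IB_SIGNAL_REQ_WR;	/* signal only flagged WRs */
	init_attr->cap.max_send_wr = 128;
	init_attr->cap.max_recv_wr = 128;
	init_attr->cap.max_send_sge = 1;
	init_attr->cap.max_recv_sge = 1;
}
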
struct ib_qp_open_attr {
	void                  (*event_handler)(struct ib_event *, void *);
	void		       *qp_context;
	u32			qp_num;
	enum ib_qp_type		qp_type;
};

enum ib_rnr_timeout {
	IB_RNR_TIMER_655_36 =  0,
	IB_RNR_TIMER_000_01 =  1,
	IB_RNR_TIMER_000_02 =  2,
	IB_RNR_TIMER_000_03 =  3,
	IB_RNR_TIMER_000_04 =  4,
	IB_RNR_TIMER_000_06 =  5,
	IB_RNR_TIMER_000_08 =  6,
	IB_RNR_TIMER_000_12 =  7,
	IB_RNR_TIMER_000_16 =  8,
	IB_RNR_TIMER_000_24 =  9,
	IB_RNR_TIMER_000_32 = 10,
	IB_RNR_TIMER_000_48 = 11,
	IB_RNR_TIMER_000_64 = 12,
	IB_RNR_TIMER_000_96 = 13,
	IB_RNR_TIMER_001_28 = 14,
	IB_RNR_TIMER_001_92 = 15,
	IB_RNR_TIMER_002_56 = 16,
	IB_RNR_TIMER_003_84 = 17,
	IB_RNR_TIMER_005_12 = 18,
	IB_RNR_TIMER_007_68 = 19,
	IB_RNR_TIMER_010_24 = 20,
	IB_RNR_TIMER_015_36 = 21,
	IB_RNR_TIMER_020_48 = 22,
	IB_RNR_TIMER_030_72 = 23,
	IB_RNR_TIMER_040_96 = 24,
	IB_RNR_TIMER_061_44 = 25,
	IB_RNR_TIMER_081_92 = 26,
	IB_RNR_TIMER_122_88 = 27,
	IB_RNR_TIMER_163_84 = 28,
	IB_RNR_TIMER_245_76 = 29,
	IB_RNR_TIMER_327_68 = 30,
	IB_RNR_TIMER_491_52 = 31
};

enum ib_qp_attr_mask {
	IB_QP_STATE			= 1,
	IB_QP_CUR_STATE			= (1<<1),
	IB_QP_EN_SQD_ASYNC_NOTIFY	= (1<<2),
	IB_QP_ACCESS_FLAGS		= (1<<3),
	IB_QP_PKEY_INDEX		= (1<<4),
	IB_QP_PORT			= (1<<5),
	IB_QP_QKEY			= (1<<6),
	IB_QP_AV			= (1<<7),
	IB_QP_PATH_MTU			= (1<<8),
	IB_QP_TIMEOUT			= (1<<9),
	IB_QP_RETRY_CNT			= (1<<10),
	IB_QP_RNR_RETRY			= (1<<11),
	IB_QP_RQ_PSN			= (1<<12),
	IB_QP_MAX_QP_RD_ATOMIC		= (1<<13),
	IB_QP_ALT_PATH			= (1<<14),
	IB_QP_MIN_RNR_TIMER		= (1<<15),
	IB_QP_SQ_PSN			= (1<<16),
	IB_QP_MAX_DEST_RD_ATOMIC	= (1<<17),
	IB_QP_PATH_MIG_STATE		= (1<<18),
	IB_QP_CAP			= (1<<19),
	IB_QP_DEST_QPN			= (1<<20),
	IB_QP_RESERVED1			= (1<<21),
	IB_QP_RESERVED2			= (1<<22),
	IB_QP_RESERVED3			= (1<<23),
	IB_QP_RESERVED4			= (1<<24),
	IB_QP_RATE_LIMIT		= (1<<25),
};

enum ib_qp_state {
	IB_QPS_RESET,
	IB_QPS_INIT,
	IB_QPS_RTR,
	IB_QPS_RTS,
	IB_QPS_SQD,
	IB_QPS_SQE,
	IB_QPS_ERR
};

enum ib_mig_state {
	IB_MIG_MIGRATED,
	IB_MIG_REARM,
	IB_MIG_ARMED
};

enum ib_mw_type {
	IB_MW_TYPE_1 = 1,
	IB_MW_TYPE_2 = 2
};

struct ib_qp_attr {
	enum ib_qp_state	qp_state;
	enum ib_qp_state	cur_qp_state;
	enum ib_mtu		path_mtu;
	enum ib_mig_state	path_mig_state;
	u32			qkey;
	u32			rq_psn;
	u32			sq_psn;
	u32			dest_qp_num;
	int			qp_access_flags;
	struct ib_qp_cap	cap;
	struct rdma_ah_attr	ah_attr;
	struct rdma_ah_attr	alt_ah_attr;
	u16			pkey_index;
	u16			alt_pkey_index;
	u8			en_sqd_async_notify;
	u8			sq_draining;
	u8			max_rd_atomic;
	u8			max_dest_rd_atomic;
	u8			min_rnr_timer;
	u8			port_num;
	u8			timeout;
	u8			retry_cnt;
	u8			rnr_retry;
	u8			alt_port_num;
	u8			alt_timeout;
	u32			rate_limit;
};

enum ib_wr_opcode {
	/* These are shared with userspace */
	IB_WR_RDMA_WRITE = IB_UVERBS_WR_RDMA_WRITE,
	IB_WR_RDMA_WRITE_WITH_IMM = IB_UVERBS_WR_RDMA_WRITE_WITH_IMM,
	IB_WR_SEND = IB_UVERBS_WR_SEND,
	IB_WR_SEND_WITH_IMM = IB_UVERBS_WR_SEND_WITH_IMM,
	IB_WR_RDMA_READ = IB_UVERBS_WR_RDMA_READ,
	IB_WR_ATOMIC_CMP_AND_SWP = IB_UVERBS_WR_ATOMIC_CMP_AND_SWP,
	IB_WR_ATOMIC_FETCH_AND_ADD = IB_UVERBS_WR_ATOMIC_FETCH_AND_ADD,
	IB_WR_LSO = IB_UVERBS_WR_TSO,
	IB_WR_SEND_WITH_INV = IB_UVERBS_WR_SEND_WITH_INV,
	IB_WR_RDMA_READ_WITH_INV = IB_UVERBS_WR_RDMA_READ_WITH_INV,
	IB_WR_LOCAL_INV = IB_UVERBS_WR_LOCAL_INV,
	IB_WR_MASKED_ATOMIC_CMP_AND_SWP =
		IB_UVERBS_WR_MASKED_ATOMIC_CMP_AND_SWP,
	IB_WR_MASKED_ATOMIC_FETCH_AND_ADD =
		IB_UVERBS_WR_MASKED_ATOMIC_FETCH_AND_ADD,

	/* These are kernel only and cannot be issued by userspace */
	IB_WR_REG_MR = 0x20,
	IB_WR_REG_SIG_MR,

	/* reserve values for low level drivers' internal use.
	 * These values will not be used at all in the ib core layer.
	 */
	IB_WR_RESERVED1 = 0xf0,
	IB_WR_RESERVED2,
	IB_WR_RESERVED3,
	IB_WR_RESERVED4,
	IB_WR_RESERVED5,
	IB_WR_RESERVED6,
	IB_WR_RESERVED7,
	IB_WR_RESERVED8,
	IB_WR_RESERVED9,
	IB_WR_RESERVED10,
};

enum ib_send_flags {
	IB_SEND_FENCE		= 1,
	IB_SEND_SIGNALED	= (1<<1),
	IB_SEND_SOLICITED	= (1<<2),
	IB_SEND_INLINE		= (1<<3),
	IB_SEND_IP_CSUM		= (1<<4),

	/* reserve bits 26-31 for low level drivers' internal use */
	IB_SEND_RESERVED_START	= (1 << 26),
	IB_SEND_RESERVED_END	= (1 << 31),
};

struct ib_sge {
	u64	addr;
	u32	length;
	u32	lkey;
};

struct ib_cqe {
	void (*done)(struct ib_cq *cq, struct ib_wc *wc);
};

struct ib_send_wr {
	struct ib_send_wr      *next;
	union {
		u64		wr_id;
		struct ib_cqe	*wr_cqe;
	};
	struct ib_sge	       *sg_list;
	int			num_sge;
	enum ib_wr_opcode	opcode;
	int			send_flags;
	union {
		__be32		imm_data;
		u32		invalidate_rkey;
	} ex;
};

struct ib_rdma_wr {
	struct ib_send_wr	wr;
	u64			remote_addr;
	u32			rkey;
};

static inline const struct ib_rdma_wr *rdma_wr(const struct ib_send_wr *wr)
{
	return container_of(wr, struct ib_rdma_wr, wr);
}

struct ib_atomic_wr {
	struct ib_send_wr	wr;
	u64			remote_addr;
	u64			compare_add;
	u64			swap;
	u64			compare_add_mask;
	u64			swap_mask;
	u32			rkey;
};

static inline const struct ib_atomic_wr *atomic_wr(const struct ib_send_wr *wr)
{
	return container_of(wr, struct ib_atomic_wr, wr);
}

struct ib_ud_wr {
	struct ib_send_wr	wr;
	struct ib_ah		*ah;
	void			*header;
	int			hlen;
	int			mss;
	u32			remote_qpn;
	u32			remote_qkey;
	u16			pkey_index; /* valid for GSI only */
	u8			port_num;   /* valid for DR SMPs on switch only */
};

static inline const struct ib_ud_wr *ud_wr(const struct ib_send_wr *wr)
{
	return container_of(wr, struct ib_ud_wr, wr);
}

struct ib_reg_wr {
	struct ib_send_wr	wr;
	struct ib_mr		*mr;
	u32			key;
	int			access;
};

static inline const struct ib_reg_wr *reg_wr(const struct ib_send_wr *wr)
{
	return container_of(wr, struct ib_reg_wr, wr);
}

struct ib_sig_handover_wr {
	struct ib_send_wr	wr;
	struct ib_sig_attrs    *sig_attrs;
	struct ib_mr	       *sig_mr;
	int			access_flags;
	struct ib_sge	       *prot;
};

static inline const struct ib_sig_handover_wr *
sig_handover_wr(const struct ib_send_wr *wr)
{
	return container_of(wr, struct ib_sig_handover_wr, wr);
}

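/*
 * Sketch: building a one-element RDMA WRITE work request using the wrappers
 * above.  The helper itself, the keys and the addresses are illustrative
 * only; the request would eventually be posted with ib_post_send(), declared
 * further down in this header.
 */
static inline void example_build_rdma_write(struct ib_rdma_wr *wr,
					    struct ib_sge *sge,
					    u64 remote_addr, u32 rkey)
{
	memset(wr, 0, sizeof(*wr));
	wr->wr.opcode = IB_WR_RDMA_WRITE;
	wr->wr.sg_list = sge;	/* local buffer described by caller's SGE */
	wr->wr.num_sge = 1;
	wr->wr.send_flags = IB_SEND_SIGNALED;	/* request a completion */
	wr->remote_addr = remote_addr;
	wr->rkey = rkey;
	/* then: ib_post_send(qp, &wr->wr, &bad_wr); */
}
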
struct ib_recv_wr {
	struct ib_recv_wr      *next;
	union {
		u64		wr_id;
		struct ib_cqe	*wr_cqe;
	};
	struct ib_sge	       *sg_list;
	int			num_sge;
};

enum ib_access_flags {
	IB_ACCESS_LOCAL_WRITE = IB_UVERBS_ACCESS_LOCAL_WRITE,
	IB_ACCESS_REMOTE_WRITE = IB_UVERBS_ACCESS_REMOTE_WRITE,
	IB_ACCESS_REMOTE_READ = IB_UVERBS_ACCESS_REMOTE_READ,
	IB_ACCESS_REMOTE_ATOMIC = IB_UVERBS_ACCESS_REMOTE_ATOMIC,
	IB_ACCESS_MW_BIND = IB_UVERBS_ACCESS_MW_BIND,
	IB_ZERO_BASED = IB_UVERBS_ACCESS_ZERO_BASED,
	IB_ACCESS_ON_DEMAND = IB_UVERBS_ACCESS_ON_DEMAND,
	IB_ACCESS_HUGETLB = IB_UVERBS_ACCESS_HUGETLB,

	IB_ACCESS_SUPPORTED = ((IB_ACCESS_HUGETLB << 1) - 1)
};

/*
 * XXX: these are apparently used for ->rereg_user_mr, no idea why they
 * are hidden here instead of a uapi header!
 */
enum ib_mr_rereg_flags {
	IB_MR_REREG_TRANS	= 1,
	IB_MR_REREG_PD		= (1<<1),
	IB_MR_REREG_ACCESS	= (1<<2),
	IB_MR_REREG_SUPPORTED	= ((IB_MR_REREG_ACCESS << 1) - 1)
};

struct ib_fmr_attr {
	int	max_pages;
	int	max_maps;
	u8	page_shift;
};

struct ib_umem;

enum rdma_remove_reason {
	/*
	 * Userspace requested uobject deletion or the initial attempt
	 * to remove the uobject via cleanup. This call may fail.
	 */
	RDMA_REMOVE_DESTROY,
	/* Context deletion. This call should delete the actual object itself */
	RDMA_REMOVE_CLOSE,
	/* Driver is being hot-unplugged. This call should delete the actual object itself */
	RDMA_REMOVE_DRIVER_REMOVE,
	/* uobj is being cleaned-up before being committed */
	RDMA_REMOVE_ABORT,
};

struct ib_rdmacg_object {
#ifdef CONFIG_CGROUP_RDMA
	struct rdma_cgroup	*cg;		/* owner rdma cgroup */
#endif
};

Roland Dreiere2773c02005-07-07 17:57:10 -07001489struct ib_ucontext {
1490 struct ib_device *device;
Matan Barak771addf2017-04-04 13:31:41 +03001491 struct ib_uverbs_file *ufile;
Jason Gunthorpee9517472018-07-10 20:55:19 -06001492 /*
1493 * 'closing' can be read by the driver only during a destroy callback,
1494 * it is set when we are closing the file descriptor and indicates
1495 * that mm_sem may be locked.
1496 */
Leon Romanovsky6ceb6332018-09-03 20:18:03 +03001497 bool closing;
Shachar Raindel8ada2c12014-12-11 17:04:17 +02001498
Yishai Hadas1c774832018-06-20 17:11:39 +03001499 bool cleanup_retryable;
Matan Barak38321252017-04-04 13:31:42 +03001500
Haggai Eran882214e2014-12-11 17:04:18 +02001501#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
Jason Gunthorpeb5231b02018-09-16 20:48:04 +03001502 void (*invalidate_range)(struct ib_umem_odp *umem_odp,
Haggai Eran882214e2014-12-11 17:04:18 +02001503 unsigned long start, unsigned long end);
Jason Gunthorpef27a0d52018-09-16 20:48:08 +03001504 struct mutex per_mm_list_lock;
1505 struct list_head per_mm_list;
Haggai Eran882214e2014-12-11 17:04:18 +02001506#endif
Parav Pandit43579b52017-01-10 00:02:14 +00001507
1508 struct ib_rdmacg_object cg_obj;
Leon Romanovsky60615212018-11-28 13:16:43 +02001509 /*
1510 * Implementation details of the RDMA core, don't use in drivers:
1511 */
1512 struct rdma_restrack_entry res;
Roland Dreiere2773c02005-07-07 17:57:10 -07001513};

struct ib_uobject {
	u64			user_handle;	/* handle given to us by userspace */
	/* ufile & ucontext owning this object */
	struct ib_uverbs_file	*ufile;
	/* FIXME, save memory: ufile->context == context */
	struct ib_ucontext	*context;	/* associated user context */
	void			*object;	/* containing object */
	struct list_head	list;		/* link to context's list */
	struct ib_rdmacg_object	cg_obj;		/* rdmacg object */
	int			id;		/* index into kernel idr */
	struct kref		ref;
	atomic_t		usecnt;		/* protects exclusive access */
	struct rcu_head		rcu;		/* kfree_rcu() overhead */

	const struct uverbs_api_object *uapi_object;
};

struct ib_udata {
	const void __user *inbuf;
	void __user	  *outbuf;
	size_t		   inlen;
	size_t		   outlen;
};

struct ib_pd {
	u32			local_dma_lkey;
	u32			flags;
	struct ib_device	*device;
	struct ib_uobject	*uobject;
	atomic_t		usecnt;	/* count all resources */

	u32			unsafe_global_rkey;

	/*
	 * Implementation details of the RDMA core, don't use in drivers:
	 */
	struct ib_mr		*__internal_mr;
	struct rdma_restrack_entry res;
};

struct ib_xrcd {
	struct ib_device	*device;
	atomic_t		usecnt;	/* count all exposed resources */
	struct inode		*inode;

	struct mutex		tgt_qp_mutex;
	struct list_head	tgt_qp_list;
};

struct ib_ah {
	struct ib_device	*device;
	struct ib_pd		*pd;
	struct ib_uobject	*uobject;
	const struct ib_gid_attr *sgid_attr;
	enum rdma_ah_attr_type	type;
};

typedef void (*ib_comp_handler)(struct ib_cq *cq, void *cq_context);

enum ib_poll_context {
	IB_POLL_DIRECT,			/* caller context, no hw completions */
	IB_POLL_SOFTIRQ,		/* poll from softirq context */
	IB_POLL_WORKQUEUE,		/* poll from workqueue */
	IB_POLL_UNBOUND_WORKQUEUE,	/* poll from unbound workqueue */
};
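
/*
 * A brief sketch (illustrative only): the poll context is chosen when the
 * CQ is allocated, e.g. via ib_alloc_cq(), assuming "dev" and "priv" are
 * the caller's device and private context:
 *
 *	struct ib_cq *cq = ib_alloc_cq(dev, priv, 128, 0, IB_POLL_SOFTIRQ);
 *
 *	if (IS_ERR(cq))
 *		return PTR_ERR(cq);
 */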

struct ib_cq {
	struct ib_device	*device;
	struct ib_uobject	*uobject;
	ib_comp_handler		comp_handler;
	void			(*event_handler)(struct ib_event *, void *);
	void			*cq_context;
	int			cqe;
	atomic_t		usecnt;	/* count number of work queues */
	enum ib_poll_context	poll_ctx;
	struct ib_wc		*wc;
	union {
		struct irq_poll		iop;
		struct work_struct	work;
	};
	struct workqueue_struct *comp_wq;
	/*
	 * Implementation details of the RDMA core, don't use in drivers:
	 */
	struct rdma_restrack_entry res;
};

struct ib_srq {
	struct ib_device	*device;
	struct ib_pd		*pd;
	struct ib_uobject	*uobject;
	void			(*event_handler)(struct ib_event *, void *);
	void			*srq_context;
	enum ib_srq_type	srq_type;
	atomic_t		usecnt;

	struct {
		struct ib_cq	*cq;
		union {
			struct {
				struct ib_xrcd *xrcd;
				u32		srq_num;
			} xrc;
		};
	} ext;
};
enum ib_raw_packet_caps {
	/* Stripping the cvlan from an incoming packet and reporting it in
	 * the matching work completion is supported.
	 */
	IB_RAW_PACKET_CAP_CVLAN_STRIPPING	= (1 << 0),
	/* Scattering the FCS field of an incoming packet to host memory is
	 * supported.
	 */
	IB_RAW_PACKET_CAP_SCATTER_FCS		= (1 << 1),
	/* Checksum offloads are supported (for both send and receive). */
	IB_RAW_PACKET_CAP_IP_CSUM		= (1 << 2),
	/* When a packet is received for an RQ with no receive WQEs, the
	 * packet processing is delayed.
	 */
	IB_RAW_PACKET_CAP_DELAY_DROP		= (1 << 3),
};

enum ib_wq_type {
	IB_WQT_RQ
};

enum ib_wq_state {
	IB_WQS_RESET,
	IB_WQS_RDY,
	IB_WQS_ERR
};

struct ib_wq {
	struct ib_device	*device;
	struct ib_uobject	*uobject;
	void			*wq_context;
	void			(*event_handler)(struct ib_event *, void *);
	struct ib_pd		*pd;
	struct ib_cq		*cq;
	u32			wq_num;
	enum ib_wq_state	state;
	enum ib_wq_type		wq_type;
	atomic_t		usecnt;
};

enum ib_wq_flags {
	IB_WQ_FLAGS_CVLAN_STRIPPING	= 1 << 0,
	IB_WQ_FLAGS_SCATTER_FCS		= 1 << 1,
	IB_WQ_FLAGS_DELAY_DROP		= 1 << 2,
	IB_WQ_FLAGS_PCI_WRITE_END_PADDING = 1 << 3,
};

struct ib_wq_init_attr {
	void			*wq_context;
	enum ib_wq_type		wq_type;
	u32			max_wr;
	u32			max_sge;
	struct ib_cq		*cq;
	void			(*event_handler)(struct ib_event *, void *);
	u32			create_flags;	/* Use enum ib_wq_flags */
};

enum ib_wq_attr_mask {
	IB_WQ_STATE		= 1 << 0,
	IB_WQ_CUR_STATE		= 1 << 1,
	IB_WQ_FLAGS		= 1 << 2,
};

struct ib_wq_attr {
	enum ib_wq_state	wq_state;
	enum ib_wq_state	curr_wq_state;
	u32			flags;		/* Use enum ib_wq_flags */
	u32			flags_mask;	/* Use enum ib_wq_flags */
};

struct ib_rwq_ind_table {
	struct ib_device	*device;
	struct ib_uobject	*uobject;
	atomic_t		usecnt;
	u32			ind_tbl_num;
	u32			log_ind_tbl_size;
	struct ib_wq		**ind_tbl;
};

struct ib_rwq_ind_table_init_attr {
	u32			log_ind_tbl_size;
	/* Each entry is a pointer to a Receive Work Queue */
	struct ib_wq		**ind_tbl;
};

enum port_pkey_state {
	IB_PORT_PKEY_NOT_VALID = 0,
	IB_PORT_PKEY_VALID = 1,
	IB_PORT_PKEY_LISTED = 2,
};

struct ib_qp_security;

struct ib_port_pkey {
	enum port_pkey_state	state;
	u16			pkey_index;
	u8			port_num;
	struct list_head	qp_list;
	struct list_head	to_error_list;
	struct ib_qp_security	*sec;
};

struct ib_ports_pkeys {
	struct ib_port_pkey	main;
	struct ib_port_pkey	alt;
};

struct ib_qp_security {
	struct ib_qp		*qp;
	struct ib_device	*dev;
	/* Hold this mutex when changing port and pkey settings. */
	struct mutex		mutex;
	struct ib_ports_pkeys	*ports_pkeys;
	/* A list of all open shared QP handles.  Required to enforce security
	 * properly for all users of a shared QP.
	 */
	struct list_head	shared_qp_list;
	void			*security;
	bool			destroying;
	atomic_t		error_list_count;
	struct completion	error_complete;
	int			error_comps_pending;
};

/*
 * @max_write_sge: Maximum SGE elements per RDMA WRITE request.
 * @max_read_sge:  Maximum SGE elements per RDMA READ request.
 */
struct ib_qp {
	struct ib_device	*device;
	struct ib_pd		*pd;
	struct ib_cq		*send_cq;
	struct ib_cq		*recv_cq;
	spinlock_t		mr_lock;
	int			mrs_used;
	struct list_head	rdma_mrs;
	struct list_head	sig_mrs;
	struct ib_srq		*srq;
	struct ib_xrcd		*xrcd;	/* XRC TGT QPs only */
	struct list_head	xrcd_list;

	/* count times opened, mcast attaches, flow attaches */
	atomic_t		usecnt;
	struct list_head	open_list;
	struct ib_qp		*real_qp;
	struct ib_uobject	*uobject;
	void			(*event_handler)(struct ib_event *, void *);
	void			*qp_context;
	/* sgid_attrs associated with the AV's */
	const struct ib_gid_attr *av_sgid_attr;
	const struct ib_gid_attr *alt_path_sgid_attr;
	u32			qp_num;
	u32			max_write_sge;
	u32			max_read_sge;
	enum ib_qp_type		qp_type;
	struct ib_rwq_ind_table	*rwq_ind_tbl;
	struct ib_qp_security	*qp_sec;
	u8			port;

	/*
	 * Implementation details of the RDMA core, don't use in drivers:
	 */
	struct rdma_restrack_entry res;
};

struct ib_dm {
	struct ib_device	*device;
	u32			length;
	u32			flags;
	struct ib_uobject	*uobject;
	atomic_t		usecnt;
};

struct ib_mr {
	struct ib_device	*device;
	struct ib_pd		*pd;
	u32			lkey;
	u32			rkey;
	u64			iova;
	u64			length;
	unsigned int		page_size;
	bool			need_inval;
	union {
		struct ib_uobject	*uobject;	/* user */
		struct list_head	qp_entry;	/* FR */
	};

	struct ib_dm		*dm;

	/*
	 * Implementation details of the RDMA core, don't use in drivers:
	 */
	struct rdma_restrack_entry res;
};

struct ib_mw {
	struct ib_device	*device;
	struct ib_pd		*pd;
	struct ib_uobject	*uobject;
	u32			rkey;
	enum ib_mw_type		type;
};

struct ib_fmr {
	struct ib_device	*device;
	struct ib_pd		*pd;
	struct list_head	list;
	u32			lkey;
	u32			rkey;
};

/* Supported steering options */
enum ib_flow_attr_type {
	/* steering according to rule specifications */
	IB_FLOW_ATTR_NORMAL		= 0x0,
	/* default unicast and multicast rule -
	 * receive all Eth traffic which isn't steered to any QP
	 */
	IB_FLOW_ATTR_ALL_DEFAULT	= 0x1,
	/* default multicast rule -
	 * receive all Eth multicast traffic which isn't steered to any QP
	 */
	IB_FLOW_ATTR_MC_DEFAULT		= 0x2,
	/* sniffer rule - receive all port traffic */
	IB_FLOW_ATTR_SNIFFER		= 0x3
};

/* Supported steering header types */
enum ib_flow_spec_type {
	/* L2 headers */
	IB_FLOW_SPEC_ETH		= 0x20,
	IB_FLOW_SPEC_IB			= 0x22,
	/* L3 header */
	IB_FLOW_SPEC_IPV4		= 0x30,
	IB_FLOW_SPEC_IPV6		= 0x31,
	IB_FLOW_SPEC_ESP		= 0x34,
	/* L4 headers */
	IB_FLOW_SPEC_TCP		= 0x40,
	IB_FLOW_SPEC_UDP		= 0x41,
	IB_FLOW_SPEC_VXLAN_TUNNEL	= 0x50,
	IB_FLOW_SPEC_GRE		= 0x51,
	IB_FLOW_SPEC_MPLS		= 0x60,
	IB_FLOW_SPEC_INNER		= 0x100,
	/* Actions */
	IB_FLOW_SPEC_ACTION_TAG		= 0x1000,
	IB_FLOW_SPEC_ACTION_DROP	= 0x1001,
	IB_FLOW_SPEC_ACTION_HANDLE	= 0x1002,
	IB_FLOW_SPEC_ACTION_COUNT	= 0x1003,
};
#define IB_FLOW_SPEC_LAYER_MASK		0xF0
#define IB_FLOW_SPEC_SUPPORT_LAYERS	10

/* Flow steering rule priority is set according to its domain.
 * Lower domain value means higher priority.
 */
enum ib_flow_domain {
	IB_FLOW_DOMAIN_USER,
	IB_FLOW_DOMAIN_ETHTOOL,
	IB_FLOW_DOMAIN_RFS,
	IB_FLOW_DOMAIN_NIC,
	IB_FLOW_DOMAIN_NUM /* Must be last */
};

enum ib_flow_flags {
	IB_FLOW_ATTR_FLAGS_DONT_TRAP	= 1UL << 1,	/* Continue match, no steal */
	IB_FLOW_ATTR_FLAGS_EGRESS	= 1UL << 2,	/* Egress flow */
	IB_FLOW_ATTR_FLAGS_RESERVED	= 1UL << 3	/* Must be last */
};

struct ib_flow_eth_filter {
	u8	dst_mac[6];
	u8	src_mac[6];
	__be16	ether_type;
	__be16	vlan_tag;
	/* Must be last */
	u8	real_sz[0];
};

struct ib_flow_spec_eth {
	u32			  type;
	u16			  size;
	struct ib_flow_eth_filter val;
	struct ib_flow_eth_filter mask;
};

struct ib_flow_ib_filter {
	__be16	dlid;
	__u8	sl;
	/* Must be last */
	u8	real_sz[0];
};

struct ib_flow_spec_ib {
	u32			 type;
	u16			 size;
	struct ib_flow_ib_filter val;
	struct ib_flow_ib_filter mask;
};

/* IPv4 header flags */
enum ib_ipv4_flags {
	IB_IPV4_DONT_FRAG = 0x2, /* Don't enable packet fragmentation */
	IB_IPV4_MORE_FRAG = 0x4  /* All fragmented packets except the
				  * last one have this flag set */
};

struct ib_flow_ipv4_filter {
	__be32	src_ip;
	__be32	dst_ip;
	u8	proto;
	u8	tos;
	u8	ttl;
	u8	flags;
	/* Must be last */
	u8	real_sz[0];
};

struct ib_flow_spec_ipv4 {
	u32			   type;
	u16			   size;
	struct ib_flow_ipv4_filter val;
	struct ib_flow_ipv4_filter mask;
};

struct ib_flow_ipv6_filter {
	u8	src_ip[16];
	u8	dst_ip[16];
	__be32	flow_label;
	u8	next_hdr;
	u8	traffic_class;
	u8	hop_limit;
	/* Must be last */
	u8	real_sz[0];
};

struct ib_flow_spec_ipv6 {
	u32			   type;
	u16			   size;
	struct ib_flow_ipv6_filter val;
	struct ib_flow_ipv6_filter mask;
};

struct ib_flow_tcp_udp_filter {
	__be16	dst_port;
	__be16	src_port;
	/* Must be last */
	u8	real_sz[0];
};

struct ib_flow_spec_tcp_udp {
	u32			      type;
	u16			      size;
	struct ib_flow_tcp_udp_filter val;
	struct ib_flow_tcp_udp_filter mask;
};

struct ib_flow_tunnel_filter {
	__be32	tunnel_id;
	u8	real_sz[0];
};

/* ib_flow_spec_tunnel describes the VXLAN tunnel;
 * the tunnel_id in val carries the VNI value.
 */
struct ib_flow_spec_tunnel {
	u32			     type;
	u16			     size;
	struct ib_flow_tunnel_filter val;
	struct ib_flow_tunnel_filter mask;
};

struct ib_flow_esp_filter {
	__be32	spi;
	__be32	seq;
	/* Must be last */
	u8	real_sz[0];
};

struct ib_flow_spec_esp {
	u32			  type;
	u16			  size;
	struct ib_flow_esp_filter val;
	struct ib_flow_esp_filter mask;
};

struct ib_flow_gre_filter {
	__be16	c_ks_res0_ver;
	__be16	protocol;
	__be32	key;
	/* Must be last */
	u8	real_sz[0];
};

struct ib_flow_spec_gre {
	u32			  type;
	u16			  size;
	struct ib_flow_gre_filter val;
	struct ib_flow_gre_filter mask;
};

struct ib_flow_mpls_filter {
	__be32	tag;
	/* Must be last */
	u8	real_sz[0];
};

struct ib_flow_spec_mpls {
	u32			   type;
	u16			   size;
	struct ib_flow_mpls_filter val;
	struct ib_flow_mpls_filter mask;
};

struct ib_flow_spec_action_tag {
	enum ib_flow_spec_type	type;
	u16			size;
	u32			tag_id;
};

struct ib_flow_spec_action_drop {
	enum ib_flow_spec_type	type;
	u16			size;
};

struct ib_flow_spec_action_handle {
	enum ib_flow_spec_type	type;
	u16			size;
	struct ib_flow_action	*act;
};

enum ib_counters_description {
	IB_COUNTER_PACKETS,
	IB_COUNTER_BYTES,
};

struct ib_flow_spec_action_count {
	enum ib_flow_spec_type	type;
	u16			size;
	struct ib_counters	*counters;
};

union ib_flow_spec {
	struct {
		u32			type;
		u16			size;
	};
	struct ib_flow_spec_eth		eth;
	struct ib_flow_spec_ib		ib;
	struct ib_flow_spec_ipv4	ipv4;
	struct ib_flow_spec_tcp_udp	tcp_udp;
	struct ib_flow_spec_ipv6	ipv6;
	struct ib_flow_spec_tunnel	tunnel;
	struct ib_flow_spec_esp	esp;
	struct ib_flow_spec_gre	gre;
	struct ib_flow_spec_mpls	mpls;
	struct ib_flow_spec_action_tag	flow_tag;
	struct ib_flow_spec_action_drop	drop;
	struct ib_flow_spec_action_handle action;
	struct ib_flow_spec_action_count flow_count;
};

struct ib_flow_attr {
	enum ib_flow_attr_type	type;
	u16			size;
	u16			priority;
	u32			flags;
	u8			num_of_specs;
	u8			port;
	union ib_flow_spec	flows[];
};
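
/*
 * The specs follow the header back to back in the flows[] flexible array,
 * so the attribute is typically allocated with room for all of them at
 * once. An illustrative sketch, assuming the caller knows "num_specs":
 *
 *	struct ib_flow_attr *flow_attr;
 *
 *	flow_attr = kzalloc(sizeof(*flow_attr) +
 *			    num_specs * sizeof(union ib_flow_spec),
 *			    GFP_KERNEL);
 *	if (!flow_attr)
 *		return -ENOMEM;
 */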

struct ib_flow {
	struct ib_qp		*qp;
	struct ib_device	*device;
	struct ib_uobject	*uobject;
};

enum ib_flow_action_type {
	IB_FLOW_ACTION_UNSPECIFIED,
	IB_FLOW_ACTION_ESP = 1,
};

struct ib_flow_action_attrs_esp_keymats {
	enum ib_uverbs_flow_action_esp_keymat			protocol;
	union {
		struct ib_uverbs_flow_action_esp_keymat_aes_gcm	aes_gcm;
	} keymat;
};

struct ib_flow_action_attrs_esp_replays {
	enum ib_uverbs_flow_action_esp_replay			protocol;
	union {
		struct ib_uverbs_flow_action_esp_replay_bmp	bmp;
	} replay;
};

enum ib_flow_action_attrs_esp_flags {
	/* All user-space flags at the top: Use enum ib_uverbs_flow_action_esp_flags
	 * This is done in order to share the same flags between user-space and
	 * kernel and spare an unnecessary translation.
	 */

	/* Kernel flags */
	IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED	= 1ULL << 32,
	IB_FLOW_ACTION_ESP_FLAGS_MOD_ESP_ATTRS	= 1ULL << 33,
};

struct ib_flow_spec_list {
	struct ib_flow_spec_list	*next;
	union ib_flow_spec		spec;
};

struct ib_flow_action_attrs_esp {
	struct ib_flow_action_attrs_esp_keymats	*keymat;
	struct ib_flow_action_attrs_esp_replays	*replay;
	struct ib_flow_spec_list		*encap;
	/* Used only if IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED is enabled.
	 * Value of 0 is a valid value.
	 */
	u32					esn;
	u32					spi;
	u32					seq;
	u32					tfc_pad;
	/* Use enum ib_flow_action_attrs_esp_flags */
	u64					flags;
	u64					hard_limit_pkts;
};

struct ib_flow_action {
	struct ib_device		*device;
	struct ib_uobject		*uobject;
	enum ib_flow_action_type	type;
	atomic_t			usecnt;
};

struct ib_mad_hdr;
struct ib_grh;

enum ib_process_mad_flags {
	IB_MAD_IGNORE_MKEY	= 1,
	IB_MAD_IGNORE_BKEY	= 2,
	IB_MAD_IGNORE_ALL	= IB_MAD_IGNORE_MKEY | IB_MAD_IGNORE_BKEY
};

enum ib_mad_result {
	IB_MAD_RESULT_FAILURE	= 0,      /* (!SUCCESS is the important flag) */
	IB_MAD_RESULT_SUCCESS	= 1 << 0, /* MAD was successfully processed */
	IB_MAD_RESULT_REPLY	= 1 << 1, /* Reply packet needs to be sent */
	IB_MAD_RESULT_CONSUMED	= 1 << 2  /* Packet consumed: stop processing */
};

struct ib_port_cache {
	u64			subnet_prefix;
	struct ib_pkey_cache	*pkey;
	struct ib_gid_table	*gid;
	u8			lmc;
	enum ib_port_state	port_state;
};

struct ib_cache {
	rwlock_t		lock;
	struct ib_event_handler	event_handler;
	struct ib_port_cache	*ports;
};

struct iw_cm_verbs;

struct ib_port_immutable {
	int			pkey_tbl_len;
	int			gid_tbl_len;
	u32			core_cap_flags;
	u32			max_mad_size;
};

/* rdma netdev type - specifies protocol type */
enum rdma_netdev_t {
	RDMA_NETDEV_OPA_VNIC,
	RDMA_NETDEV_IPOIB,
};

/**
 * struct rdma_netdev - rdma netdev
 * For cases where netstack interfacing is required.
 */
struct rdma_netdev {
	void		*clnt_priv;
	struct ib_device *hca;
	u8		 port_num;

	/*
	 * cleanup function must be specified.
	 * FIXME: This is only used for OPA_VNIC and that usage should be
	 * removed too.
	 */
	void (*free_rdma_netdev)(struct net_device *netdev);

	/* control functions */
	void (*set_id)(struct net_device *netdev, int id);
	/* send packet */
	int (*send)(struct net_device *dev, struct sk_buff *skb,
		    struct ib_ah *address, u32 dqpn);
	/* multicast */
	int (*attach_mcast)(struct net_device *dev, struct ib_device *hca,
			    union ib_gid *gid, u16 mlid,
			    int set_qkey, u32 qkey);
	int (*detach_mcast)(struct net_device *dev, struct ib_device *hca,
			    union ib_gid *gid, u16 mlid);
};
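
/*
 * Protocol drivers conventionally place struct rdma_netdev at the start of
 * the net_device private area, so a sketch of reaching the ops from a
 * net_device (assuming that convention holds for the driver in question):
 *
 *	struct rdma_netdev *rn = netdev_priv(netdev);
 *
 *	rn->send(netdev, skb, ah, dqpn);
 */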
2229
Denis Drozdovf6a8a192018-08-14 14:08:51 +03002230struct rdma_netdev_alloc_params {
2231 size_t sizeof_priv;
2232 unsigned int txqs;
2233 unsigned int rxqs;
2234 void *param;
2235
2236 int (*initialize_rdma_netdev)(struct ib_device *device, u8 port_num,
2237 struct net_device *netdev, void *param);
2238};
2239
Daniel Jurgensd291f1a2017-05-19 15:48:52 +03002240struct ib_port_pkey_list {
2241 /* Lock to hold while modifying the list. */
2242 spinlock_t list_lock;
2243 struct list_head pkey_list;
2244};
2245
Raed Salemfa9b1802018-05-31 16:43:31 +03002246struct ib_counters {
2247 struct ib_device *device;
2248 struct ib_uobject *uobject;
2249 /* num of objects attached */
2250 atomic_t usecnt;
2251};
2252
Raed Salem51d7a532018-05-31 16:43:33 +03002253struct ib_counters_read_attr {
2254 u64 *counters_buff;
2255 u32 ncounters;
2256 u32 flags; /* use enum ib_read_counters_flags */
2257};
2258
Matan Barak2eb9bea2018-03-28 09:27:45 +03002259struct uverbs_attr_bundle;

struct ib_device {
	/* Do not access @dma_device directly from ULP nor from HW drivers. */
	struct device		     *dma_device;

	char			      name[IB_DEVICE_NAME_MAX];

	struct list_head	      event_handler_list;
	spinlock_t		      event_handler_lock;

	rwlock_t		      client_data_lock;
	struct list_head	      core_list;
	/* Access to the client_data_list is protected by the client_data_lock
	 * rwlock and the lists_rwsem read-write semaphore
	 */
	struct list_head	      client_data_list;

	struct ib_cache		      cache;
	/**
	 * port_immutable is indexed by port number
	 */
	struct ib_port_immutable     *port_immutable;

	int			      num_comp_vectors;

	struct ib_port_pkey_list     *port_pkey_list;

	struct iw_cm_verbs	     *iwcm;

	/**
	 * alloc_hw_stats - Allocate a struct rdma_hw_stats and fill in the
	 *   driver initialized data.  The struct is kfree()'ed by the sysfs
	 *   core when the device is removed.  A lifespan of -1 in the return
	 *   struct tells the core to set a default lifespan.
	 */
	struct rdma_hw_stats	   *(*alloc_hw_stats)(struct ib_device *device,
						      u8 port_num);
	/**
	 * get_hw_stats - Fill in the counter value(s) in the stats struct.
	 * @index - The index in the value array we wish to have updated, or
	 *   num_counters if we want all stats updated
	 * Return codes -
	 *   < 0 - Error, no counters updated
	 *   index - Updated the single counter pointed to by index
	 *   num_counters - Updated all counters (will reset the timestamp
	 *     and prevent further calls for lifespan milliseconds)
	 * Drivers are allowed to update all counters in lieu of just the
	 *   one given in index at their option
	 */
	int			   (*get_hw_stats)(struct ib_device *device,
						   struct rdma_hw_stats *stats,
						   u8 port, int index);
	int			   (*query_device)(struct ib_device *device,
						   struct ib_device_attr *device_attr,
						   struct ib_udata *udata);
	int			   (*query_port)(struct ib_device *device,
						 u8 port_num,
						 struct ib_port_attr *port_attr);
	enum rdma_link_layer	   (*get_link_layer)(struct ib_device *device,
						     u8 port_num);
	/* When calling get_netdev, the HW vendor's driver should return the
	 * net device of device @device at port @port_num or NULL if such
	 * a net device doesn't exist. The vendor driver should call dev_hold
	 * on this net device. The HW vendor's device driver must guarantee
	 * that this function returns NULL before the net device has finished
	 * NETDEV_UNREGISTER state.
	 */
	struct net_device	  *(*get_netdev)(struct ib_device *device,
						 u8 port_num);
	/* query_gid should return the GID value for @device when the
	 * @port_num link layer is either IB or iWARP. It is a no-op if the
	 * @port_num port is a RoCE link layer.
	 */
	int			   (*query_gid)(struct ib_device *device,
						u8 port_num, int index,
						union ib_gid *gid);
	/* When calling add_gid, the HW vendor's driver should add the gid
	 * of the device's port at the gid index available at @attr. Meta-info
	 * of that gid (for example, the network device related to this gid)
	 * is available at @attr. @context allows the HW vendor driver to
	 * store extra information together with a GID entry. The HW vendor
	 * driver may allocate memory to contain this information and store
	 * it in @context when a new GID entry is written to. Params are
	 * consistent until the next call of add_gid or delete_gid. The
	 * function should return 0 on success or an error otherwise. The
	 * function could be called concurrently for different ports. This
	 * function is only called when roce_gid_table is used.
	 */
	int			   (*add_gid)(const struct ib_gid_attr *attr,
					      void **context);
	/* When calling del_gid, the HW vendor's driver should delete the
	 * gid of device @device at gid index gid_index of port port_num
	 * available in @attr.
	 * Upon the deletion of a GID entry, the HW vendor must free any
	 * allocated memory. The caller will clear @context afterwards.
	 * This function is only called when roce_gid_table is used.
	 */
	int			   (*del_gid)(const struct ib_gid_attr *attr,
					      void **context);
	int			   (*query_pkey)(struct ib_device *device,
						 u8 port_num, u16 index, u16 *pkey);
	int			   (*modify_device)(struct ib_device *device,
						    int device_modify_mask,
						    struct ib_device_modify *device_modify);
	int			   (*modify_port)(struct ib_device *device,
						  u8 port_num, int port_modify_mask,
						  struct ib_port_modify *port_modify);
	struct ib_ucontext *	   (*alloc_ucontext)(struct ib_device *device,
						     struct ib_udata *udata);
	int			   (*dealloc_ucontext)(struct ib_ucontext *context);
	int			   (*mmap)(struct ib_ucontext *context,
					   struct vm_area_struct *vma);
	struct ib_pd *		   (*alloc_pd)(struct ib_device *device,
					       struct ib_ucontext *context,
					       struct ib_udata *udata);
	int			   (*dealloc_pd)(struct ib_pd *pd);
	struct ib_ah *		   (*create_ah)(struct ib_pd *pd,
						struct rdma_ah_attr *ah_attr,
						struct ib_udata *udata);
	int			   (*modify_ah)(struct ib_ah *ah,
						struct rdma_ah_attr *ah_attr);
	int			   (*query_ah)(struct ib_ah *ah,
					       struct rdma_ah_attr *ah_attr);
	int			   (*destroy_ah)(struct ib_ah *ah);
	struct ib_srq *		   (*create_srq)(struct ib_pd *pd,
						 struct ib_srq_init_attr *srq_init_attr,
						 struct ib_udata *udata);
	int			   (*modify_srq)(struct ib_srq *srq,
						 struct ib_srq_attr *srq_attr,
						 enum ib_srq_attr_mask srq_attr_mask,
						 struct ib_udata *udata);
	int			   (*query_srq)(struct ib_srq *srq,
						struct ib_srq_attr *srq_attr);
	int			   (*destroy_srq)(struct ib_srq *srq);
	int			   (*post_srq_recv)(struct ib_srq *srq,
						    const struct ib_recv_wr *recv_wr,
						    const struct ib_recv_wr **bad_recv_wr);
	struct ib_qp *		   (*create_qp)(struct ib_pd *pd,
						struct ib_qp_init_attr *qp_init_attr,
						struct ib_udata *udata);
	int			   (*modify_qp)(struct ib_qp *qp,
						struct ib_qp_attr *qp_attr,
						int qp_attr_mask,
						struct ib_udata *udata);
	int			   (*query_qp)(struct ib_qp *qp,
					       struct ib_qp_attr *qp_attr,
					       int qp_attr_mask,
					       struct ib_qp_init_attr *qp_init_attr);
	int			   (*destroy_qp)(struct ib_qp *qp);
	int			   (*post_send)(struct ib_qp *qp,
						const struct ib_send_wr *send_wr,
						const struct ib_send_wr **bad_send_wr);
	int			   (*post_recv)(struct ib_qp *qp,
						const struct ib_recv_wr *recv_wr,
						const struct ib_recv_wr **bad_recv_wr);
	struct ib_cq *		   (*create_cq)(struct ib_device *device,
						const struct ib_cq_init_attr *attr,
						struct ib_ucontext *context,
						struct ib_udata *udata);
	int			   (*modify_cq)(struct ib_cq *cq, u16 cq_count,
						u16 cq_period);
	int			   (*destroy_cq)(struct ib_cq *cq);
	int			   (*resize_cq)(struct ib_cq *cq, int cqe,
						struct ib_udata *udata);
	int			   (*poll_cq)(struct ib_cq *cq, int num_entries,
					      struct ib_wc *wc);
	int			   (*peek_cq)(struct ib_cq *cq, int wc_cnt);
	int			   (*req_notify_cq)(struct ib_cq *cq,
						    enum ib_cq_notify_flags flags);
	int			   (*req_ncomp_notif)(struct ib_cq *cq,
						      int wc_cnt);
	struct ib_mr *		   (*get_dma_mr)(struct ib_pd *pd,
						 int mr_access_flags);
	struct ib_mr *		   (*reg_user_mr)(struct ib_pd *pd,
						  u64 start, u64 length,
						  u64 virt_addr,
						  int mr_access_flags,
						  struct ib_udata *udata);
	int			   (*rereg_user_mr)(struct ib_mr *mr,
						    int flags,
						    u64 start, u64 length,
						    u64 virt_addr,
						    int mr_access_flags,
						    struct ib_pd *pd,
						    struct ib_udata *udata);
	int			   (*dereg_mr)(struct ib_mr *mr);
	struct ib_mr *		   (*alloc_mr)(struct ib_pd *pd,
					       enum ib_mr_type mr_type,
					       u32 max_num_sg);
	int			   (*map_mr_sg)(struct ib_mr *mr,
						struct scatterlist *sg,
						int sg_nents,
						unsigned int *sg_offset);
	struct ib_mw *		   (*alloc_mw)(struct ib_pd *pd,
					       enum ib_mw_type type,
					       struct ib_udata *udata);
	int			   (*dealloc_mw)(struct ib_mw *mw);
	struct ib_fmr *		   (*alloc_fmr)(struct ib_pd *pd,
						int mr_access_flags,
						struct ib_fmr_attr *fmr_attr);
	int			   (*map_phys_fmr)(struct ib_fmr *fmr,
						   u64 *page_list, int list_len,
						   u64 iova);
	int			   (*unmap_fmr)(struct list_head *fmr_list);
	int			   (*dealloc_fmr)(struct ib_fmr *fmr);
	int			   (*attach_mcast)(struct ib_qp *qp,
						   union ib_gid *gid,
						   u16 lid);
	int			   (*detach_mcast)(struct ib_qp *qp,
						   union ib_gid *gid,
						   u16 lid);
	int			   (*process_mad)(struct ib_device *device,
						  int process_mad_flags,
						  u8 port_num,
						  const struct ib_wc *in_wc,
						  const struct ib_grh *in_grh,
						  const struct ib_mad_hdr *in_mad,
						  size_t in_mad_size,
						  struct ib_mad_hdr *out_mad,
						  size_t *out_mad_size,
						  u16 *out_mad_pkey_index);
	struct ib_xrcd *	   (*alloc_xrcd)(struct ib_device *device,
						 struct ib_ucontext *ucontext,
						 struct ib_udata *udata);
	int			   (*dealloc_xrcd)(struct ib_xrcd *xrcd);
	struct ib_flow *	   (*create_flow)(struct ib_qp *qp,
						  struct ib_flow_attr *flow_attr,
						  int domain,
						  struct ib_udata *udata);
	int			   (*destroy_flow)(struct ib_flow *flow_id);
	int			   (*check_mr_status)(struct ib_mr *mr, u32 check_mask,
						      struct ib_mr_status *mr_status);
	void			   (*disassociate_ucontext)(struct ib_ucontext *ibcontext);
	void			   (*drain_rq)(struct ib_qp *qp);
	void			   (*drain_sq)(struct ib_qp *qp);
	int			   (*set_vf_link_state)(struct ib_device *device, int vf, u8 port,
							int state);
	int			   (*get_vf_config)(struct ib_device *device, int vf, u8 port,
						    struct ifla_vf_info *ivf);
	int			   (*get_vf_stats)(struct ib_device *device, int vf, u8 port,
						   struct ifla_vf_stats *stats);
	int			   (*set_vf_guid)(struct ib_device *device, int vf, u8 port, u64 guid,
						  int type);
	struct ib_wq *		   (*create_wq)(struct ib_pd *pd,
						struct ib_wq_init_attr *init_attr,
						struct ib_udata *udata);
	int			   (*destroy_wq)(struct ib_wq *wq);
	int			   (*modify_wq)(struct ib_wq *wq,
						struct ib_wq_attr *attr,
						u32 wq_attr_mask,
						struct ib_udata *udata);
	struct ib_rwq_ind_table *  (*create_rwq_ind_table)(struct ib_device *device,
							   struct ib_rwq_ind_table_init_attr *init_attr,
							   struct ib_udata *udata);
	int			   (*destroy_rwq_ind_table)(struct ib_rwq_ind_table *wq_ind_table);
	struct ib_flow_action *	   (*create_flow_action_esp)(struct ib_device *device,
							     const struct ib_flow_action_attrs_esp *attr,
							     struct uverbs_attr_bundle *attrs);
	int			   (*destroy_flow_action)(struct ib_flow_action *action);
	int			   (*modify_flow_action_esp)(struct ib_flow_action *action,
							     const struct ib_flow_action_attrs_esp *attr,
							     struct uverbs_attr_bundle *attrs);
	struct ib_dm *		   (*alloc_dm)(struct ib_device *device,
					       struct ib_ucontext *context,
					       struct ib_dm_alloc_attr *attr,
					       struct uverbs_attr_bundle *attrs);
	int			   (*dealloc_dm)(struct ib_dm *dm);
	struct ib_mr *		   (*reg_dm_mr)(struct ib_pd *pd, struct ib_dm *dm,
						struct ib_dm_mr_attr *attr,
						struct uverbs_attr_bundle *attrs);
	struct ib_counters *	   (*create_counters)(struct ib_device *device,
						      struct uverbs_attr_bundle *attrs);
	int			   (*destroy_counters)(struct ib_counters *counters);
	int			   (*read_counters)(struct ib_counters *counters,
						    struct ib_counters_read_attr *counters_read_attr,
						    struct uverbs_attr_bundle *attrs);

	/**
	 * rdma netdev operation
	 *
	 * Driver implementing alloc_rdma_netdev or rdma_netdev_get_params
	 * must return -EOPNOTSUPP if it doesn't support the specified type.
	 */
	struct net_device *(*alloc_rdma_netdev)(
					struct ib_device *device,
					u8 port_num,
					enum rdma_netdev_t type,
					const char *name,
					unsigned char name_assign_type,
					void (*setup)(struct net_device *));

	int (*rdma_netdev_get_params)(struct ib_device *device, u8 port_num,
				      enum rdma_netdev_t type,
				      struct rdma_netdev_alloc_params *params);

	struct module		     *owner;
	struct device		      dev;
	/* First group is for device attributes,
	 * second group is for driver provided attributes (optional).
	 * It is a NULL terminated array.
	 */
	const struct attribute_group *groups[3];

	struct kobject		     *ports_kobj;
	struct list_head	      port_list;

	enum {
		IB_DEV_UNINITIALIZED,
		IB_DEV_REGISTERED,
		IB_DEV_UNREGISTERED
	} reg_state;

	int			      uverbs_abi_ver;
	u64			      uverbs_cmd_mask;
	u64			      uverbs_ex_cmd_mask;

	char			      node_desc[IB_DEVICE_NODE_DESC_MAX];
	__be64			      node_guid;
	u32			      local_dma_lkey;
	u16			      is_switch:1;
	u8			      node_type;
	u8			      phys_port_cnt;
	struct ib_device_attr	      attrs;
	struct attribute_group	     *hw_stats_ag;
	struct rdma_hw_stats	     *hw_stats;

#ifdef CONFIG_CGROUP_RDMA
	struct rdmacg_device	      cg_device;
#endif

	u32			      index;
	/*
	 * Implementation details of the RDMA core, don't use in drivers
	 */
	struct rdma_restrack_root     res;

	/**
	 * The following mandatory functions are used only at device
	 * registration.  Keep functions such as these at the end of this
	 * structure to avoid cache line misses when accessing struct ib_device
	 * in fast paths.
	 */
	int (*get_port_immutable)(struct ib_device *, u8, struct ib_port_immutable *);
	void (*get_dev_fw_str)(struct ib_device *, char *str);
	const struct cpumask *(*get_vector_affinity)(struct ib_device *ibdev,
						     int comp_vector);

	const struct uapi_definition *driver_def;
	enum rdma_driver_id	      driver_id;
	/*
	 * Provides synchronization between device unregistration and netlink
	 * commands on a device. To be used only by core.
	 */
	refcount_t		      refcount;
	struct completion	      unreg_completion;
};

struct ib_client {
	char  *name;
	void (*add)   (struct ib_device *);
	void (*remove)(struct ib_device *, void *client_data);

	/* Returns the net_dev belonging to this ib_client and matching the
	 * given parameters.
	 * @dev:	 An RDMA device that the net_dev uses for communication.
	 * @port:	 A physical port number on the RDMA device.
	 * @pkey:	 P_Key that the net_dev uses if applicable.
	 * @gid:	 A GID that the net_dev uses to communicate.
	 * @addr:	 An IP address the net_dev is configured with.
	 * @client_data: The device's client data set by ib_set_client_data().
	 *
	 * An ib_client that implements a net_dev on top of RDMA devices
	 * (such as IP over IB) should implement this callback, allowing the
	 * rdma_cm module to find the right net_dev for a given request.
	 *
	 * The caller is responsible for calling dev_put on the returned
	 * netdev.
	 */
	struct net_device *(*get_net_dev_by_params)(
			struct ib_device *dev,
			u8 port,
			u16 pkey,
			const union ib_gid *gid,
			const struct sockaddr *addr,
			void *client_data);
	struct list_head list;
};

struct ib_device *ib_alloc_device(size_t size);
void ib_dealloc_device(struct ib_device *device);

void ib_get_device_fw_str(struct ib_device *device, char *str);

int ib_register_device(struct ib_device *device, const char *name,
		       int (*port_callback)(struct ib_device *, u8,
					    struct kobject *));
void ib_unregister_device(struct ib_device *device);

int ib_register_client   (struct ib_client *client);
void ib_unregister_client(struct ib_client *client);

void *ib_get_client_data(struct ib_device *device, struct ib_client *client);
void  ib_set_client_data(struct ib_device *device, struct ib_client *client,
			 void *data);
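
/*
 * A minimal client sketch (illustrative only; "my_client", "my_setup" and
 * "my_teardown" are hypothetical names):
 *
 *	static struct ib_client my_client;
 *
 *	static void my_add_one(struct ib_device *device)
 *	{
 *		void *ctx = my_setup(device);
 *
 *		ib_set_client_data(device, &my_client, ctx);
 *	}
 *
 *	static void my_remove_one(struct ib_device *device, void *client_data)
 *	{
 *		my_teardown(client_data);
 *	}
 *
 *	static struct ib_client my_client = {
 *		.name	= "my_client",
 *		.add	= my_add_one,
 *		.remove	= my_remove_one,
 *	};
 *
 * The client is then activated with ib_register_client(&my_client) at
 * module init and torn down with ib_unregister_client(&my_client).
 */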

#if IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS)
int rdma_user_mmap_io(struct ib_ucontext *ucontext, struct vm_area_struct *vma,
		      unsigned long pfn, unsigned long size, pgprot_t prot);
int rdma_user_mmap_page(struct ib_ucontext *ucontext,
			struct vm_area_struct *vma, struct page *page,
			unsigned long size);
#else
static inline int rdma_user_mmap_io(struct ib_ucontext *ucontext,
				    struct vm_area_struct *vma,
				    unsigned long pfn, unsigned long size,
				    pgprot_t prot)
{
	return -EINVAL;
}
static inline int rdma_user_mmap_page(struct ib_ucontext *ucontext,
				      struct vm_area_struct *vma, struct page *page,
				      unsigned long size)
{
	return -EINVAL;
}
#endif
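
/*
 * Illustrative sketch of a driver ->mmap() handler that maps one device
 * BAR page to userspace ("my_lookup_pfn" is a hypothetical driver helper
 * translating the mmap offset to a physical frame number):
 *
 *	static int my_mmap(struct ib_ucontext *context,
 *			   struct vm_area_struct *vma)
 *	{
 *		unsigned long pfn = my_lookup_pfn(context, vma->vm_pgoff);
 *
 *		return rdma_user_mmap_io(context, vma, pfn, PAGE_SIZE,
 *					 pgprot_noncached(vma->vm_page_prot));
 *	}
 */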

static inline int ib_copy_from_udata(void *dest, struct ib_udata *udata, size_t len)
{
	return copy_from_user(dest, udata->inbuf, len) ? -EFAULT : 0;
}

static inline int ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len)
{
	return copy_to_user(udata->outbuf, src, len) ? -EFAULT : 0;
}
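
/*
 * A typical verb-handler sketch (the cmd/resp structures are hypothetical):
 * read the command the consumer passed in, then write back no more
 * response than the consumer asked for.
 *
 *	struct my_create_cmd cmd = {};
 *	struct my_create_resp resp = {};
 *	int err;
 *
 *	err = ib_copy_from_udata(&cmd, udata, min(udata->inlen, sizeof(cmd)));
 *	if (err)
 *		return err;
 *
 *	err = ib_copy_to_udata(udata, &resp, min(udata->outlen, sizeof(resp)));
 */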

static inline bool ib_is_buffer_cleared(const void __user *p,
					size_t len)
{
	bool ret;
	u8 *buf;

	if (len > USHRT_MAX)
		return false;

	buf = memdup_user(p, len);
	if (IS_ERR(buf))
		return false;

	ret = !memchr_inv(buf, 0, len);
	kfree(buf);
	return ret;
}

static inline bool ib_is_udata_cleared(struct ib_udata *udata,
				       size_t offset,
				       size_t len)
{
	return ib_is_buffer_cleared(udata->inbuf + offset, len);
}
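
/*
 * A common forward-compatibility sketch: a driver that only understands
 * sizeof(cmd) bytes of input can still accept a larger command, provided
 * the bytes it does not understand are all zero:
 *
 *	if (udata->inlen > sizeof(cmd) &&
 *	    !ib_is_udata_cleared(udata, sizeof(cmd),
 *				 udata->inlen - sizeof(cmd)))
 *		return -EOPNOTSUPP;
 */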

/**
 * ib_is_destroy_retryable - Check whether the uobject destruction
 * is retryable.
 * @ret: The initial destruction return code
 * @why: remove reason
 * @uobj: The uobject that is destroyed
 *
 * This function is a helper function that IB layer and low-level drivers
 * can use to consider whether the destruction of the given uobject is
 * retryable.
 * It checks the original return code; if it wasn't success, whether the
 * destruction is retryable depends on the ucontext state
 * (i.e. cleanup_retryable) and the remove reason (i.e. why).
 * Must be called with the object locked for destroy.
 */
static inline bool ib_is_destroy_retryable(int ret, enum rdma_remove_reason why,
					   struct ib_uobject *uobj)
{
	return ret && (why == RDMA_REMOVE_DESTROY ||
		       uobj->context->cleanup_retryable);
}

/**
 * ib_destroy_usecnt - Called during destruction to check the usecnt
 * @usecnt: The usecnt atomic
 * @why: remove reason
 * @uobj: The uobject that is destroyed
 *
 * Non-zero usecnts will block destruction unless destruction was triggered
 * by a ucontext cleanup.
 */
static inline int ib_destroy_usecnt(atomic_t *usecnt,
				    enum rdma_remove_reason why,
				    struct ib_uobject *uobj)
{
	if (atomic_read(usecnt) && ib_is_destroy_retryable(-EBUSY, why, uobj))
		return -EBUSY;
	return 0;
}
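
/*
 * Illustrative sketch of a uverbs cleanup callback built on the helper
 * above (modeled on the core's ib_counters cleanup; treat the exact shape
 * as an assumption, not a contract):
 *
 *	static int uverbs_free_counters(struct ib_uobject *uobject,
 *					enum rdma_remove_reason why)
 *	{
 *		struct ib_counters *counters = uobject->object;
 *		int ret;
 *
 *		ret = ib_destroy_usecnt(&counters->usecnt, why, uobject);
 *		if (ret)
 *			return ret;
 *
 *		return counters->device->destroy_counters(counters);
 *	}
 */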

/**
 * ib_modify_qp_is_ok - Check that the supplied attribute mask
 * contains all required attributes and no attributes not allowed for
 * the given QP state transition.
 * @cur_state: Current QP state
 * @next_state: Next QP state
 * @type: QP type
 * @mask: Mask of supplied QP attributes
 *
 * This function is a helper function that a low-level driver's
 * modify_qp method can use to validate the consumer's input.  It
 * checks that cur_state and next_state are valid QP states, that a
 * transition from cur_state to next_state is allowed by the IB spec,
 * and that the attribute mask supplied is allowed for the transition.
 */
bool ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
			enum ib_qp_type type, enum ib_qp_attr_mask mask);
Roland Dreier8a518662006-02-13 12:48:12 -08002779
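/*
 * Example (editorial sketch, not part of the original header): how a
 * driver's modify_qp method might use ib_modify_qp_is_ok() before touching
 * hardware. The caller is assumed to know the QP's current state; the
 * function name is hypothetical.
 */
static inline int example_validate_modify_qp(struct ib_qp *qp,
					     enum ib_qp_state cur_state,
					     const struct ib_qp_attr *attr,
					     int attr_mask)
{
	enum ib_qp_state new_state = (attr_mask & IB_QP_STATE) ?
				     attr->qp_state : cur_state;

	if (!ib_modify_qp_is_ok(cur_state, new_state, qp->qp_type, attr_mask))
		return -EINVAL;

	return 0;
}
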
void ib_register_event_handler(struct ib_event_handler *event_handler);
void ib_unregister_event_handler(struct ib_event_handler *event_handler);
void ib_dispatch_event(struct ib_event *event);

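/*
 * Example (editorial sketch, not part of the original header): registering
 * an asynchronous event handler. This assumes the INIT_IB_EVENT_HANDLER()
 * macro declared earlier in this file; the function name is hypothetical.
 */
static inline void example_watch_events(struct ib_device *device,
					struct ib_event_handler *handler,
					void (*cb)(struct ib_event_handler *,
						   struct ib_event *))
{
	INIT_IB_EVENT_HANDLER(handler, device, cb);
	ib_register_event_handler(handler);
	/* ... later, ib_unregister_event_handler(handler) ... */
}
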
int ib_query_port(struct ib_device *device,
		  u8 port_num, struct ib_port_attr *port_attr);

enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device,
					       u8 port_num);

/**
 * rdma_cap_ib_switch - Check if the device is an IB switch
 * @device: Device to check
 *
 * The device driver is responsible for setting the is_switch bit in
 * the ib_device structure at init time.
 *
 * Return: true if the device is an IB switch.
 */
static inline bool rdma_cap_ib_switch(const struct ib_device *device)
{
	return device->is_switch;
}

/**
 * rdma_start_port - Return the first valid port number for the device
 * specified
 *
 * @device: Device to be checked
 *
 * Return start port number
 */
static inline u8 rdma_start_port(const struct ib_device *device)
{
	return rdma_cap_ib_switch(device) ? 0 : 1;
}

/**
 * rdma_end_port - Return the last valid port number for the device
 * specified
 *
 * @device: Device to be checked
 *
 * Return last port number
 */
static inline u8 rdma_end_port(const struct ib_device *device)
{
	return rdma_cap_ib_switch(device) ? 0 : device->phys_port_cnt;
}

static inline int rdma_is_port_valid(const struct ib_device *device,
				     unsigned int port)
{
	return (port >= rdma_start_port(device) &&
		port <= rdma_end_port(device));
}

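/*
 * Example (editorial sketch, not part of the original header): both bounds
 * above are inclusive, so the number of valid ports is end - start + 1; a
 * switch reports ports 0..0, an HCA 1..phys_port_cnt. The function name is
 * hypothetical.
 */
static inline unsigned int example_nr_ports(const struct ib_device *device)
{
	return rdma_end_port(device) - rdma_start_port(device) + 1;
}
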
static inline bool rdma_is_grh_required(const struct ib_device *device,
					u8 port_num)
{
	return device->port_immutable[port_num].core_cap_flags &
		RDMA_CORE_PORT_IB_GRH_REQUIRED;
}

static inline bool rdma_protocol_ib(const struct ib_device *device, u8 port_num)
{
	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_IB;
}

static inline bool rdma_protocol_roce(const struct ib_device *device, u8 port_num)
{
	return device->port_immutable[port_num].core_cap_flags &
		(RDMA_CORE_CAP_PROT_ROCE | RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP);
}

static inline bool rdma_protocol_roce_udp_encap(const struct ib_device *device, u8 port_num)
{
	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP;
}

static inline bool rdma_protocol_roce_eth_encap(const struct ib_device *device, u8 port_num)
{
	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_ROCE;
}

static inline bool rdma_protocol_iwarp(const struct ib_device *device, u8 port_num)
{
	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_IWARP;
}

static inline bool rdma_ib_or_roce(const struct ib_device *device, u8 port_num)
{
	return rdma_protocol_ib(device, port_num) ||
		rdma_protocol_roce(device, port_num);
}

static inline bool rdma_protocol_raw_packet(const struct ib_device *device, u8 port_num)
{
	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_RAW_PACKET;
}

static inline bool rdma_protocol_usnic(const struct ib_device *device, u8 port_num)
{
	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_USNIC;
}

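/*
 * Example (editorial sketch, not part of the original header): dispatching
 * on a port's transport with the predicates above, as a CM or address
 * resolution layer might. The enum and function name are hypothetical.
 */
enum example_transport {
	EXAMPLE_TRANSPORT_IB,
	EXAMPLE_TRANSPORT_ROCE,
	EXAMPLE_TRANSPORT_IWARP,
	EXAMPLE_TRANSPORT_OTHER,
};

static inline enum example_transport
example_port_transport(const struct ib_device *device, u8 port_num)
{
	if (rdma_protocol_ib(device, port_num))
		return EXAMPLE_TRANSPORT_IB;
	if (rdma_protocol_roce(device, port_num))
		return EXAMPLE_TRANSPORT_ROCE;
	if (rdma_protocol_iwarp(device, port_num))
		return EXAMPLE_TRANSPORT_IWARP;
	return EXAMPLE_TRANSPORT_OTHER;
}
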
/**
 * rdma_cap_ib_mad - Check if the port of a device supports InfiniBand
 * Management Datagrams.
 * @device: Device to check
 * @port_num: Port number to check
 *
 * Management Datagrams (MAD) are a required part of the InfiniBand
 * specification and are supported on all InfiniBand devices. A slightly
 * extended version is also supported on OPA interfaces.
 *
 * Return: true if the port supports sending/receiving of MAD packets.
 */
static inline bool rdma_cap_ib_mad(const struct ib_device *device, u8 port_num)
{
	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_MAD;
}

/**
 * rdma_cap_opa_mad - Check if the port of device provides support for OPA
 * Management Datagrams.
 * @device: Device to check
 * @port_num: Port number to check
 *
 * Intel OmniPath devices extend and/or replace the InfiniBand Management
 * datagrams with their own versions. These OPA MADs share many but not all of
 * the characteristics of InfiniBand MADs.
 *
 * OPA MADs differ in the following ways:
 *
 *    1) MADs are variable size up to 2K
 *       IBTA defined MADs remain fixed at 256 bytes
 *    2) OPA SMPs must carry valid PKeys
 *    3) OPA SMP packets are a different format
 *
 * Return: true if the port supports OPA MAD packet formats.
 */
static inline bool rdma_cap_opa_mad(struct ib_device *device, u8 port_num)
{
	return (device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_OPA_MAD)
		== RDMA_CORE_CAP_OPA_MAD;
}

/**
 * rdma_cap_ib_smi - Check if the port of a device provides an InfiniBand
 * Subnet Management Agent (SMA) on the Subnet Management Interface (SMI).
 * @device: Device to check
 * @port_num: Port number to check
 *
 * Each InfiniBand node is required to provide a Subnet Management Agent
 * that the subnet manager can access. Prior to the fabric being fully
 * configured by the subnet manager, the SMA is accessed via a well known
 * interface called the Subnet Management Interface (SMI). This interface
 * uses directed route packets to communicate with the SM to get around the
 * chicken and egg problem of the SM needing to know what's on the fabric
 * in order to configure the fabric, and needing to configure the fabric in
 * order to send packets to the devices on the fabric. These directed
 * route packets do not need the fabric fully configured in order to reach
 * their destination. The SMI is the only method allowed to send
 * directed route packets on an InfiniBand fabric.
 *
 * Return: true if the port provides an SMI.
 */
static inline bool rdma_cap_ib_smi(const struct ib_device *device, u8 port_num)
{
	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_SMI;
}

/**
 * rdma_cap_ib_cm - Check if the port of device has the capability InfiniBand
 * Communication Manager.
 * @device: Device to check
 * @port_num: Port number to check
 *
 * The InfiniBand Communication Manager is one of many pre-defined General
 * Service Agents (GSA) that are accessed via the General Service
 * Interface (GSI). Its role is to facilitate establishment of connections
 * between nodes as well as other management related tasks for established
 * connections.
 *
 * Return: true if the port supports an IB CM (this does not guarantee that
 * a CM is actually running however).
 */
static inline bool rdma_cap_ib_cm(const struct ib_device *device, u8 port_num)
{
	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_CM;
}

/**
 * rdma_cap_iw_cm - Check if the port of device has the capability IWARP
 * Communication Manager.
 * @device: Device to check
 * @port_num: Port number to check
 *
 * Similar to above, but specific to iWARP connections which have a different
 * management protocol than InfiniBand.
 *
 * Return: true if the port supports an iWARP CM (this does not guarantee that
 * a CM is actually running however).
 */
static inline bool rdma_cap_iw_cm(const struct ib_device *device, u8 port_num)
{
	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IW_CM;
}

/**
 * rdma_cap_ib_sa - Check if the port of device has the capability InfiniBand
 * Subnet Administration.
 * @device: Device to check
 * @port_num: Port number to check
 *
 * An InfiniBand Subnet Administration (SA) service is a pre-defined General
 * Service Agent (GSA) provided by the Subnet Manager (SM). On InfiniBand
 * fabrics, devices should resolve routes to other hosts by contacting the
 * SA to query the proper route.
 *
 * Return: true if the port should act as a client to the fabric Subnet
 * Administration interface. This does not imply that the SA service is
 * running locally.
 */
static inline bool rdma_cap_ib_sa(const struct ib_device *device, u8 port_num)
{
	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_SA;
}

/**
 * rdma_cap_ib_mcast - Check if the port of device has the capability InfiniBand
 * Multicast.
 * @device: Device to check
 * @port_num: Port number to check
 *
 * InfiniBand multicast registration is more complex than normal IPv4 or
 * IPv6 multicast registration. Each Host Channel Adapter must register
 * with the Subnet Manager when it wishes to join a multicast group. It
 * should do so only once regardless of how many queue pairs it subscribes
 * to this group. And it should leave the group only after all queue pairs
 * attached to the group have been detached.
 *
 * Return: true if the port must undertake the additional administrative
 * overhead of registering/unregistering with the SM and tracking of the
 * total number of queue pairs attached to the multicast group.
 */
static inline bool rdma_cap_ib_mcast(const struct ib_device *device, u8 port_num)
{
	return rdma_cap_ib_sa(device, port_num);
}

/**
 * rdma_cap_af_ib - Check if the port of device has the capability
 * Native InfiniBand Address.
 * @device: Device to check
 * @port_num: Port number to check
 *
 * InfiniBand addressing uses a port's GUID + Subnet Prefix to make a default
 * GID. RoCE uses a different mechanism, but still generates a GID via
 * a prescribed mechanism and port specific data.
 *
 * Return: true if the port uses a GID address to identify devices on the
 * network.
 */
static inline bool rdma_cap_af_ib(const struct ib_device *device, u8 port_num)
{
	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_AF_IB;
}

/**
 * rdma_cap_eth_ah - Check if the port of device has the capability
 * Ethernet Address Handle.
 * @device: Device to check
 * @port_num: Port number to check
 *
 * RoCE is InfiniBand over Ethernet, and it uses a well defined technique
 * to fabricate GIDs over Ethernet/IP specific addresses native to the
 * port. Normally, packet headers are generated by the sending host
 * adapter, but when sending connectionless datagrams, we must manually
 * inject the proper headers for the fabric we are communicating over.
 *
 * Return: true if we are running as a RoCE port and must force the
 * addition of a Global Route Header built from our Ethernet Address
 * Handle into our header list for connectionless packets.
 */
static inline bool rdma_cap_eth_ah(const struct ib_device *device, u8 port_num)
{
	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_ETH_AH;
}

/**
 * rdma_cap_opa_ah - Check if the port of device supports
 * OPA Address handles
 * @device: Device to check
 * @port_num: Port number to check
 *
 * Return: true if we are running on an OPA device which supports
 * the extended OPA addressing.
 */
static inline bool rdma_cap_opa_ah(struct ib_device *device, u8 port_num)
{
	return (device->port_immutable[port_num].core_cap_flags &
		RDMA_CORE_CAP_OPA_AH) == RDMA_CORE_CAP_OPA_AH;
}

/**
 * rdma_max_mad_size - Return the max MAD size required by this RDMA Port.
 *
 * @device: Device
 * @port_num: Port number
 *
 * This MAD size includes the MAD headers and MAD payload. No other headers
 * are included.
 *
 * Return the max MAD size required by the Port. Will return 0 if the port
 * does not support MADs.
 */
static inline size_t rdma_max_mad_size(const struct ib_device *device, u8 port_num)
{
	return device->port_immutable[port_num].max_mad_size;
}

/**
 * rdma_cap_roce_gid_table - Check if the port of device uses roce_gid_table
 * @device: Device to check
 * @port_num: Port number to check
 *
 * RoCE GID table mechanism manages the various GIDs for a device.
 *
 * NOTE: if allocating the port's GID table has failed, this call will still
 * return true, but any RoCE GID table API will fail.
 *
 * Return: true if the port uses RoCE GID table mechanism in order to manage
 * its GIDs.
 */
static inline bool rdma_cap_roce_gid_table(const struct ib_device *device,
					   u8 port_num)
{
	return rdma_protocol_roce(device, port_num) &&
		device->add_gid && device->del_gid;
}

/*
 * Check if the device supports READ W/ INVALIDATE.
 */
static inline bool rdma_cap_read_inv(struct ib_device *dev, u32 port_num)
{
	/*
	 * iWarp drivers must support READ W/ INVALIDATE. No other protocol
	 * has support for it yet.
	 */
	return rdma_protocol_iwarp(dev, port_num);
}

int ib_set_vf_link_state(struct ib_device *device, int vf, u8 port,
			 int state);
int ib_get_vf_config(struct ib_device *device, int vf, u8 port,
		     struct ifla_vf_info *info);
int ib_get_vf_stats(struct ib_device *device, int vf, u8 port,
		    struct ifla_vf_stats *stats);
int ib_set_vf_guid(struct ib_device *device, int vf, u8 port, u64 guid,
		   int type);

int ib_query_pkey(struct ib_device *device,
		  u8 port_num, u16 index, u16 *pkey);

int ib_modify_device(struct ib_device *device,
		     int device_modify_mask,
		     struct ib_device_modify *device_modify);

int ib_modify_port(struct ib_device *device,
		   u8 port_num, int port_modify_mask,
		   struct ib_port_modify *port_modify);

int ib_find_gid(struct ib_device *device, union ib_gid *gid,
		u8 *port_num, u16 *index);

int ib_find_pkey(struct ib_device *device,
		 u8 port_num, u16 pkey, u16 *index);

enum ib_pd_flags {
	/*
	 * Create a memory registration for all memory in the system and place
	 * the rkey for it into pd->unsafe_global_rkey. This can be used by
	 * ULPs to avoid the overhead of dynamic MRs.
	 *
	 * This flag is generally considered unsafe and must only be used in
	 * extremely trusted environments. Every use of it will log a warning
	 * in the kernel log.
	 */
	IB_PD_UNSAFE_GLOBAL_RKEY	= 0x01,
};

struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags,
			    const char *caller);
#define ib_alloc_pd(device, flags) \
	__ib_alloc_pd((device), (flags), KBUILD_MODNAME)
void ib_dealloc_pd(struct ib_pd *pd);

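/*
 * Example (editorial sketch, not part of the original header): a kernel ULP
 * normally allocates one PD per device with flags == 0 and keeps it for the
 * lifetime of its resources; IB_PD_UNSAFE_GLOBAL_RKEY is discouraged for
 * the reasons given above. The function name is hypothetical.
 */
static inline struct ib_pd *example_get_pd(struct ib_device *device)
{
	struct ib_pd *pd;

	pd = ib_alloc_pd(device, 0);
	if (IS_ERR(pd))
		return pd;	/* propagate the ERR_PTR() to the caller */

	/* ... create QPs, CQs and MRs under this PD, then ib_dealloc_pd() ... */
	return pd;
}
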
/**
 * rdma_create_ah - Creates an address handle for the given address vector.
 * @pd: The protection domain associated with the address handle.
 * @ah_attr: The attributes of the address vector.
 *
 * The address handle is used to reference a local or global destination
 * in all UD QP post sends.
 */
struct ib_ah *rdma_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr);

/**
 * rdma_create_user_ah - Creates an address handle for the given address vector.
 * It resolves the destination MAC address for ah attributes of RoCE type.
 * @pd: The protection domain associated with the address handle.
 * @ah_attr: The attributes of the address vector.
 * @udata: pointer to user's input/output buffer information needed by
 *   the provider driver.
 *
 * It returns 0 on success and returns appropriate error code on error.
 * The address handle is used to reference a local or global destination
 * in all UD QP post sends.
 */
struct ib_ah *rdma_create_user_ah(struct ib_pd *pd,
				  struct rdma_ah_attr *ah_attr,
				  struct ib_udata *udata);
/**
 * ib_get_gids_from_rdma_hdr - Get sgid and dgid from GRH or IPv4 header
 *   work completion.
 * @hdr: the L3 header to parse
 * @net_type: type of header to parse
 * @sgid: place to store source gid
 * @dgid: place to store destination gid
 */
int ib_get_gids_from_rdma_hdr(const union rdma_network_hdr *hdr,
			      enum rdma_network_type net_type,
			      union ib_gid *sgid, union ib_gid *dgid);

/**
 * ib_get_rdma_header_version - Get the header version
 * @hdr: the L3 header to parse
 */
int ib_get_rdma_header_version(const union rdma_network_hdr *hdr);

/**
 * ib_init_ah_attr_from_wc - Initializes address handle attributes from a
 *   work completion.
 * @device: Device on which the received message arrived.
 * @port_num: Port on which the received message arrived.
 * @wc: Work completion associated with the received message.
 * @grh: References the received global route header. This parameter is
 *   ignored unless the work completion indicates that the GRH is valid.
 * @ah_attr: Returned attributes that can be used when creating an address
 *   handle for replying to the message.
 * When ib_init_ah_attr_from_wc() returns success:
 * (a) for the IB link layer it optionally contains a reference to the SGID
 *     attribute when a GRH is present.
 * (b) for the RoCE link layer it contains a reference to the SGID attribute.
 * User must invoke rdma_cleanup_ah_attr_gid_attr() to release reference to SGID
 * attributes which are initialized using ib_init_ah_attr_from_wc().
 *
 */
int ib_init_ah_attr_from_wc(struct ib_device *device, u8 port_num,
			    const struct ib_wc *wc, const struct ib_grh *grh,
			    struct rdma_ah_attr *ah_attr);

/**
 * ib_create_ah_from_wc - Creates an address handle associated with the
 *   sender of the specified work completion.
 * @pd: The protection domain associated with the address handle.
 * @wc: Work completion information associated with a received message.
 * @grh: References the received global route header. This parameter is
 *   ignored unless the work completion indicates that the GRH is valid.
 * @port_num: The outbound port number to associate with the address.
 *
 * The address handle is used to reference a local or global destination
 * in all UD QP post sends.
 */
struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, const struct ib_wc *wc,
				   const struct ib_grh *grh, u8 port_num);

/**
 * rdma_modify_ah - Modifies the address vector associated with an address
 *   handle.
 * @ah: The address handle to modify.
 * @ah_attr: The new address vector attributes to associate with the
 *   address handle.
 */
int rdma_modify_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);

/**
 * rdma_query_ah - Queries the address vector associated with an address
 *   handle.
 * @ah: The address handle to query.
 * @ah_attr: The address vector attributes associated with the address
 *   handle.
 */
int rdma_query_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);

/**
 * rdma_destroy_ah - Destroys an address handle.
 * @ah: The address handle to destroy.
 */
int rdma_destroy_ah(struct ib_ah *ah);

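/*
 * Example (editorial sketch, not part of the original header): replying to
 * a received UD message by building an address handle from its work
 * completion and destroying the handle when done. The function name is
 * hypothetical; @grh points into the receive buffer and is only looked at
 * when the work completion flags a valid GRH.
 */
static inline int example_reply_ah(struct ib_pd *pd, const struct ib_wc *wc,
				   const struct ib_grh *grh, u8 port_num)
{
	struct ib_ah *ah;

	ah = ib_create_ah_from_wc(pd, wc, grh, port_num);
	if (IS_ERR(ah))
		return PTR_ERR(ah);

	/* ... post UD sends referencing 'ah' and wait for completions ... */

	return rdma_destroy_ah(ah);
}
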
/**
 * ib_create_srq - Creates a SRQ associated with the specified protection
 *   domain.
 * @pd: The protection domain associated with the SRQ.
 * @srq_init_attr: A list of initial attributes required to create the
 *   SRQ. If SRQ creation succeeds, then the attributes are updated to
 *   the actual capabilities of the created SRQ.
 *
 * srq_attr->max_wr and srq_attr->max_sge are read to determine the
 * requested size of the SRQ, and set to the actual values allocated
 * on return. If ib_create_srq() succeeds, then max_wr and max_sge
 * will always be at least as large as the requested values.
 */
struct ib_srq *ib_create_srq(struct ib_pd *pd,
			     struct ib_srq_init_attr *srq_init_attr);

/**
 * ib_modify_srq - Modifies the attributes for the specified SRQ.
 * @srq: The SRQ to modify.
 * @srq_attr: On input, specifies the SRQ attributes to modify. On output,
 *   the current values of selected SRQ attributes are returned.
 * @srq_attr_mask: A bit-mask used to specify which attributes of the SRQ
 *   are being modified.
 *
 * The mask may contain IB_SRQ_MAX_WR to resize the SRQ and/or
 * IB_SRQ_LIMIT to set the SRQ's limit and request notification when
 * the number of receives queued drops below the limit.
 */
int ib_modify_srq(struct ib_srq *srq,
		  struct ib_srq_attr *srq_attr,
		  enum ib_srq_attr_mask srq_attr_mask);

/**
 * ib_query_srq - Returns the attribute list and current values for the
 *   specified SRQ.
 * @srq: The SRQ to query.
 * @srq_attr: The attributes of the specified SRQ.
 */
int ib_query_srq(struct ib_srq *srq,
		 struct ib_srq_attr *srq_attr);

/**
 * ib_destroy_srq - Destroys the specified SRQ.
 * @srq: The SRQ to destroy.
 */
int ib_destroy_srq(struct ib_srq *srq);

/**
 * ib_post_srq_recv - Posts a list of work requests to the specified SRQ.
 * @srq: The SRQ to post the work request on.
 * @recv_wr: A list of work requests to post on the receive queue.
 * @bad_recv_wr: On an immediate failure, this parameter will reference
 *   the work request that failed to be posted on the SRQ.
 */
static inline int ib_post_srq_recv(struct ib_srq *srq,
				   const struct ib_recv_wr *recv_wr,
				   const struct ib_recv_wr **bad_recv_wr)
{
	const struct ib_recv_wr *dummy;

	return srq->device->post_srq_recv(srq, recv_wr, bad_recv_wr ? : &dummy);
}

/**
 * ib_create_qp - Creates a QP associated with the specified protection
 *   domain.
 * @pd: The protection domain associated with the QP.
 * @qp_init_attr: A list of initial attributes required to create the
 *   QP. If QP creation succeeds, then the attributes are updated to
 *   the actual capabilities of the created QP.
 */
struct ib_qp *ib_create_qp(struct ib_pd *pd,
			   struct ib_qp_init_attr *qp_init_attr);

/**
 * ib_modify_qp_with_udata - Modifies the attributes for the specified QP.
 * @qp: The QP to modify.
 * @attr: On input, specifies the QP attributes to modify. On output,
 *   the current values of selected QP attributes are returned.
 * @attr_mask: A bit-mask used to specify which attributes of the QP
 *   are being modified.
 * @udata: pointer to user's input/output buffer information.
 * It returns 0 on success and returns appropriate error code on error.
 */
int ib_modify_qp_with_udata(struct ib_qp *qp,
			    struct ib_qp_attr *attr,
			    int attr_mask,
			    struct ib_udata *udata);

/**
 * ib_modify_qp - Modifies the attributes for the specified QP and then
 *   transitions the QP to the given state.
 * @qp: The QP to modify.
 * @qp_attr: On input, specifies the QP attributes to modify. On output,
 *   the current values of selected QP attributes are returned.
 * @qp_attr_mask: A bit-mask used to specify which attributes of the QP
 *   are being modified.
 */
int ib_modify_qp(struct ib_qp *qp,
		 struct ib_qp_attr *qp_attr,
		 int qp_attr_mask);

/**
 * ib_query_qp - Returns the attribute list and current values for the
 *   specified QP.
 * @qp: The QP to query.
 * @qp_attr: The attributes of the specified QP.
 * @qp_attr_mask: A bit-mask used to select specific attributes to query.
 * @qp_init_attr: Additional attributes of the selected QP.
 *
 * The qp_attr_mask may be used to limit the query to gathering only the
 * selected attributes.
 */
int ib_query_qp(struct ib_qp *qp,
		struct ib_qp_attr *qp_attr,
		int qp_attr_mask,
		struct ib_qp_init_attr *qp_init_attr);

/**
 * ib_destroy_qp - Destroys the specified QP.
 * @qp: The QP to destroy.
 */
int ib_destroy_qp(struct ib_qp *qp);

/**
 * ib_open_qp - Obtain a reference to an existing sharable QP.
 * @xrcd: XRC domain
 * @qp_open_attr: Attributes identifying the QP to open.
 *
 * Returns a reference to a sharable QP.
 */
struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd,
			 struct ib_qp_open_attr *qp_open_attr);

/**
 * ib_close_qp - Release an external reference to a QP.
 * @qp: The QP handle to release
 *
 * The opened QP handle is released by the caller. The underlying
 * shared QP is not destroyed until all internal references are released.
 */
int ib_close_qp(struct ib_qp *qp);

/**
 * ib_post_send - Posts a list of work requests to the send queue of
 *   the specified QP.
 * @qp: The QP to post the work request on.
 * @send_wr: A list of work requests to post on the send queue.
 * @bad_send_wr: On an immediate failure, this parameter will reference
 *   the work request that failed to be posted on the QP.
 *
 * While IBA Vol. 1 section 11.4.1.1 specifies that if an immediate
 * error is returned, the QP state shall not be affected,
 * ib_post_send() will return an immediate error after queueing any
 * earlier work requests in the list.
 */
static inline int ib_post_send(struct ib_qp *qp,
			       const struct ib_send_wr *send_wr,
			       const struct ib_send_wr **bad_send_wr)
{
	const struct ib_send_wr *dummy;

	return qp->device->post_send(qp, send_wr, bad_send_wr ? : &dummy);
}

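/*
 * Example (editorial sketch, not part of the original header): posting a
 * single signaled two-sided send with one scatter/gather element. The
 * buffer must already be DMA-mapped and covered by @lkey; the wr_id cookie
 * and the function name are hypothetical.
 */
static inline int example_post_one_send(struct ib_qp *qp, u64 dma_addr,
					u32 length, u32 lkey)
{
	struct ib_sge sge = {
		.addr	= dma_addr,
		.length	= length,
		.lkey	= lkey,
	};
	struct ib_send_wr wr = {
		.wr_id	    = 1,	/* hypothetical completion cookie */
		.sg_list    = &sge,
		.num_sge    = 1,
		.opcode	    = IB_WR_SEND,
		.send_flags = IB_SEND_SIGNALED,
	};

	/* Passing NULL for bad_send_wr is allowed, as the ?: above shows. */
	return ib_post_send(qp, &wr, NULL);
}
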
/**
 * ib_post_recv - Posts a list of work requests to the receive queue of
 *   the specified QP.
 * @qp: The QP to post the work request on.
 * @recv_wr: A list of work requests to post on the receive queue.
 * @bad_recv_wr: On an immediate failure, this parameter will reference
 *   the work request that failed to be posted on the QP.
 */
static inline int ib_post_recv(struct ib_qp *qp,
			       const struct ib_recv_wr *recv_wr,
			       const struct ib_recv_wr **bad_recv_wr)
{
	const struct ib_recv_wr *dummy;

	return qp->device->post_recv(qp, recv_wr, bad_recv_wr ? : &dummy);
}

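/*
 * Example (editorial sketch, not part of the original header): the matching
 * receive posting; receives must be queued before the peer's sends arrive.
 * The function name is hypothetical.
 */
static inline int example_post_one_recv(struct ib_qp *qp, u64 dma_addr,
					u32 length, u32 lkey)
{
	struct ib_sge sge = {
		.addr	= dma_addr,
		.length	= length,
		.lkey	= lkey,
	};
	struct ib_recv_wr wr = {
		.wr_id	 = 2,		/* hypothetical completion cookie */
		.sg_list = &sge,
		.num_sge = 1,
	};

	return ib_post_recv(qp, &wr, NULL);
}
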
struct ib_cq *__ib_alloc_cq(struct ib_device *dev, void *private,
			    int nr_cqe, int comp_vector,
			    enum ib_poll_context poll_ctx, const char *caller);
#define ib_alloc_cq(device, priv, nr_cqe, comp_vect, poll_ctx) \
	__ib_alloc_cq((device), (priv), (nr_cqe), (comp_vect), (poll_ctx), KBUILD_MODNAME)

void ib_free_cq(struct ib_cq *cq);
int ib_process_cq_direct(struct ib_cq *cq, int budget);

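/*
 * Example (editorial sketch, not part of the original header): kernel ULPs
 * normally prefer ib_alloc_cq()/ib_free_cq() over raw ib_create_cq(); the
 * poll context chooses how completions are processed (direct, soft-IRQ or
 * workqueue). The function name is hypothetical.
 */
static inline struct ib_cq *example_alloc_cq(struct ib_device *device,
					     void *priv, int depth)
{
	/* comp_vector 0; completions handled from soft-IRQ context */
	return ib_alloc_cq(device, priv, depth, 0, IB_POLL_SOFTIRQ);
}
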
/**
 * ib_create_cq - Creates a CQ on the specified device.
 * @device: The device on which to create the CQ.
 * @comp_handler: A user-specified callback that is invoked when a
 *   completion event occurs on the CQ.
 * @event_handler: A user-specified callback that is invoked when an
 *   asynchronous event not associated with a completion occurs on the CQ.
 * @cq_context: Context associated with the CQ returned to the user via
 *   the associated completion and event handlers.
 * @cq_attr: The attributes the CQ should be created upon.
 *
 * Users can examine the cq structure to determine the actual CQ size.
 */
struct ib_cq *__ib_create_cq(struct ib_device *device,
			     ib_comp_handler comp_handler,
			     void (*event_handler)(struct ib_event *, void *),
			     void *cq_context,
			     const struct ib_cq_init_attr *cq_attr,
			     const char *caller);
#define ib_create_cq(device, cmp_hndlr, evt_hndlr, cq_ctxt, cq_attr) \
	__ib_create_cq((device), (cmp_hndlr), (evt_hndlr), (cq_ctxt), (cq_attr), KBUILD_MODNAME)

/**
 * ib_resize_cq - Modifies the capacity of the CQ.
 * @cq: The CQ to resize.
 * @cqe: The minimum size of the CQ.
 *
 * Users can examine the cq structure to determine the actual CQ size.
 */
int ib_resize_cq(struct ib_cq *cq, int cqe);

/**
 * rdma_set_cq_moderation - Modifies moderation params of the CQ
 * @cq: The CQ to modify.
 * @cq_count: number of CQEs that will trigger an event
 * @cq_period: max period of time in usec before triggering an event
 */
int rdma_set_cq_moderation(struct ib_cq *cq, u16 cq_count, u16 cq_period);

/**
 * ib_destroy_cq - Destroys the specified CQ.
 * @cq: The CQ to destroy.
 */
int ib_destroy_cq(struct ib_cq *cq);

/**
 * ib_poll_cq - poll a CQ for completion(s)
 * @cq: the CQ being polled
 * @num_entries: maximum number of completions to return
 * @wc: array of at least @num_entries &struct ib_wc where completions
 *   will be returned
 *
 * Poll a CQ for (possibly multiple) completions. If the return value
 * is < 0, an error occurred. If the return value is >= 0, it is the
 * number of completions returned. If the return value is
 * non-negative and < num_entries, then the CQ was emptied.
 */
static inline int ib_poll_cq(struct ib_cq *cq, int num_entries,
			     struct ib_wc *wc)
{
	return cq->device->poll_cq(cq, num_entries, wc);
}

/**
 * ib_req_notify_cq - Request completion notification on a CQ.
 * @cq: The CQ to generate an event for.
 * @flags:
 *   Must contain exactly one of %IB_CQ_SOLICITED or %IB_CQ_NEXT_COMP
 *   to request an event on the next solicited event or next work
 *   completion of any type, respectively. %IB_CQ_REPORT_MISSED_EVENTS
 *   may also be |ed in to request a hint about missed events, as
 *   described below.
 *
 * Return Value:
 *    < 0 means an error occurred while requesting notification
 *   == 0 means notification was requested successfully, and if
 *        IB_CQ_REPORT_MISSED_EVENTS was passed in, then no events
 *        were missed and it is safe to wait for another event. In
 *        this case it is guaranteed that any work completions added
 *        to the CQ since the last CQ poll will trigger a completion
 *        notification event.
 *    > 0 is only returned if IB_CQ_REPORT_MISSED_EVENTS was passed
 *        in. It means that the consumer must poll the CQ again to
 *        make sure it is empty to avoid missing an event because of a
 *        race between requesting notification and an entry being
 *        added to the CQ. This return value means it is possible
 *        (but not guaranteed) that a work completion has been added
 *        to the CQ since the last poll without triggering a
 *        completion notification event.
 */
static inline int ib_req_notify_cq(struct ib_cq *cq,
				   enum ib_cq_notify_flags flags)
{
	return cq->device->req_notify_cq(cq, flags);
}

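/*
 * Example (editorial sketch, not part of the original header): the classic
 * drain-then-rearm loop that closes the race described above by re-polling
 * whenever IB_CQ_REPORT_MISSED_EVENTS reports a possibly missed event. The
 * per-completion handler is hypothetical.
 */
static inline void example_drain_and_rearm(struct ib_cq *cq,
					   void (*handle)(struct ib_wc *wc))
{
	struct ib_wc wc;

	do {
		while (ib_poll_cq(cq, 1, &wc) > 0)
			handle(&wc);
	} while (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
				      IB_CQ_REPORT_MISSED_EVENTS) > 0);
}
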
/**
 * ib_req_ncomp_notif - Request completion notification when there are
 *   at least the specified number of unreaped completions on the CQ.
 * @cq: The CQ to generate an event for.
 * @wc_cnt: The number of unreaped completions that should be on the
 *   CQ before an event is generated.
 */
static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt)
{
	return cq->device->req_ncomp_notif ?
		cq->device->req_ncomp_notif(cq, wc_cnt) :
		-ENOSYS;
}

/**
 * ib_dma_mapping_error - check a DMA addr for error
 * @dev: The device for which the dma_addr was created
 * @dma_addr: The DMA address to check
 */
static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
{
	return dma_mapping_error(dev->dma_device, dma_addr);
}

/**
 * ib_dma_map_single - Map a kernel virtual address to DMA address
 * @dev: The device for which the dma_addr is to be created
 * @cpu_addr: The kernel virtual address
 * @size: The size of the region in bytes
 * @direction: The direction of the DMA
 */
static inline u64 ib_dma_map_single(struct ib_device *dev,
				    void *cpu_addr, size_t size,
				    enum dma_data_direction direction)
{
	return dma_map_single(dev->dma_device, cpu_addr, size, direction);
}

/**
 * ib_dma_unmap_single - Destroy a mapping created by ib_dma_map_single()
 * @dev: The device for which the DMA address was created
 * @addr: The DMA address
 * @size: The size of the region in bytes
 * @direction: The direction of the DMA
 */
static inline void ib_dma_unmap_single(struct ib_device *dev,
				       u64 addr, size_t size,
				       enum dma_data_direction direction)
{
	dma_unmap_single(dev->dma_device, addr, size, direction);
}

/**
 * ib_dma_map_page - Map a physical page to DMA address
 * @dev: The device for which the dma_addr is to be created
 * @page: The page to be mapped
 * @offset: The offset within the page
 * @size: The size of the region in bytes
 * @direction: The direction of the DMA
 */
static inline u64 ib_dma_map_page(struct ib_device *dev,
				  struct page *page,
				  unsigned long offset,
				  size_t size,
				  enum dma_data_direction direction)
{
	return dma_map_page(dev->dma_device, page, offset, size, direction);
}

/**
 * ib_dma_unmap_page - Destroy a mapping created by ib_dma_map_page()
 * @dev: The device for which the DMA address was created
 * @addr: The DMA address
 * @size: The size of the region in bytes
 * @direction: The direction of the DMA
 */
static inline void ib_dma_unmap_page(struct ib_device *dev,
				     u64 addr, size_t size,
				     enum dma_data_direction direction)
{
	dma_unmap_page(dev->dma_device, addr, size, direction);
}

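/*
 * Example (editorial sketch, not part of the original header): the lifetime
 * of a single-buffer DMA mapping around HCA access. The function name is
 * hypothetical; DMA_BIDIRECTIONAL comes from <linux/dma-mapping.h>.
 */
static inline int example_with_mapped_buf(struct ib_device *dev, void *buf,
					  size_t len)
{
	u64 dma_addr;

	dma_addr = ib_dma_map_single(dev, buf, len, DMA_BIDIRECTIONAL);
	if (ib_dma_mapping_error(dev, dma_addr))
		return -ENOMEM;

	/* ... post work requests using dma_addr and reap completions ... */

	ib_dma_unmap_single(dev, dma_addr, len, DMA_BIDIRECTIONAL);
	return 0;
}
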
/**
 * ib_dma_map_sg - Map a scatter/gather list to DMA addresses
 * @dev: The device for which the DMA addresses are to be created
 * @sg: The array of scatter/gather entries
 * @nents: The number of scatter/gather entries
 * @direction: The direction of the DMA
 */
static inline int ib_dma_map_sg(struct ib_device *dev,
				struct scatterlist *sg, int nents,
				enum dma_data_direction direction)
{
	return dma_map_sg(dev->dma_device, sg, nents, direction);
}

/**
 * ib_dma_unmap_sg - Unmap a scatter/gather list of DMA addresses
 * @dev: The device for which the DMA addresses were created
 * @sg: The array of scatter/gather entries
 * @nents: The number of scatter/gather entries
 * @direction: The direction of the DMA
 */
static inline void ib_dma_unmap_sg(struct ib_device *dev,
				   struct scatterlist *sg, int nents,
				   enum dma_data_direction direction)
{
	dma_unmap_sg(dev->dma_device, sg, nents, direction);
}

static inline int ib_dma_map_sg_attrs(struct ib_device *dev,
				      struct scatterlist *sg, int nents,
				      enum dma_data_direction direction,
				      unsigned long dma_attrs)
{
	return dma_map_sg_attrs(dev->dma_device, sg, nents, direction,
				dma_attrs);
}

static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
					 struct scatterlist *sg, int nents,
					 enum dma_data_direction direction,
					 unsigned long dma_attrs)
{
	dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction, dma_attrs);
}
/**
 * ib_sg_dma_address - Return the DMA address from a scatter/gather entry
 * @dev: The device for which the DMA addresses were created
 * @sg: The scatter/gather entry
 *
 * Note: this function is obsolete. To do: change all occurrences of
 * ib_sg_dma_address() into sg_dma_address().
 */
static inline u64 ib_sg_dma_address(struct ib_device *dev,
				    struct scatterlist *sg)
{
	return sg_dma_address(sg);
}

/**
 * ib_sg_dma_len - Return the DMA length from a scatter/gather entry
 * @dev: The device for which the DMA addresses were created
 * @sg: The scatter/gather entry
 *
 * Note: this function is obsolete. To do: change all occurrences of
 * ib_sg_dma_len() into sg_dma_len().
 */
static inline unsigned int ib_sg_dma_len(struct ib_device *dev,
					 struct scatterlist *sg)
{
	return sg_dma_len(sg);
}

/**
 * ib_dma_sync_single_for_cpu - Prepare DMA region to be accessed by CPU
 * @dev: The device for which the DMA address was created
 * @addr: The DMA address
 * @size: The size of the region in bytes
 * @dir: The direction of the DMA
 */
static inline void ib_dma_sync_single_for_cpu(struct ib_device *dev,
					      u64 addr,
					      size_t size,
					      enum dma_data_direction dir)
{
	dma_sync_single_for_cpu(dev->dma_device, addr, size, dir);
}

/**
 * ib_dma_sync_single_for_device - Prepare DMA region to be accessed by device
 * @dev: The device for which the DMA address was created
 * @addr: The DMA address
 * @size: The size of the region in bytes
 * @dir: The direction of the DMA
 */
static inline void ib_dma_sync_single_for_device(struct ib_device *dev,
						 u64 addr,
						 size_t size,
						 enum dma_data_direction dir)
{
	dma_sync_single_for_device(dev->dma_device, addr, size, dir);
}

/**
 * ib_dma_alloc_coherent - Allocate memory and map it for DMA
 * @dev: The device for which the DMA address is requested
 * @size: The size of the region to allocate in bytes
 * @dma_handle: A pointer for returning the DMA address of the region
 * @flag: memory allocator flags
 */
static inline void *ib_dma_alloc_coherent(struct ib_device *dev,
					  size_t size,
					  dma_addr_t *dma_handle,
					  gfp_t flag)
{
	return dma_alloc_coherent(dev->dma_device, size, dma_handle, flag);
}

/**
 * ib_dma_free_coherent - Free memory allocated by ib_dma_alloc_coherent()
 * @dev: The device for which the DMA addresses were allocated
 * @size: The size of the region
 * @cpu_addr: the address returned by ib_dma_alloc_coherent()
 * @dma_handle: the DMA address returned by ib_dma_alloc_coherent()
 */
static inline void ib_dma_free_coherent(struct ib_device *dev,
					size_t size, void *cpu_addr,
					dma_addr_t dma_handle)
{
	dma_free_coherent(dev->dma_device, size, cpu_addr, dma_handle);
}

/**
 * ib_dereg_mr - Deregisters a memory region and removes it from the
 *   HCA translation table.
 * @mr: The memory region to deregister.
 *
 * This function can fail, if the memory region has memory windows bound to it.
 */
int ib_dereg_mr(struct ib_mr *mr);

struct ib_mr *ib_alloc_mr(struct ib_pd *pd,
			  enum ib_mr_type mr_type,
			  u32 max_num_sg);

/**
 * ib_update_fast_reg_key - updates the key portion of the fast_reg MR
 *   R_Key and L_Key.
 * @mr: struct ib_mr pointer to be updated.
 * @newkey: new key to be used.
 */
static inline void ib_update_fast_reg_key(struct ib_mr *mr, u8 newkey)
{
	mr->lkey = (mr->lkey & 0xffffff00) | newkey;
	mr->rkey = (mr->rkey & 0xffffff00) | newkey;
}

/**
 * ib_inc_rkey - increments the key portion of the given rkey. Can be used
 * for calculating a new rkey for type 2 memory windows.
 * @rkey: the rkey to increment.
 */
static inline u32 ib_inc_rkey(u32 rkey)
{
	const u32 mask = 0x000000ff;
	return ((rkey + 1) & mask) | (rkey & ~mask);
}

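/*
 * Example (editorial sketch, not part of the original header): before
 * re-registering a fast-reg MR, ULPs commonly bump the key portion so that
 * stale remote references carrying the old rkey are guaranteed to fault.
 * The function name is hypothetical.
 */
static inline void example_refresh_mr_key(struct ib_mr *mr)
{
	u8 key = ib_inc_rkey(mr->rkey) & 0xff;	/* new low-byte key */

	ib_update_fast_reg_key(mr, key);
}
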
3823/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07003824 * ib_alloc_fmr - Allocates a unmapped fast memory region.
3825 * @pd: The protection domain associated with the unmapped region.
3826 * @mr_access_flags: Specifies the memory access rights.
3827 * @fmr_attr: Attributes of the unmapped region.
3828 *
3829 * A fast memory region must be mapped before it can be used as part of
3830 * a work request.
3831 */
3832struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
3833 int mr_access_flags,
3834 struct ib_fmr_attr *fmr_attr);
3835
3836/**
3837 * ib_map_phys_fmr - Maps a list of physical pages to a fast memory region.
3838 * @fmr: The fast memory region to associate with the pages.
3839 * @page_list: An array of physical pages to map to the fast memory region.
3840 * @list_len: The number of pages in page_list.
3841 * @iova: The I/O virtual address to use with the mapped region.
3842 */
3843static inline int ib_map_phys_fmr(struct ib_fmr *fmr,
3844 u64 *page_list, int list_len,
3845 u64 iova)
3846{
3847 return fmr->device->map_phys_fmr(fmr, page_list, list_len, iova);
3848}
3849
3850/**
3851 * ib_unmap_fmr - Removes the mapping from a list of fast memory regions.
3852 * @fmr_list: A linked list of fast memory regions to unmap.
3853 */
3854int ib_unmap_fmr(struct list_head *fmr_list);
3855
3856/**
3857 * ib_dealloc_fmr - Deallocates a fast memory region.
3858 * @fmr: The fast memory region to deallocate.
3859 */
3860int ib_dealloc_fmr(struct ib_fmr *fmr);
3861
3862/**
3863 * ib_attach_mcast - Attaches the specified QP to a multicast group.
3864 * @qp: QP to attach to the multicast group. The QP must be type
3865 * IB_QPT_UD.
3866 * @gid: Multicast group GID.
3867 * @lid: Multicast group LID in host byte order.
3868 *
3869 * In order to send and receive multicast packets, subnet
3870 * administration must have created the multicast group and configured
3871 * the fabric appropriately. The port associated with the specified
3872 * QP must also be a member of the multicast group.
3873 */
3874int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
3875
3876/**
3877 * ib_detach_mcast - Detaches the specified QP from a multicast group.
3878 * @qp: QP to detach from the multicast group.
3879 * @gid: Multicast group GID.
3880 * @lid: Multicast group LID in host byte order.
3881 */
3882int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
3883
Sean Hefty59991f92011-05-23 17:52:46 -07003884/**
3885 * ib_alloc_xrcd - Allocates an XRC domain.
3886 * @device: The device on which to allocate the XRC domain.
Leon Romanovskyf66c8ba2018-01-28 11:17:19 +02003887 * @caller: Module name for kernel consumers
Sean Hefty59991f92011-05-23 17:52:46 -07003888 */
Leon Romanovskyf66c8ba2018-01-28 11:17:19 +02003889struct ib_xrcd *__ib_alloc_xrcd(struct ib_device *device, const char *caller);
3890#define ib_alloc_xrcd(device) \
3891 __ib_alloc_xrcd((device), KBUILD_MODNAME)
Sean Hefty59991f92011-05-23 17:52:46 -07003892
3893/**
3894 * ib_dealloc_xrcd - Deallocates an XRC domain.
3895 * @xrcd: The XRC domain to deallocate.
3896 */
3897int ib_dealloc_xrcd(struct ib_xrcd *xrcd);
3898
Eli Cohen1c636f82013-10-31 15:26:32 +02003899static inline int ib_check_mr_access(int flags)
3900{
3901 /*
3902 * Local write permission is required if remote write or
3903 * remote atomic permission is also requested.
3904 */
3905 if (flags & (IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_REMOTE_WRITE) &&
3906 !(flags & IB_ACCESS_LOCAL_WRITE))
3907 return -EINVAL;
3908
3909 return 0;
3910}
3911
Jack Morgenstein08bb5582018-05-23 15:30:30 +03003912static inline bool ib_access_writable(int access_flags)
3913{
3914 /*
3915 * We have writable memory backing the MR if any of the following
3916 * access flags are set. "Local write" and "remote write" obviously
3917 * require write access. "Remote atomic" can do things like fetch and
3918 * add, which will modify memory, and "MW bind" can change permissions
3919 * by binding a window.
3920 */
3921 return access_flags &
3922 (IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE |
3923 IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_MW_BIND);
3924}
3925
/**
 * ib_check_mr_status - lightweight check of MR status.
 * This routine may provide status checks on a selected
 * ib_mr.  The first use is for signature status checks.
 *
 * @mr: A memory region.
 * @check_mask: Bitmask of which checks to perform from
 *     ib_mr_status_check enumeration.
 * @mr_status: The container of relevant status checks.
 *     Failed checks will be indicated in the status bitmask
 *     and the relevant info shall be in the error item.
 */
int ib_check_mr_status(struct ib_mr *mr, u32 check_mask,
		       struct ib_mr_status *mr_status);

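/*
 * Sketch of a signature-offload completion path (hypothetical caller): after
 * a signature-enabled transfer completes, query the MR and inspect the
 * reported error, e.g.:
 *
 *	struct ib_mr_status mr_status;
 *
 *	if (!ib_check_mr_status(mr, IB_MR_CHECK_SIG_STATUS, &mr_status) &&
 *	    (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS))
 *		pr_err("signature error at offset %llu\n",
 *		       mr_status.sig_err.sig_err_offset);
 */
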
struct net_device *ib_get_net_dev_by_params(struct ib_device *dev, u8 port,
					    u16 pkey, const union ib_gid *gid,
					    const struct sockaddr *addr);
struct ib_wq *ib_create_wq(struct ib_pd *pd,
			   struct ib_wq_init_attr *init_attr);
int ib_destroy_wq(struct ib_wq *wq);
int ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *attr,
		 u32 wq_attr_mask);
struct ib_rwq_ind_table *ib_create_rwq_ind_table(
		struct ib_device *device,
		struct ib_rwq_ind_table_init_attr *wq_ind_table_init_attr);
int ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table);

int ib_map_mr_sg(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
		 unsigned int *sg_offset, unsigned int page_size);

static inline int
ib_map_mr_sg_zbva(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
		  unsigned int *sg_offset, unsigned int page_size)
{
	int n;

	n = ib_map_mr_sg(mr, sg, sg_nents, sg_offset, page_size);
	mr->iova = 0;

	return n;
}

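/*
 * Fast-registration sketch (hypothetical caller): map a DMA-mapped
 * scatterlist onto an MR before posting an IB_WR_REG_MR work request.
 * sg and sg_nents are assumed to come from the caller's dma_map_sg() call;
 * PAGE_SIZE is one valid choice of page_size.
 *
 *	n = ib_map_mr_sg(mr, sg, sg_nents, NULL, PAGE_SIZE);
 *	if (n < sg_nents)
 *		return n < 0 ? n : -EINVAL;
 *	... post IB_WR_REG_MR using mr->lkey ...
 */
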
int ib_sg_to_pages(struct ib_mr *mr, struct scatterlist *sgl, int sg_nents,
		   unsigned int *sg_offset, int (*set_page)(struct ib_mr *, u64));

void ib_drain_rq(struct ib_qp *qp);
void ib_drain_sq(struct ib_qp *qp);
void ib_drain_qp(struct ib_qp *qp);

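/*
 * Teardown sketch: drain a QP so no completions remain outstanding before
 * destroying it (ib_drain_qp() moves the QP to the error state and waits
 * for the drain to finish):
 *
 *	ib_drain_qp(qp);
 *	ib_destroy_qp(qp);
 */
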
int ib_get_eth_speed(struct ib_device *dev, u8 port_num, u8 *speed, u8 *width);

static inline u8 *rdma_ah_retrieve_dmac(struct rdma_ah_attr *attr)
{
	if (attr->type == RDMA_AH_ATTR_TYPE_ROCE)
		return attr->roce.dmac;
	return NULL;
}

static inline void rdma_ah_set_dlid(struct rdma_ah_attr *attr, u32 dlid)
{
	if (attr->type == RDMA_AH_ATTR_TYPE_IB)
		attr->ib.dlid = (u16)dlid;
	else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
		attr->opa.dlid = dlid;
}

static inline u32 rdma_ah_get_dlid(const struct rdma_ah_attr *attr)
{
	if (attr->type == RDMA_AH_ATTR_TYPE_IB)
		return attr->ib.dlid;
	else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
		return attr->opa.dlid;
	return 0;
}

static inline void rdma_ah_set_sl(struct rdma_ah_attr *attr, u8 sl)
{
	attr->sl = sl;
}

static inline u8 rdma_ah_get_sl(const struct rdma_ah_attr *attr)
{
	return attr->sl;
}

static inline void rdma_ah_set_path_bits(struct rdma_ah_attr *attr,
					 u8 src_path_bits)
{
	if (attr->type == RDMA_AH_ATTR_TYPE_IB)
		attr->ib.src_path_bits = src_path_bits;
	else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
		attr->opa.src_path_bits = src_path_bits;
}

static inline u8 rdma_ah_get_path_bits(const struct rdma_ah_attr *attr)
{
	if (attr->type == RDMA_AH_ATTR_TYPE_IB)
		return attr->ib.src_path_bits;
	else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
		return attr->opa.src_path_bits;
	return 0;
}

static inline void rdma_ah_set_make_grd(struct rdma_ah_attr *attr,
					bool make_grd)
{
	if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
		attr->opa.make_grd = make_grd;
}

static inline bool rdma_ah_get_make_grd(const struct rdma_ah_attr *attr)
{
	if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
		return attr->opa.make_grd;
	return false;
}

static inline void rdma_ah_set_port_num(struct rdma_ah_attr *attr, u8 port_num)
{
	attr->port_num = port_num;
}

static inline u8 rdma_ah_get_port_num(const struct rdma_ah_attr *attr)
{
	return attr->port_num;
}

static inline void rdma_ah_set_static_rate(struct rdma_ah_attr *attr,
					   u8 static_rate)
{
	attr->static_rate = static_rate;
}

static inline u8 rdma_ah_get_static_rate(const struct rdma_ah_attr *attr)
{
	return attr->static_rate;
}

static inline void rdma_ah_set_ah_flags(struct rdma_ah_attr *attr,
					enum ib_ah_flags flag)
{
	attr->ah_flags = flag;
}

static inline enum ib_ah_flags
		rdma_ah_get_ah_flags(const struct rdma_ah_attr *attr)
{
	return attr->ah_flags;
}

static inline const struct ib_global_route
		*rdma_ah_read_grh(const struct rdma_ah_attr *attr)
{
	return &attr->grh;
}

/* To retrieve and modify the GRH */
static inline struct ib_global_route
		*rdma_ah_retrieve_grh(struct rdma_ah_attr *attr)
{
	return &attr->grh;
}

static inline void rdma_ah_set_dgid_raw(struct rdma_ah_attr *attr, void *dgid)
{
	struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);

	memcpy(grh->dgid.raw, dgid, sizeof(grh->dgid));
}

static inline void rdma_ah_set_subnet_prefix(struct rdma_ah_attr *attr,
					     __be64 prefix)
{
	struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);

	grh->dgid.global.subnet_prefix = prefix;
}

static inline void rdma_ah_set_interface_id(struct rdma_ah_attr *attr,
					    __be64 if_id)
{
	struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);

	grh->dgid.global.interface_id = if_id;
}

static inline void rdma_ah_set_grh(struct rdma_ah_attr *attr,
				   union ib_gid *dgid, u32 flow_label,
				   u8 sgid_index, u8 hop_limit,
				   u8 traffic_class)
{
	struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);

	attr->ah_flags = IB_AH_GRH;
	if (dgid)
		grh->dgid = *dgid;
	grh->flow_label = flow_label;
	grh->sgid_index = sgid_index;
	grh->hop_limit = hop_limit;
	grh->traffic_class = traffic_class;
	grh->sgid_attr = NULL;
}

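/*
 * Sketch of building address-handle attributes for a routed (GRH-carrying)
 * path; the dlid, sl, dgid and sgid_index values are assumptions supplied
 * by path resolution:
 *
 *	struct rdma_ah_attr attr = {};
 *
 *	attr.type = rdma_ah_find_type(device, port_num);
 *	rdma_ah_set_port_num(&attr, port_num);
 *	rdma_ah_set_dlid(&attr, dlid);
 *	rdma_ah_set_sl(&attr, sl);
 *	rdma_ah_set_grh(&attr, &dgid, 0, sgid_index, 64, 0);
 */
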
void rdma_destroy_ah_attr(struct rdma_ah_attr *ah_attr);
void rdma_move_grh_sgid_attr(struct rdma_ah_attr *attr, union ib_gid *dgid,
			     u32 flow_label, u8 hop_limit, u8 traffic_class,
			     const struct ib_gid_attr *sgid_attr);
void rdma_copy_ah_attr(struct rdma_ah_attr *dest,
		       const struct rdma_ah_attr *src);
void rdma_replace_ah_attr(struct rdma_ah_attr *old,
			  const struct rdma_ah_attr *new);
void rdma_move_ah_attr(struct rdma_ah_attr *dest, struct rdma_ah_attr *src);

/**
 * rdma_ah_find_type - Return address handle type.
 *
 * @dev: Device to be checked
 * @port_num: Port number
 */
static inline enum rdma_ah_attr_type rdma_ah_find_type(struct ib_device *dev,
						       u8 port_num)
{
	if (rdma_protocol_roce(dev, port_num))
		return RDMA_AH_ATTR_TYPE_ROCE;
	if (rdma_protocol_ib(dev, port_num)) {
		if (rdma_cap_opa_ah(dev, port_num))
			return RDMA_AH_ATTR_TYPE_OPA;
		return RDMA_AH_ATTR_TYPE_IB;
	}

	return RDMA_AH_ATTR_TYPE_UNDEFINED;
}

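/*
 * Callers use this to pick the correct union member of struct rdma_ah_attr
 * before filling it in, e.g. (port_num and dmac are assumptions):
 *
 *	attr->type = rdma_ah_find_type(dev, port_num);
 *	if (attr->type == RDMA_AH_ATTR_TYPE_ROCE)
 *		memcpy(attr->roce.dmac, dmac, ETH_ALEN);
 */
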
/**
 * ib_lid_cpu16 - Return lid in 16bit CPU encoding.
 * In the current implementation the only way to
 * get the 32bit lid is from other sources for OPA.
 * For IB, lids will always be 16bits so cast the
 * value accordingly.
 *
 * @lid: A 32bit LID
 */
static inline u16 ib_lid_cpu16(u32 lid)
{
	WARN_ON_ONCE(lid & 0xFFFF0000);
	return (u16)lid;
}

/**
 * ib_lid_be16 - Return lid in 16bit BE encoding.
 *
 * @lid: A 32bit LID
 */
static inline __be16 ib_lid_be16(u32 lid)
{
	WARN_ON_ONCE(lid & 0xFFFF0000);
	return cpu_to_be16((u16)lid);
}

/**
 * ib_get_vector_affinity - Get the affinity mappings of a given completion
 * vector
 * @device: the rdma device
 * @comp_vector: index of completion vector
 *
 * Returns NULL on failure, otherwise a corresponding cpu map of the
 * completion vector (returns all-cpus map if the device driver doesn't
 * implement get_vector_affinity).
 */
static inline const struct cpumask *
ib_get_vector_affinity(struct ib_device *device, int comp_vector)
{
	if (comp_vector < 0 || comp_vector >= device->num_comp_vectors ||
	    !device->get_vector_affinity)
		return NULL;

	return device->get_vector_affinity(device, comp_vector);
}

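/*
 * Sketch of spreading queues across completion vectors by CPU locality
 * (hypothetical caller; cpu and comp_vector are assumptions):
 *
 *	const struct cpumask *mask;
 *
 *	mask = ib_get_vector_affinity(device, comp_vector);
 *	if (mask && cpumask_test_cpu(cpu, mask))
 *		... prefer this vector for the queue bound to cpu ...
 */
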
/**
 * rdma_roce_rescan_device - Rescan all of the network devices in the system
 * and add their gids, as needed, to the relevant RoCE devices.
 *
 * @ibdev: the rdma device
 */
void rdma_roce_rescan_device(struct ib_device *ibdev);

struct ib_ucontext *ib_uverbs_get_ucontext_file(struct ib_uverbs_file *ufile);

int uverbs_destroy_def_handler(struct uverbs_attr_bundle *attrs);

struct net_device *rdma_alloc_netdev(struct ib_device *device, u8 port_num,
				     enum rdma_netdev_t type, const char *name,
				     unsigned char name_assign_type,
				     void (*setup)(struct net_device *));

int rdma_init_netdev(struct ib_device *device, u8 port_num,
		     enum rdma_netdev_t type, const char *name,
		     unsigned char name_assign_type,
		     void (*setup)(struct net_device *),
		     struct net_device *netdev);

/**
 * rdma_set_device_sysfs_group - Set the device attributes group to have
 *				 driver-specific sysfs entries in the
 *				 infiniband class.
 *
 * @dev: device pointer for which the attributes are to be created
 * @group: Pointer to the group which should be added when the device
 *	is registered with sysfs.
 * rdma_set_device_sysfs_group() allows existing drivers to expose one
 * group per device to have sysfs attributes.
 *
 * NOTE: New drivers should not make use of this API; instead new device
 * parameters should be exposed via netlink commands. This API and mechanism
 * exist only for existing drivers.
 */
static inline void
rdma_set_device_sysfs_group(struct ib_device *dev,
			    const struct attribute_group *group)
{
	dev->groups[1] = group;
}

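/*
 * Sketch of driver usage (hw_attrs and dev_attr_hw_rev are hypothetical
 * driver symbols): set the group before registering the device so the
 * attributes appear together with it in sysfs.
 *
 *	static struct attribute *hw_attrs[] = { &dev_attr_hw_rev.attr, NULL };
 *	static const struct attribute_group hw_attr_group = {
 *		.attrs = hw_attrs,
 *	};
 *
 *	rdma_set_device_sysfs_group(ibdev, &hw_attr_group);
 */
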
#endif /* IB_VERBS_H */