Leon Romanovsky | 6bf9d8f | 2020-07-19 10:25:21 +0300 | [diff] [blame] | 1 | /* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2 | /* |
| 3 | * Copyright (c) 2004 Mellanox Technologies Ltd. All rights reserved. |
| 4 | * Copyright (c) 2004 Infinicon Corporation. All rights reserved. |
Jianxin Xiong | 3bc489e | 2020-12-15 13:27:14 -0800 | [diff] [blame] | 5 | * Copyright (c) 2004, 2020 Intel Corporation. All rights reserved. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6 | * Copyright (c) 2004 Topspin Corporation. All rights reserved. |
| 7 | * Copyright (c) 2004 Voltaire Corporation. All rights reserved. |
Roland Dreier | 2a1d9b7 | 2005-08-10 23:03:10 -0700 | [diff] [blame] | 8 | * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. |
Roland Dreier | f7c6a7b | 2007-03-04 16:15:11 -0800 | [diff] [blame] | 9 | * Copyright (c) 2005, 2006, 2007 Cisco Systems. All rights reserved. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 10 | */ |
| 11 | |
Leon Romanovsky | 6bf9d8f | 2020-07-19 10:25:21 +0300 | [diff] [blame] | 12 | #ifndef IB_VERBS_H |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 13 | #define IB_VERBS_H |
| 14 | |
Jakub Kicinski | cc69837 | 2020-11-20 14:50:52 -0800 | [diff] [blame] | 15 | #include <linux/ethtool.h> |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 16 | #include <linux/types.h> |
| 17 | #include <linux/device.h> |
Ralph Campbell | 9b51309 | 2006-12-12 14:27:41 -0800 | [diff] [blame] | 18 | #include <linux/dma-mapping.h> |
Michael S. Tsirkin | 459d6e2 | 2007-02-04 14:11:55 -0800 | [diff] [blame] | 19 | #include <linux/kref.h> |
Dotan Barak | bfb3ea1 | 2007-07-31 16:49:15 +0300 | [diff] [blame] | 20 | #include <linux/list.h> |
| 21 | #include <linux/rwsem.h> |
Tejun Heo | f062671 | 2010-10-19 15:24:36 +0000 | [diff] [blame] | 22 | #include <linux/workqueue.h> |
Christoph Hellwig | 14d3a3b | 2015-12-11 11:53:03 -0800 | [diff] [blame] | 23 | #include <linux/irq_poll.h> |
Matan Barak | dd5f03b | 2013-12-12 18:03:11 +0200 | [diff] [blame] | 24 | #include <uapi/linux/if_ether.h> |
Somnath Kotur | c865f24 | 2015-12-23 14:56:51 +0200 | [diff] [blame] | 25 | #include <net/ipv6.h> |
| 26 | #include <net/ip.h> |
Matan Barak | 301a721 | 2015-12-15 20:30:10 +0200 | [diff] [blame] | 27 | #include <linux/string.h> |
| 28 | #include <linux/slab.h> |
Vishwanathapura, Niranjana | 2fc7757 | 2017-04-12 20:29:20 -0700 | [diff] [blame] | 29 | #include <linux/netdevice.h> |
Parav Pandit | 01b6711 | 2018-11-16 03:50:57 +0200 | [diff] [blame] | 30 | #include <linux/refcount.h> |
Eli Cohen | 50174a7 | 2016-03-11 22:58:38 +0200 | [diff] [blame] | 31 | #include <linux/if_link.h> |
Arun Sharma | 60063497 | 2011-07-26 16:09:06 -0700 | [diff] [blame] | 32 | #include <linux/atomic.h> |
Haggai Eran | 882214e | 2014-12-11 17:04:18 +0200 | [diff] [blame] | 33 | #include <linux/mmu_notifier.h> |
Linus Torvalds | 7c0f6ba | 2016-12-24 11:46:01 -0800 | [diff] [blame] | 34 | #include <linux/uaccess.h> |
Parav Pandit | 43579b5 | 2017-01-10 00:02:14 +0000 | [diff] [blame] | 35 | #include <linux/cgroup_rdma.h> |
Leon Romanovsky | f631603 | 2019-03-28 15:12:58 +0200 | [diff] [blame] | 36 | #include <linux/irqflags.h> |
| 37 | #include <linux/preempt.h> |
Yamin Friedman | da66297 | 2019-07-08 13:59:03 +0300 | [diff] [blame] | 38 | #include <linux/dim.h> |
Nicolas Dichtel | ea6819e | 2017-03-27 14:20:14 +0200 | [diff] [blame] | 39 | #include <uapi/rdma/ib_user_verbs.h> |
Mark Zhang | 413d334 | 2019-07-02 13:02:34 +0300 | [diff] [blame] | 40 | #include <rdma/rdma_counter.h> |
Leon Romanovsky | 02d8883 | 2018-01-28 11:17:20 +0200 | [diff] [blame] | 41 | #include <rdma/restrack.h> |
Max Gurtovoy | 36b1e47 | 2019-06-11 18:52:37 +0300 | [diff] [blame] | 42 | #include <rdma/signature.h> |
Matan Barak | 0ede73b | 2018-03-19 15:02:34 +0200 | [diff] [blame] | 43 | #include <uapi/rdma/rdma_user_ioctl.h> |
Matan Barak | 2eb9bea | 2018-03-28 09:27:45 +0300 | [diff] [blame] | 44 | #include <uapi/rdma/ib_user_ioctl_verbs.h> |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 45 | |
Leon Romanovsky | 9abb0d1 | 2017-06-27 16:49:53 +0300 | [diff] [blame] | 46 | #define IB_FW_VERSION_NAME_MAX ETHTOOL_FWVERS_LEN |
| 47 | |
Jason Gunthorpe | b5231b0 | 2018-09-16 20:48:04 +0300 | [diff] [blame] | 48 | struct ib_umem_odp; |
Jason Gunthorpe | 620d3f8 | 2020-01-08 19:21:59 +0200 | [diff] [blame] | 49 | struct ib_uqp_object; |
Jason Gunthorpe | 9fbe334 | 2020-01-08 19:22:00 +0200 | [diff] [blame] | 50 | struct ib_usrq_object; |
Jason Gunthorpe | e04dd13 | 2020-01-08 19:22:01 +0200 | [diff] [blame] | 51 | struct ib_uwq_object; |
Maor Gottlieb | 211cd94 | 2020-06-23 14:30:39 +0300 | [diff] [blame] | 52 | struct rdma_cm_id; |
Jason Gunthorpe | d8a5883 | 2021-06-11 19:00:21 +0300 | [diff] [blame] | 53 | struct ib_port; |
Jason Gunthorpe | 467f432 | 2021-06-11 19:00:22 +0300 | [diff] [blame] | 54 | struct hw_stats_device_data; |
Jason Gunthorpe | b5231b0 | 2018-09-16 20:48:04 +0300 | [diff] [blame] | 55 | |
Tejun Heo | f062671 | 2010-10-19 15:24:36 +0000 | [diff] [blame] | 56 | extern struct workqueue_struct *ib_wq; |
Christoph Hellwig | 14d3a3b | 2015-12-11 11:53:03 -0800 | [diff] [blame] | 57 | extern struct workqueue_struct *ib_comp_wq; |
Jack Morgenstein | f794809 | 2018-08-27 08:35:55 +0300 | [diff] [blame] | 58 | extern struct workqueue_struct *ib_comp_unbound_wq; |
Tejun Heo | f062671 | 2010-10-19 15:24:36 +0000 | [diff] [blame] | 59 | |
Jason Gunthorpe | 5bd48c1 | 2020-01-08 19:21:58 +0200 | [diff] [blame] | 60 | struct ib_ucq_object; |
| 61 | |
Gal Pressman | 923abb9 | 2019-05-01 13:48:13 +0300 | [diff] [blame] | 62 | __printf(3, 4) __cold |
| 63 | void ibdev_printk(const char *level, const struct ib_device *ibdev, |
| 64 | const char *format, ...); |
| 65 | __printf(2, 3) __cold |
| 66 | void ibdev_emerg(const struct ib_device *ibdev, const char *format, ...); |
| 67 | __printf(2, 3) __cold |
| 68 | void ibdev_alert(const struct ib_device *ibdev, const char *format, ...); |
| 69 | __printf(2, 3) __cold |
| 70 | void ibdev_crit(const struct ib_device *ibdev, const char *format, ...); |
| 71 | __printf(2, 3) __cold |
| 72 | void ibdev_err(const struct ib_device *ibdev, const char *format, ...); |
| 73 | __printf(2, 3) __cold |
| 74 | void ibdev_warn(const struct ib_device *ibdev, const char *format, ...); |
| 75 | __printf(2, 3) __cold |
| 76 | void ibdev_notice(const struct ib_device *ibdev, const char *format, ...); |
| 77 | __printf(2, 3) __cold |
| 78 | void ibdev_info(const struct ib_device *ibdev, const char *format, ...); |
| 79 | |
Orson Zhai | ceabef7 | 2020-06-07 21:40:14 -0700 | [diff] [blame] | 80 | #if defined(CONFIG_DYNAMIC_DEBUG) || \ |
| 81 | (defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE)) |
Gal Pressman | 923abb9 | 2019-05-01 13:48:13 +0300 | [diff] [blame] | 82 | #define ibdev_dbg(__dev, format, args...) \ |
| 83 | dynamic_ibdev_dbg(__dev, format, ##args) |
Gal Pressman | 923abb9 | 2019-05-01 13:48:13 +0300 | [diff] [blame] | 84 | #else |
| 85 | __printf(2, 3) __cold |
| 86 | static inline |
| 87 | void ibdev_dbg(const struct ib_device *ibdev, const char *format, ...) {} |
| 88 | #endif |
| 89 | |
Gal Pressman | 05bb411 | 2019-08-01 20:14:46 +0300 | [diff] [blame] | 90 | #define ibdev_level_ratelimited(ibdev_level, ibdev, fmt, ...) \ |
| 91 | do { \ |
| 92 | static DEFINE_RATELIMIT_STATE(_rs, \ |
| 93 | DEFAULT_RATELIMIT_INTERVAL, \ |
| 94 | DEFAULT_RATELIMIT_BURST); \ |
| 95 | if (__ratelimit(&_rs)) \ |
| 96 | ibdev_level(ibdev, fmt, ##__VA_ARGS__); \ |
| 97 | } while (0) |
| 98 | |
| 99 | #define ibdev_emerg_ratelimited(ibdev, fmt, ...) \ |
| 100 | ibdev_level_ratelimited(ibdev_emerg, ibdev, fmt, ##__VA_ARGS__) |
| 101 | #define ibdev_alert_ratelimited(ibdev, fmt, ...) \ |
| 102 | ibdev_level_ratelimited(ibdev_alert, ibdev, fmt, ##__VA_ARGS__) |
| 103 | #define ibdev_crit_ratelimited(ibdev, fmt, ...) \ |
| 104 | ibdev_level_ratelimited(ibdev_crit, ibdev, fmt, ##__VA_ARGS__) |
| 105 | #define ibdev_err_ratelimited(ibdev, fmt, ...) \ |
| 106 | ibdev_level_ratelimited(ibdev_err, ibdev, fmt, ##__VA_ARGS__) |
| 107 | #define ibdev_warn_ratelimited(ibdev, fmt, ...) \ |
| 108 | ibdev_level_ratelimited(ibdev_warn, ibdev, fmt, ##__VA_ARGS__) |
| 109 | #define ibdev_notice_ratelimited(ibdev, fmt, ...) \ |
| 110 | ibdev_level_ratelimited(ibdev_notice, ibdev, fmt, ##__VA_ARGS__) |
| 111 | #define ibdev_info_ratelimited(ibdev, fmt, ...) \ |
| 112 | ibdev_level_ratelimited(ibdev_info, ibdev, fmt, ##__VA_ARGS__) |
| 113 | |
Orson Zhai | ceabef7 | 2020-06-07 21:40:14 -0700 | [diff] [blame] | 114 | #if defined(CONFIG_DYNAMIC_DEBUG) || \ |
| 115 | (defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE)) |
Gal Pressman | 05bb411 | 2019-08-01 20:14:46 +0300 | [diff] [blame] | 116 | /* descriptor check is first to prevent flooding with "callbacks suppressed" */ |
| 117 | #define ibdev_dbg_ratelimited(ibdev, fmt, ...) \ |
| 118 | do { \ |
| 119 | static DEFINE_RATELIMIT_STATE(_rs, \ |
| 120 | DEFAULT_RATELIMIT_INTERVAL, \ |
| 121 | DEFAULT_RATELIMIT_BURST); \ |
| 122 | DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \ |
| 123 | if (DYNAMIC_DEBUG_BRANCH(descriptor) && __ratelimit(&_rs)) \ |
| 124 | __dynamic_ibdev_dbg(&descriptor, ibdev, fmt, \ |
| 125 | ##__VA_ARGS__); \ |
| 126 | } while (0) |
| 127 | #else |
| 128 | __printf(2, 3) __cold |
| 129 | static inline |
| 130 | void ibdev_dbg_ratelimited(const struct ib_device *ibdev, const char *format, ...) {} |
| 131 | #endif |
| 132 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 133 | union ib_gid { |
| 134 | u8 raw[16]; |
| 135 | struct { |
Sean Hefty | 97f52eb | 2005-08-13 21:05:57 -0700 | [diff] [blame] | 136 | __be64 subnet_prefix; |
| 137 | __be64 interface_id; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 138 | } global; |
| 139 | }; |
| 140 | |
Moni Shoua | e26be1b | 2015-07-30 18:33:29 +0300 | [diff] [blame] | 141 | extern union ib_gid zgid; |
| 142 | |
Matan Barak | b39ffa1 | 2015-12-23 14:56:47 +0200 | [diff] [blame] | 143 | enum ib_gid_type { |
Avihai Horon | 9f85cbe | 2020-09-23 19:50:15 +0300 | [diff] [blame] | 144 | IB_GID_TYPE_IB = IB_UVERBS_GID_TYPE_IB, |
| 145 | IB_GID_TYPE_ROCE = IB_UVERBS_GID_TYPE_ROCE_V1, |
| 146 | IB_GID_TYPE_ROCE_UDP_ENCAP = IB_UVERBS_GID_TYPE_ROCE_V2, |
Matan Barak | b39ffa1 | 2015-12-23 14:56:47 +0200 | [diff] [blame] | 147 | IB_GID_TYPE_SIZE |
| 148 | }; |
| 149 | |
Moni Shoua | 7ead4bc | 2016-01-14 17:50:38 +0200 | [diff] [blame] | 150 | #define ROCE_V2_UDP_DPORT 4791 |
Matan Barak | 03db3a2 | 2015-07-30 18:33:26 +0300 | [diff] [blame] | 151 | struct ib_gid_attr { |
Parav Pandit | 943bd98 | 2019-05-02 10:48:07 +0300 | [diff] [blame] | 152 | struct net_device __rcu *ndev; |
Parav Pandit | 598ff6b | 2018-04-01 15:08:21 +0300 | [diff] [blame] | 153 | struct ib_device *device; |
Parav Pandit | b150c38 | 2018-06-05 08:40:15 +0300 | [diff] [blame] | 154 | union ib_gid gid; |
Parav Pandit | 598ff6b | 2018-04-01 15:08:21 +0300 | [diff] [blame] | 155 | enum ib_gid_type gid_type; |
| 156 | u16 index; |
Mark Bloch | 1fb7f89 | 2021-03-01 09:04:20 +0200 | [diff] [blame] | 157 | u32 port_num; |
Matan Barak | 03db3a2 | 2015-07-30 18:33:26 +0300 | [diff] [blame] | 158 | }; |
| 159 | |
Eli Cohen | a0c1b2a | 2016-03-11 22:58:37 +0200 | [diff] [blame] | 160 | enum { |
| 161 | /* set the local administered indication */ |
| 162 | IB_SA_WELL_KNOWN_GUID = BIT_ULL(57) | 2, |
| 163 | }; |
| 164 | |
Tom Tucker | 07ebafb | 2006-08-03 16:02:42 -0500 | [diff] [blame] | 165 | enum rdma_transport_type { |
| 166 | RDMA_TRANSPORT_IB, |
Upinder Malhi \(umalhi\) | 180771a | 2013-09-10 03:36:59 +0000 | [diff] [blame] | 167 | RDMA_TRANSPORT_IWARP, |
Upinder Malhi | 248567f | 2014-01-09 14:48:19 -0800 | [diff] [blame] | 168 | RDMA_TRANSPORT_USNIC, |
Gal Pressman | f95be3d | 2019-05-05 20:59:21 +0300 | [diff] [blame] | 169 | RDMA_TRANSPORT_USNIC_UDP, |
| 170 | RDMA_TRANSPORT_UNSPECIFIED, |
Tom Tucker | 07ebafb | 2006-08-03 16:02:42 -0500 | [diff] [blame] | 171 | }; |
| 172 | |
Michael Wang | 6b90a6d | 2015-05-05 14:50:18 +0200 | [diff] [blame] | 173 | enum rdma_protocol_type { |
| 174 | RDMA_PROTOCOL_IB, |
| 175 | RDMA_PROTOCOL_IBOE, |
| 176 | RDMA_PROTOCOL_IWARP, |
| 177 | RDMA_PROTOCOL_USNIC_UDP |
| 178 | }; |
| 179 | |
Roland Dreier | 8385fd8 | 2014-06-04 10:00:16 -0700 | [diff] [blame] | 180 | __attribute_const__ enum rdma_transport_type |
Jason Gunthorpe | 5d60c11 | 2019-06-13 21:38:17 -0300 | [diff] [blame] | 181 | rdma_node_get_transport(unsigned int node_type); |
Tom Tucker | 07ebafb | 2006-08-03 16:02:42 -0500 | [diff] [blame] | 182 | |
Somnath Kotur | c865f24 | 2015-12-23 14:56:51 +0200 | [diff] [blame] | 183 | enum rdma_network_type { |
| 184 | RDMA_NETWORK_IB, |
Avihai Horon | 1c15b4f | 2020-09-23 19:50:13 +0300 | [diff] [blame] | 185 | RDMA_NETWORK_ROCE_V1, |
Somnath Kotur | c865f24 | 2015-12-23 14:56:51 +0200 | [diff] [blame] | 186 | RDMA_NETWORK_IPV4, |
| 187 | RDMA_NETWORK_IPV6 |
| 188 | }; |
| 189 | |
| 190 | static inline enum ib_gid_type ib_network_to_gid_type(enum rdma_network_type network_type) |
| 191 | { |
| 192 | if (network_type == RDMA_NETWORK_IPV4 || |
| 193 | network_type == RDMA_NETWORK_IPV6) |
| 194 | return IB_GID_TYPE_ROCE_UDP_ENCAP; |
Avihai Horon | 1c15b4f | 2020-09-23 19:50:13 +0300 | [diff] [blame] | 195 | else if (network_type == RDMA_NETWORK_ROCE_V1) |
| 196 | return IB_GID_TYPE_ROCE; |
| 197 | else |
| 198 | return IB_GID_TYPE_IB; |
Somnath Kotur | c865f24 | 2015-12-23 14:56:51 +0200 | [diff] [blame] | 199 | } |
| 200 | |
Parav Pandit | 47ec386 | 2018-06-13 10:22:06 +0300 | [diff] [blame] | 201 | static inline enum rdma_network_type |
| 202 | rdma_gid_attr_network_type(const struct ib_gid_attr *attr) |
Somnath Kotur | c865f24 | 2015-12-23 14:56:51 +0200 | [diff] [blame] | 203 | { |
Parav Pandit | 47ec386 | 2018-06-13 10:22:06 +0300 | [diff] [blame] | 204 | if (attr->gid_type == IB_GID_TYPE_IB) |
Somnath Kotur | c865f24 | 2015-12-23 14:56:51 +0200 | [diff] [blame] | 205 | return RDMA_NETWORK_IB; |
| 206 | |
Avihai Horon | 1c15b4f | 2020-09-23 19:50:13 +0300 | [diff] [blame] | 207 | if (attr->gid_type == IB_GID_TYPE_ROCE) |
| 208 | return RDMA_NETWORK_ROCE_V1; |
| 209 | |
Parav Pandit | 47ec386 | 2018-06-13 10:22:06 +0300 | [diff] [blame] | 210 | if (ipv6_addr_v4mapped((struct in6_addr *)&attr->gid)) |
Somnath Kotur | c865f24 | 2015-12-23 14:56:51 +0200 | [diff] [blame] | 211 | return RDMA_NETWORK_IPV4; |
| 212 | else |
| 213 | return RDMA_NETWORK_IPV6; |
| 214 | } |
| 215 | |
Eli Cohen | a3f5ada | 2010-09-27 17:51:10 -0700 | [diff] [blame] | 216 | enum rdma_link_layer { |
| 217 | IB_LINK_LAYER_UNSPECIFIED, |
| 218 | IB_LINK_LAYER_INFINIBAND, |
| 219 | IB_LINK_LAYER_ETHERNET, |
| 220 | }; |
| 221 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 222 | enum ib_device_cap_flags { |
Leon Romanovsky | 7ca0bc5 | 2015-12-20 12:16:09 +0200 | [diff] [blame] | 223 | IB_DEVICE_RESIZE_MAX_WR = (1 << 0), |
| 224 | IB_DEVICE_BAD_PKEY_CNTR = (1 << 1), |
| 225 | IB_DEVICE_BAD_QKEY_CNTR = (1 << 2), |
| 226 | IB_DEVICE_RAW_MULTI = (1 << 3), |
| 227 | IB_DEVICE_AUTO_PATH_MIG = (1 << 4), |
| 228 | IB_DEVICE_CHANGE_PHY_PORT = (1 << 5), |
| 229 | IB_DEVICE_UD_AV_PORT_ENFORCE = (1 << 6), |
| 230 | IB_DEVICE_CURR_QP_STATE_MOD = (1 << 7), |
| 231 | IB_DEVICE_SHUTDOWN_PORT = (1 << 8), |
Leon Romanovsky | 78b57f9 | 2017-08-17 15:50:37 +0300 | [diff] [blame] | 232 | /* Not in use, former INIT_TYPE = (1 << 9),*/ |
Leon Romanovsky | 7ca0bc5 | 2015-12-20 12:16:09 +0200 | [diff] [blame] | 233 | IB_DEVICE_PORT_ACTIVE_EVENT = (1 << 10), |
| 234 | IB_DEVICE_SYS_IMAGE_GUID = (1 << 11), |
| 235 | IB_DEVICE_RC_RNR_NAK_GEN = (1 << 12), |
| 236 | IB_DEVICE_SRQ_RESIZE = (1 << 13), |
| 237 | IB_DEVICE_N_NOTIFY_CQ = (1 << 14), |
Christoph Hellwig | b1adc71 | 2015-12-23 19:12:45 +0100 | [diff] [blame] | 238 | |
| 239 | /* |
| 240 | * This device supports a per-device lkey or stag that can be |
| 241 | * used without performing a memory registration for the local |
| 242 | * memory. Note that ULPs should never check this flag, but |
| 243 | * instead of use the local_dma_lkey flag in the ib_pd structure, |
| 244 | * which will always contain a usable lkey. |
| 245 | */ |
Leon Romanovsky | 7ca0bc5 | 2015-12-20 12:16:09 +0200 | [diff] [blame] | 246 | IB_DEVICE_LOCAL_DMA_LKEY = (1 << 15), |
Leon Romanovsky | 78b57f9 | 2017-08-17 15:50:37 +0300 | [diff] [blame] | 247 | /* Reserved, old SEND_W_INV = (1 << 16),*/ |
Leon Romanovsky | 7ca0bc5 | 2015-12-20 12:16:09 +0200 | [diff] [blame] | 248 | IB_DEVICE_MEM_WINDOW = (1 << 17), |
Eli Cohen | e0605d9 | 2008-01-30 18:30:57 +0200 | [diff] [blame] | 249 | /* |
| 250 | * Devices should set IB_DEVICE_UD_IP_SUM if they support |
| 251 | * insertion of UDP and TCP checksum on outgoing UD IPoIB |
| 252 | * messages and can verify the validity of checksum for |
| 253 | * incoming messages. Setting this flag implies that the |
| 254 | * IPoIB driver may set NETIF_F_IP_CSUM for datagram mode. |
| 255 | */ |
Leon Romanovsky | 7ca0bc5 | 2015-12-20 12:16:09 +0200 | [diff] [blame] | 256 | IB_DEVICE_UD_IP_CSUM = (1 << 18), |
| 257 | IB_DEVICE_UD_TSO = (1 << 19), |
| 258 | IB_DEVICE_XRC = (1 << 20), |
Christoph Hellwig | b1adc71 | 2015-12-23 19:12:45 +0100 | [diff] [blame] | 259 | |
| 260 | /* |
| 261 | * This device supports the IB "base memory management extension", |
| 262 | * which includes support for fast registrations (IB_WR_REG_MR, |
| 263 | * IB_WR_LOCAL_INV and IB_WR_SEND_WITH_INV verbs). This flag should |
| 264 | * also be set by any iWarp device which must support FRs to comply |
| 265 | * to the iWarp verbs spec. iWarp devices also support the |
| 266 | * IB_WR_RDMA_READ_WITH_INV verb for RDMA READs that invalidate the |
| 267 | * stag. |
| 268 | */ |
Leon Romanovsky | 7ca0bc5 | 2015-12-20 12:16:09 +0200 | [diff] [blame] | 269 | IB_DEVICE_MEM_MGT_EXTENSIONS = (1 << 21), |
| 270 | IB_DEVICE_BLOCK_MULTICAST_LOOPBACK = (1 << 22), |
| 271 | IB_DEVICE_MEM_WINDOW_TYPE_2A = (1 << 23), |
| 272 | IB_DEVICE_MEM_WINDOW_TYPE_2B = (1 << 24), |
| 273 | IB_DEVICE_RC_IP_CSUM = (1 << 25), |
Noa Osherovich | ebaaee2 | 2017-01-18 15:39:54 +0200 | [diff] [blame] | 274 | /* Deprecated. Please use IB_RAW_PACKET_CAP_IP_CSUM. */ |
Leon Romanovsky | 7ca0bc5 | 2015-12-20 12:16:09 +0200 | [diff] [blame] | 275 | IB_DEVICE_RAW_IP_CSUM = (1 << 26), |
Leon Romanovsky | 8a06ce5 | 2015-12-20 12:16:10 +0200 | [diff] [blame] | 276 | /* |
| 277 | * Devices should set IB_DEVICE_CROSS_CHANNEL if they |
| 278 | * support execution of WQEs that involve synchronization |
| 279 | * of I/O operations with single completion queue managed |
| 280 | * by hardware. |
| 281 | */ |
Leon Romanovsky | 78b57f9 | 2017-08-17 15:50:37 +0300 | [diff] [blame] | 282 | IB_DEVICE_CROSS_CHANNEL = (1 << 27), |
Leon Romanovsky | 7ca0bc5 | 2015-12-20 12:16:09 +0200 | [diff] [blame] | 283 | IB_DEVICE_MANAGED_FLOW_STEERING = (1 << 29), |
Israel Rukshin | c0a6cbb | 2019-06-11 18:52:50 +0300 | [diff] [blame] | 284 | IB_DEVICE_INTEGRITY_HANDOVER = (1 << 30), |
Max Gurtovoy | 47355b3 | 2016-06-06 19:34:39 +0300 | [diff] [blame] | 285 | IB_DEVICE_ON_DEMAND_PAGING = (1ULL << 31), |
Sagi Grimberg | f5aa915 | 2016-02-29 19:07:32 +0200 | [diff] [blame] | 286 | IB_DEVICE_SG_GAPS_REG = (1ULL << 32), |
Max Gurtovoy | c7e162a | 2016-06-06 19:34:40 +0300 | [diff] [blame] | 287 | IB_DEVICE_VIRTUAL_FUNCTION = (1ULL << 33), |
Noa Osherovich | ebaaee2 | 2017-01-18 15:39:54 +0200 | [diff] [blame] | 288 | /* Deprecated. Please use IB_RAW_PACKET_CAP_SCATTER_FCS. */ |
Max Gurtovoy | c7e162a | 2016-06-06 19:34:40 +0300 | [diff] [blame] | 289 | IB_DEVICE_RAW_SCATTER_FCS = (1ULL << 34), |
Gary Leshner | 7f90a5a | 2020-05-11 12:06:07 -0400 | [diff] [blame] | 290 | IB_DEVICE_RDMA_NETDEV_OPA = (1ULL << 35), |
Noa Osherovich | e1d2e88 | 2017-10-29 13:59:44 +0200 | [diff] [blame] | 291 | /* The device supports padding incoming writes to cacheline. */ |
| 292 | IB_DEVICE_PCI_WRITE_END_PADDING = (1ULL << 36), |
Steve Wise | 3856ec4 | 2019-02-15 11:03:53 -0800 | [diff] [blame] | 293 | IB_DEVICE_ALLOW_USER_UNREG = (1ULL << 37), |
Sagi Grimberg | 1b01d33 | 2014-02-23 14:19:05 +0200 | [diff] [blame] | 294 | }; |
| 295 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 296 | enum ib_atomic_cap { |
| 297 | IB_ATOMIC_NONE, |
| 298 | IB_ATOMIC_HCA, |
| 299 | IB_ATOMIC_GLOB |
| 300 | }; |
| 301 | |
Sagi Grimberg | 860f10a | 2014-12-11 17:04:16 +0200 | [diff] [blame] | 302 | enum ib_odp_general_cap_bits { |
Artemy Kovalyov | 25bf14d | 2017-01-18 16:58:06 +0200 | [diff] [blame] | 303 | IB_ODP_SUPPORT = 1 << 0, |
| 304 | IB_ODP_SUPPORT_IMPLICIT = 1 << 1, |
Sagi Grimberg | 860f10a | 2014-12-11 17:04:16 +0200 | [diff] [blame] | 305 | }; |
| 306 | |
| 307 | enum ib_odp_transport_cap_bits { |
| 308 | IB_ODP_SUPPORT_SEND = 1 << 0, |
| 309 | IB_ODP_SUPPORT_RECV = 1 << 1, |
| 310 | IB_ODP_SUPPORT_WRITE = 1 << 2, |
| 311 | IB_ODP_SUPPORT_READ = 1 << 3, |
| 312 | IB_ODP_SUPPORT_ATOMIC = 1 << 4, |
Moni Shoua | da82334 | 2019-01-22 08:48:41 +0200 | [diff] [blame] | 313 | IB_ODP_SUPPORT_SRQ_RECV = 1 << 5, |
Sagi Grimberg | 860f10a | 2014-12-11 17:04:16 +0200 | [diff] [blame] | 314 | }; |
| 315 | |
| 316 | struct ib_odp_caps { |
| 317 | uint64_t general_caps; |
| 318 | struct { |
| 319 | uint32_t rc_odp_caps; |
| 320 | uint32_t uc_odp_caps; |
| 321 | uint32_t ud_odp_caps; |
Moni Shoua | 52a72e2 | 2019-01-22 08:48:42 +0200 | [diff] [blame] | 322 | uint32_t xrc_odp_caps; |
Sagi Grimberg | 860f10a | 2014-12-11 17:04:16 +0200 | [diff] [blame] | 323 | } per_transport_caps; |
| 324 | }; |
| 325 | |
Yishai Hadas | ccf2056 | 2016-08-28 11:28:43 +0300 | [diff] [blame] | 326 | struct ib_rss_caps { |
| 327 | /* Corresponding bit will be set if qp type from |
| 328 | * 'enum ib_qp_type' is supported, e.g. |
| 329 | * supported_qpts |= 1 << IB_QPT_UD |
| 330 | */ |
| 331 | u32 supported_qpts; |
| 332 | u32 max_rwq_indirection_tables; |
| 333 | u32 max_rwq_indirection_table_size; |
| 334 | }; |
| 335 | |
Artemy Kovalyov | 6938fc1e | 2017-08-17 15:52:03 +0300 | [diff] [blame] | 336 | enum ib_tm_cap_flags { |
Danit Goldberg | 89705e9 | 2019-07-05 19:21:57 +0300 | [diff] [blame] | 337 | /* Support tag matching with rendezvous offload for RC transport */ |
| 338 | IB_TM_CAP_RNDV_RC = 1 << 0, |
Artemy Kovalyov | 6938fc1e | 2017-08-17 15:52:03 +0300 | [diff] [blame] | 339 | }; |
| 340 | |
Leon Romanovsky | 78b1beb | 2017-09-24 21:46:29 +0300 | [diff] [blame] | 341 | struct ib_tm_caps { |
Artemy Kovalyov | 6938fc1e | 2017-08-17 15:52:03 +0300 | [diff] [blame] | 342 | /* Max size of RNDV header */ |
| 343 | u32 max_rndv_hdr_size; |
| 344 | /* Max number of entries in tag matching list */ |
| 345 | u32 max_num_tags; |
| 346 | /* From enum ib_tm_cap_flags */ |
| 347 | u32 flags; |
| 348 | /* Max number of outstanding list operations */ |
| 349 | u32 max_ops; |
| 350 | /* Max number of SGE in tag matching entry */ |
| 351 | u32 max_sge; |
| 352 | }; |
| 353 | |
Matan Barak | bcf4c1e | 2015-06-11 16:35:20 +0300 | [diff] [blame] | 354 | struct ib_cq_init_attr { |
| 355 | unsigned int cqe; |
Dan Carpenter | a9018ad | 2019-10-11 16:34:19 +0300 | [diff] [blame] | 356 | u32 comp_vector; |
Matan Barak | bcf4c1e | 2015-06-11 16:35:20 +0300 | [diff] [blame] | 357 | u32 flags; |
| 358 | }; |
| 359 | |
Yonatan Cohen | 869ddcf | 2017-11-13 10:51:13 +0200 | [diff] [blame] | 360 | enum ib_cq_attr_mask { |
| 361 | IB_CQ_MODERATE = 1 << 0, |
| 362 | }; |
| 363 | |
Yonatan Cohen | 18bd907 | 2017-11-13 10:51:16 +0200 | [diff] [blame] | 364 | struct ib_cq_caps { |
| 365 | u16 max_cq_moderation_count; |
| 366 | u16 max_cq_moderation_period; |
| 367 | }; |
| 368 | |
Ariel Levkovich | be934cc | 2018-04-05 18:53:25 +0300 | [diff] [blame] | 369 | struct ib_dm_mr_attr { |
| 370 | u64 length; |
| 371 | u64 offset; |
| 372 | u32 access_flags; |
| 373 | }; |
| 374 | |
Ariel Levkovich | bee76d7 | 2018-04-05 18:53:24 +0300 | [diff] [blame] | 375 | struct ib_dm_alloc_attr { |
| 376 | u64 length; |
| 377 | u32 alignment; |
| 378 | u32 flags; |
| 379 | }; |
| 380 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 381 | struct ib_device_attr { |
| 382 | u64 fw_ver; |
Sean Hefty | 97f52eb | 2005-08-13 21:05:57 -0700 | [diff] [blame] | 383 | __be64 sys_image_guid; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 384 | u64 max_mr_size; |
| 385 | u64 page_size_cap; |
| 386 | u32 vendor_id; |
| 387 | u32 vendor_part_id; |
| 388 | u32 hw_ver; |
| 389 | int max_qp; |
| 390 | int max_qp_wr; |
Leon Romanovsky | fb532d6 | 2016-02-23 10:25:25 +0200 | [diff] [blame] | 391 | u64 device_cap_flags; |
Steve Wise | 33023fb | 2018-06-18 08:05:26 -0700 | [diff] [blame] | 392 | int max_send_sge; |
| 393 | int max_recv_sge; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 394 | int max_sge_rd; |
| 395 | int max_cq; |
| 396 | int max_cqe; |
| 397 | int max_mr; |
| 398 | int max_pd; |
| 399 | int max_qp_rd_atom; |
| 400 | int max_ee_rd_atom; |
| 401 | int max_res_rd_atom; |
| 402 | int max_qp_init_rd_atom; |
| 403 | int max_ee_init_rd_atom; |
| 404 | enum ib_atomic_cap atomic_cap; |
Vladimir Sokolovsky | 5e80ba8 | 2010-04-14 17:23:01 +0300 | [diff] [blame] | 405 | enum ib_atomic_cap masked_atomic_cap; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 406 | int max_ee; |
| 407 | int max_rdd; |
| 408 | int max_mw; |
| 409 | int max_raw_ipv6_qp; |
| 410 | int max_raw_ethy_qp; |
| 411 | int max_mcast_grp; |
| 412 | int max_mcast_qp_attach; |
| 413 | int max_total_mcast_qp_attach; |
| 414 | int max_ah; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 415 | int max_srq; |
| 416 | int max_srq_wr; |
| 417 | int max_srq_sge; |
Steve Wise | 00f7ec3 | 2008-07-14 23:48:45 -0700 | [diff] [blame] | 418 | unsigned int max_fast_reg_page_list_len; |
Max Gurtovoy | 62e3c37 | 2019-06-11 18:52:43 +0300 | [diff] [blame] | 419 | unsigned int max_pi_fast_reg_page_list_len; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 420 | u16 max_pkeys; |
| 421 | u8 local_ca_ack_delay; |
Sagi Grimberg | 1b01d33 | 2014-02-23 14:19:05 +0200 | [diff] [blame] | 422 | int sig_prot_cap; |
| 423 | int sig_guard_cap; |
Sagi Grimberg | 860f10a | 2014-12-11 17:04:16 +0200 | [diff] [blame] | 424 | struct ib_odp_caps odp_caps; |
Matan Barak | 24306dc | 2015-06-11 16:35:24 +0300 | [diff] [blame] | 425 | uint64_t timestamp_mask; |
| 426 | uint64_t hca_core_clock; /* in KHZ */ |
Yishai Hadas | ccf2056 | 2016-08-28 11:28:43 +0300 | [diff] [blame] | 427 | struct ib_rss_caps rss_caps; |
| 428 | u32 max_wq_type_rq; |
Noa Osherovich | ebaaee2 | 2017-01-18 15:39:54 +0200 | [diff] [blame] | 429 | u32 raw_packet_caps; /* Use ib_raw_packet_caps enum */ |
Leon Romanovsky | 78b1beb | 2017-09-24 21:46:29 +0300 | [diff] [blame] | 430 | struct ib_tm_caps tm_caps; |
Yonatan Cohen | 18bd907 | 2017-11-13 10:51:16 +0200 | [diff] [blame] | 431 | struct ib_cq_caps cq_caps; |
Ariel Levkovich | 1d8eeb9 | 2018-04-05 18:53:23 +0300 | [diff] [blame] | 432 | u64 max_dm_size; |
Yamin Friedman | 00bd143 | 2019-10-07 16:59:32 +0300 | [diff] [blame] | 433 | /* Max entries for sgl for optimized performance per READ */ |
| 434 | u32 max_sgl_rd; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 435 | }; |
| 436 | |
| 437 | enum ib_mtu { |
| 438 | IB_MTU_256 = 1, |
| 439 | IB_MTU_512 = 2, |
| 440 | IB_MTU_1024 = 3, |
| 441 | IB_MTU_2048 = 4, |
| 442 | IB_MTU_4096 = 5 |
| 443 | }; |
| 444 | |
Kaike Wan | 6d72344 | 2020-05-11 12:06:18 -0400 | [diff] [blame] | 445 | enum opa_mtu { |
| 446 | OPA_MTU_8192 = 6, |
| 447 | OPA_MTU_10240 = 7 |
| 448 | }; |
| 449 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 450 | static inline int ib_mtu_enum_to_int(enum ib_mtu mtu) |
| 451 | { |
| 452 | switch (mtu) { |
| 453 | case IB_MTU_256: return 256; |
| 454 | case IB_MTU_512: return 512; |
| 455 | case IB_MTU_1024: return 1024; |
| 456 | case IB_MTU_2048: return 2048; |
| 457 | case IB_MTU_4096: return 4096; |
| 458 | default: return -1; |
| 459 | } |
| 460 | } |
| 461 | |
Amrani, Ram | d3f4aad | 2016-12-26 08:40:57 +0200 | [diff] [blame] | 462 | static inline enum ib_mtu ib_mtu_int_to_enum(int mtu) |
| 463 | { |
| 464 | if (mtu >= 4096) |
| 465 | return IB_MTU_4096; |
| 466 | else if (mtu >= 2048) |
| 467 | return IB_MTU_2048; |
| 468 | else if (mtu >= 1024) |
| 469 | return IB_MTU_1024; |
| 470 | else if (mtu >= 512) |
| 471 | return IB_MTU_512; |
| 472 | else |
| 473 | return IB_MTU_256; |
| 474 | } |
| 475 | |
Kaike Wan | 6d72344 | 2020-05-11 12:06:18 -0400 | [diff] [blame] | 476 | static inline int opa_mtu_enum_to_int(enum opa_mtu mtu) |
| 477 | { |
| 478 | switch (mtu) { |
| 479 | case OPA_MTU_8192: |
| 480 | return 8192; |
| 481 | case OPA_MTU_10240: |
| 482 | return 10240; |
| 483 | default: |
| 484 | return(ib_mtu_enum_to_int((enum ib_mtu)mtu)); |
| 485 | } |
| 486 | } |
| 487 | |
| 488 | static inline enum opa_mtu opa_mtu_int_to_enum(int mtu) |
| 489 | { |
| 490 | if (mtu >= 10240) |
| 491 | return OPA_MTU_10240; |
| 492 | else if (mtu >= 8192) |
| 493 | return OPA_MTU_8192; |
| 494 | else |
| 495 | return ((enum opa_mtu)ib_mtu_int_to_enum(mtu)); |
| 496 | } |
| 497 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 498 | enum ib_port_state { |
| 499 | IB_PORT_NOP = 0, |
| 500 | IB_PORT_DOWN = 1, |
| 501 | IB_PORT_INIT = 2, |
| 502 | IB_PORT_ARMED = 3, |
| 503 | IB_PORT_ACTIVE = 4, |
| 504 | IB_PORT_ACTIVE_DEFER = 5 |
| 505 | }; |
| 506 | |
Kamal Heib | 72a7720 | 2019-08-07 13:31:35 +0300 | [diff] [blame] | 507 | enum ib_port_phys_state { |
| 508 | IB_PORT_PHYS_STATE_SLEEP = 1, |
| 509 | IB_PORT_PHYS_STATE_POLLING = 2, |
| 510 | IB_PORT_PHYS_STATE_DISABLED = 3, |
| 511 | IB_PORT_PHYS_STATE_PORT_CONFIGURATION_TRAINING = 4, |
| 512 | IB_PORT_PHYS_STATE_LINK_UP = 5, |
| 513 | IB_PORT_PHYS_STATE_LINK_ERROR_RECOVERY = 6, |
| 514 | IB_PORT_PHYS_STATE_PHY_TEST = 7, |
| 515 | }; |
| 516 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 517 | enum ib_port_width { |
| 518 | IB_WIDTH_1X = 1, |
Michael Guralnik | dbabf68 | 2018-12-09 11:49:49 +0200 | [diff] [blame] | 519 | IB_WIDTH_2X = 16, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 520 | IB_WIDTH_4X = 2, |
| 521 | IB_WIDTH_8X = 4, |
| 522 | IB_WIDTH_12X = 8 |
| 523 | }; |
| 524 | |
| 525 | static inline int ib_width_enum_to_int(enum ib_port_width width) |
| 526 | { |
| 527 | switch (width) { |
| 528 | case IB_WIDTH_1X: return 1; |
Michael Guralnik | dbabf68 | 2018-12-09 11:49:49 +0200 | [diff] [blame] | 529 | case IB_WIDTH_2X: return 2; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 530 | case IB_WIDTH_4X: return 4; |
| 531 | case IB_WIDTH_8X: return 8; |
| 532 | case IB_WIDTH_12X: return 12; |
| 533 | default: return -1; |
| 534 | } |
| 535 | } |
| 536 | |
Or Gerlitz | 2e96691 | 2012-02-28 18:49:50 +0200 | [diff] [blame] | 537 | enum ib_port_speed { |
| 538 | IB_SPEED_SDR = 1, |
| 539 | IB_SPEED_DDR = 2, |
| 540 | IB_SPEED_QDR = 4, |
| 541 | IB_SPEED_FDR10 = 8, |
| 542 | IB_SPEED_FDR = 16, |
Noa Osherovich | 12113a3 | 2017-04-20 20:53:31 +0300 | [diff] [blame] | 543 | IB_SPEED_EDR = 32, |
Aharon Landau | 376ceb3 | 2020-09-17 12:02:23 +0300 | [diff] [blame] | 544 | IB_SPEED_HDR = 64, |
| 545 | IB_SPEED_NDR = 128, |
Or Gerlitz | 2e96691 | 2012-02-28 18:49:50 +0200 | [diff] [blame] | 546 | }; |
| 547 | |
Christoph Lameter | b40f475 | 2016-05-16 12:49:33 -0500 | [diff] [blame] | 548 | /** |
| 549 | * struct rdma_hw_stats |
Mark Bloch | e945130 | 2018-03-27 15:51:05 +0300 | [diff] [blame] | 550 | * @lock - Mutex to protect parallel write access to lifespan and values |
| 551 | * of counters, which are 64bits and not guaranteeed to be written |
| 552 | * atomicaly on 32bits systems. |
Christoph Lameter | b40f475 | 2016-05-16 12:49:33 -0500 | [diff] [blame] | 553 | * @timestamp - Used by the core code to track when the last update was |
| 554 | * @lifespan - Used by the core code to determine how old the counters |
| 555 | * should be before being updated again. Stored in jiffies, defaults |
| 556 | * to 10 milliseconds, drivers can override the default be specifying |
| 557 | * their own value during their allocation routine. |
| 558 | * @name - Array of pointers to static names used for the counters in |
| 559 | * directory. |
| 560 | * @num_counters - How many hardware counters there are. If name is |
| 561 | * shorter than this number, a kernel oops will result. Driver authors |
| 562 | * are encouraged to leave BUILD_BUG_ON(ARRAY_SIZE(@name) < num_counters) |
| 563 | * in their code to prevent this. |
| 564 | * @value - Array of u64 counters that are accessed by the sysfs code and |
| 565 | * filled in by the drivers get_stats routine |
| 566 | */ |
| 567 | struct rdma_hw_stats { |
Mark Bloch | e945130 | 2018-03-27 15:51:05 +0300 | [diff] [blame] | 568 | struct mutex lock; /* Protect lifespan and values[] */ |
Christoph Lameter | b40f475 | 2016-05-16 12:49:33 -0500 | [diff] [blame] | 569 | unsigned long timestamp; |
| 570 | unsigned long lifespan; |
| 571 | const char * const *names; |
| 572 | int num_counters; |
| 573 | u64 value[]; |
Steve Wise | 7f624d0 | 2008-07-14 23:48:48 -0700 | [diff] [blame] | 574 | }; |
| 575 | |
Christoph Lameter | b40f475 | 2016-05-16 12:49:33 -0500 | [diff] [blame] | 576 | #define RDMA_HW_STATS_DEFAULT_LIFESPAN 10 |
| 577 | /** |
| 578 | * rdma_alloc_hw_stats_struct - Helper function to allocate dynamic struct |
| 579 | * for drivers. |
| 580 | * @names - Array of static const char * |
| 581 | * @num_counters - How many elements in array |
| 582 | * @lifespan - How many milliseconds between updates |
| 583 | */ |
| 584 | static inline struct rdma_hw_stats *rdma_alloc_hw_stats_struct( |
| 585 | const char * const *names, int num_counters, |
| 586 | unsigned long lifespan) |
| 587 | { |
| 588 | struct rdma_hw_stats *stats; |
Steve Wise | 7f624d0 | 2008-07-14 23:48:48 -0700 | [diff] [blame] | 589 | |
Christoph Lameter | b40f475 | 2016-05-16 12:49:33 -0500 | [diff] [blame] | 590 | stats = kzalloc(sizeof(*stats) + num_counters * sizeof(u64), |
| 591 | GFP_KERNEL); |
| 592 | if (!stats) |
| 593 | return NULL; |
| 594 | stats->names = names; |
| 595 | stats->num_counters = num_counters; |
| 596 | stats->lifespan = msecs_to_jiffies(lifespan); |
Steve Wise | 7f624d0 | 2008-07-14 23:48:48 -0700 | [diff] [blame] | 597 | |
Christoph Lameter | b40f475 | 2016-05-16 12:49:33 -0500 | [diff] [blame] | 598 | return stats; |
| 599 | } |
| 600 | |
Steve Wise | 7f624d0 | 2008-07-14 23:48:48 -0700 | [diff] [blame] | 601 | |
Ira Weiny | f9b22e3 | 2015-05-13 20:02:59 -0400 | [diff] [blame] | 602 | /* Define bits for the various functionality this port needs to be supported by |
| 603 | * the core. |
| 604 | */ |
| 605 | /* Management 0x00000FFF */ |
| 606 | #define RDMA_CORE_CAP_IB_MAD 0x00000001 |
| 607 | #define RDMA_CORE_CAP_IB_SMI 0x00000002 |
| 608 | #define RDMA_CORE_CAP_IB_CM 0x00000004 |
| 609 | #define RDMA_CORE_CAP_IW_CM 0x00000008 |
| 610 | #define RDMA_CORE_CAP_IB_SA 0x00000010 |
Ira Weiny | 65995fe | 2015-06-06 14:38:32 -0400 | [diff] [blame] | 611 | #define RDMA_CORE_CAP_OPA_MAD 0x00000020 |
Ira Weiny | f9b22e3 | 2015-05-13 20:02:59 -0400 | [diff] [blame] | 612 | |
| 613 | /* Address format 0x000FF000 */ |
| 614 | #define RDMA_CORE_CAP_AF_IB 0x00001000 |
| 615 | #define RDMA_CORE_CAP_ETH_AH 0x00002000 |
Dasaratharaman Chandramouli | 94d595c | 2017-03-20 19:38:09 -0400 | [diff] [blame] | 616 | #define RDMA_CORE_CAP_OPA_AH 0x00004000 |
Artemy Kovalyov | b02289b | 2018-07-04 15:57:50 +0300 | [diff] [blame] | 617 | #define RDMA_CORE_CAP_IB_GRH_REQUIRED 0x00008000 |
Ira Weiny | f9b22e3 | 2015-05-13 20:02:59 -0400 | [diff] [blame] | 618 | |
| 619 | /* Protocol 0xFFF00000 */ |
| 620 | #define RDMA_CORE_CAP_PROT_IB 0x00100000 |
| 621 | #define RDMA_CORE_CAP_PROT_ROCE 0x00200000 |
| 622 | #define RDMA_CORE_CAP_PROT_IWARP 0x00400000 |
Matan Barak | 7766a99 | 2015-12-23 14:56:50 +0200 | [diff] [blame] | 623 | #define RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP 0x00800000 |
Or Gerlitz | aa773bd | 2017-01-24 13:02:35 +0200 | [diff] [blame] | 624 | #define RDMA_CORE_CAP_PROT_RAW_PACKET 0x01000000 |
Or Gerlitz | ce1e055 | 2017-01-24 13:02:38 +0200 | [diff] [blame] | 625 | #define RDMA_CORE_CAP_PROT_USNIC 0x02000000 |
Ira Weiny | f9b22e3 | 2015-05-13 20:02:59 -0400 | [diff] [blame] | 626 | |
Artemy Kovalyov | b02289b | 2018-07-04 15:57:50 +0300 | [diff] [blame] | 627 | #define RDMA_CORE_PORT_IB_GRH_REQUIRED (RDMA_CORE_CAP_IB_GRH_REQUIRED \ |
| 628 | | RDMA_CORE_CAP_PROT_ROCE \ |
| 629 | | RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP) |
| 630 | |
Ira Weiny | f9b22e3 | 2015-05-13 20:02:59 -0400 | [diff] [blame] | 631 | #define RDMA_CORE_PORT_IBA_IB (RDMA_CORE_CAP_PROT_IB \ |
| 632 | | RDMA_CORE_CAP_IB_MAD \ |
| 633 | | RDMA_CORE_CAP_IB_SMI \ |
| 634 | | RDMA_CORE_CAP_IB_CM \ |
| 635 | | RDMA_CORE_CAP_IB_SA \ |
| 636 | | RDMA_CORE_CAP_AF_IB) |
| 637 | #define RDMA_CORE_PORT_IBA_ROCE (RDMA_CORE_CAP_PROT_ROCE \ |
| 638 | | RDMA_CORE_CAP_IB_MAD \ |
| 639 | | RDMA_CORE_CAP_IB_CM \ |
Ira Weiny | f9b22e3 | 2015-05-13 20:02:59 -0400 | [diff] [blame] | 640 | | RDMA_CORE_CAP_AF_IB \ |
| 641 | | RDMA_CORE_CAP_ETH_AH) |
Matan Barak | 7766a99 | 2015-12-23 14:56:50 +0200 | [diff] [blame] | 642 | #define RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP \ |
| 643 | (RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP \ |
| 644 | | RDMA_CORE_CAP_IB_MAD \ |
| 645 | | RDMA_CORE_CAP_IB_CM \ |
| 646 | | RDMA_CORE_CAP_AF_IB \ |
| 647 | | RDMA_CORE_CAP_ETH_AH) |
Ira Weiny | f9b22e3 | 2015-05-13 20:02:59 -0400 | [diff] [blame] | 648 | #define RDMA_CORE_PORT_IWARP (RDMA_CORE_CAP_PROT_IWARP \ |
| 649 | | RDMA_CORE_CAP_IW_CM) |
Ira Weiny | 65995fe | 2015-06-06 14:38:32 -0400 | [diff] [blame] | 650 | #define RDMA_CORE_PORT_INTEL_OPA (RDMA_CORE_PORT_IBA_IB \ |
| 651 | | RDMA_CORE_CAP_OPA_MAD) |
Ira Weiny | f9b22e3 | 2015-05-13 20:02:59 -0400 | [diff] [blame] | 652 | |
Or Gerlitz | aa773bd | 2017-01-24 13:02:35 +0200 | [diff] [blame] | 653 | #define RDMA_CORE_PORT_RAW_PACKET (RDMA_CORE_CAP_PROT_RAW_PACKET) |
| 654 | |
Or Gerlitz | ce1e055 | 2017-01-24 13:02:38 +0200 | [diff] [blame] | 655 | #define RDMA_CORE_PORT_USNIC (RDMA_CORE_CAP_PROT_USNIC) |
| 656 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 657 | struct ib_port_attr { |
Eli Cohen | fad61ad | 2016-03-11 22:58:36 +0200 | [diff] [blame] | 658 | u64 subnet_prefix; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 659 | enum ib_port_state state; |
| 660 | enum ib_mtu max_mtu; |
| 661 | enum ib_mtu active_mtu; |
Kaike Wan | 6d72344 | 2020-05-11 12:06:18 -0400 | [diff] [blame] | 662 | u32 phys_mtu; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 663 | int gid_tbl_len; |
Jason Gunthorpe | 2f944c0 | 2018-07-04 15:57:48 +0300 | [diff] [blame] | 664 | unsigned int ip_gids:1; |
| 665 | /* This is the value from PortInfo CapabilityMask, defined by IBA */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 666 | u32 port_cap_flags; |
| 667 | u32 max_msg_sz; |
| 668 | u32 bad_pkey_cntr; |
| 669 | u32 qkey_viol_cntr; |
| 670 | u16 pkey_tbl_len; |
Dasaratharaman Chandramouli | db58540 | 2017-06-08 13:37:48 -0400 | [diff] [blame] | 671 | u32 sm_lid; |
Dasaratharaman Chandramouli | 582faf3 | 2017-06-08 13:37:47 -0400 | [diff] [blame] | 672 | u32 lid; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 673 | u8 lmc; |
| 674 | u8 max_vl_num; |
| 675 | u8 sm_sl; |
| 676 | u8 subnet_timeout; |
| 677 | u8 init_type_reply; |
| 678 | u8 active_width; |
Aharon Landau | 376ceb3 | 2020-09-17 12:02:23 +0300 | [diff] [blame] | 679 | u16 active_speed; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 680 | u8 phys_state; |
Michael Guralnik | 1e8f43b | 2018-12-09 11:49:48 +0200 | [diff] [blame] | 681 | u16 port_cap_flags2; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 682 | }; |
| 683 | |
| 684 | enum ib_device_modify_flags { |
Roland Dreier | c5bcbbb | 2006-02-02 09:47:14 -0800 | [diff] [blame] | 685 | IB_DEVICE_MODIFY_SYS_IMAGE_GUID = 1 << 0, |
| 686 | IB_DEVICE_MODIFY_NODE_DESC = 1 << 1 |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 687 | }; |
| 688 | |
Yuval Shaia | bd99fde | 2016-08-25 10:57:07 -0700 | [diff] [blame] | 689 | #define IB_DEVICE_NODE_DESC_MAX 64 |
| 690 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 691 | struct ib_device_modify { |
| 692 | u64 sys_image_guid; |
Yuval Shaia | bd99fde | 2016-08-25 10:57:07 -0700 | [diff] [blame] | 693 | char node_desc[IB_DEVICE_NODE_DESC_MAX]; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 694 | }; |
| 695 | |
| 696 | enum ib_port_modify_flags { |
| 697 | IB_PORT_SHUTDOWN = 1, |
| 698 | IB_PORT_INIT_TYPE = (1<<2), |
Vishwanathapura, Niranjana | cb49366 | 2017-06-01 17:04:02 -0700 | [diff] [blame] | 699 | IB_PORT_RESET_QKEY_CNTR = (1<<3), |
| 700 | IB_PORT_OPA_MASK_CHG = (1<<4) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 701 | }; |
| 702 | |
| 703 | struct ib_port_modify { |
| 704 | u32 set_port_cap_mask; |
| 705 | u32 clr_port_cap_mask; |
| 706 | u8 init_type; |
| 707 | }; |
| 708 | |
| 709 | enum ib_event_type { |
| 710 | IB_EVENT_CQ_ERR, |
| 711 | IB_EVENT_QP_FATAL, |
| 712 | IB_EVENT_QP_REQ_ERR, |
| 713 | IB_EVENT_QP_ACCESS_ERR, |
| 714 | IB_EVENT_COMM_EST, |
| 715 | IB_EVENT_SQ_DRAINED, |
| 716 | IB_EVENT_PATH_MIG, |
| 717 | IB_EVENT_PATH_MIG_ERR, |
| 718 | IB_EVENT_DEVICE_FATAL, |
| 719 | IB_EVENT_PORT_ACTIVE, |
| 720 | IB_EVENT_PORT_ERR, |
| 721 | IB_EVENT_LID_CHANGE, |
| 722 | IB_EVENT_PKEY_CHANGE, |
Roland Dreier | d41fcc6 | 2005-08-18 12:23:08 -0700 | [diff] [blame] | 723 | IB_EVENT_SM_CHANGE, |
| 724 | IB_EVENT_SRQ_ERR, |
| 725 | IB_EVENT_SRQ_LIMIT_REACHED, |
Leonid Arsh | 63942c9 | 2006-06-17 20:37:35 -0700 | [diff] [blame] | 726 | IB_EVENT_QP_LAST_WQE_REACHED, |
Or Gerlitz | 761d90e | 2011-06-15 14:39:29 +0000 | [diff] [blame] | 727 | IB_EVENT_CLIENT_REREGISTER, |
| 728 | IB_EVENT_GID_CHANGE, |
Yishai Hadas | f213c05 | 2016-05-23 15:20:49 +0300 | [diff] [blame] | 729 | IB_EVENT_WQ_FATAL, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 730 | }; |
| 731 | |
Bart Van Assche | db7489e | 2015-08-03 10:01:52 -0700 | [diff] [blame] | 732 | const char *__attribute_const__ ib_event_msg(enum ib_event_type event); |
Sagi Grimberg | 2b1b5b6 | 2015-05-18 13:40:28 +0300 | [diff] [blame] | 733 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 734 | struct ib_event { |
| 735 | struct ib_device *device; |
| 736 | union { |
| 737 | struct ib_cq *cq; |
| 738 | struct ib_qp *qp; |
Roland Dreier | d41fcc6 | 2005-08-18 12:23:08 -0700 | [diff] [blame] | 739 | struct ib_srq *srq; |
Yishai Hadas | f213c05 | 2016-05-23 15:20:49 +0300 | [diff] [blame] | 740 | struct ib_wq *wq; |
Mark Bloch | 1fb7f89 | 2021-03-01 09:04:20 +0200 | [diff] [blame] | 741 | u32 port_num; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 742 | } element; |
| 743 | enum ib_event_type event; |
| 744 | }; |
| 745 | |
| 746 | struct ib_event_handler { |
| 747 | struct ib_device *device; |
| 748 | void (*handler)(struct ib_event_handler *, struct ib_event *); |
| 749 | struct list_head list; |
| 750 | }; |
| 751 | |
| 752 | #define INIT_IB_EVENT_HANDLER(_ptr, _device, _handler) \ |
| 753 | do { \ |
| 754 | (_ptr)->device = _device; \ |
| 755 | (_ptr)->handler = _handler; \ |
| 756 | INIT_LIST_HEAD(&(_ptr)->list); \ |
| 757 | } while (0) |
| 758 | |
| 759 | struct ib_global_route { |
Jason Gunthorpe | 8d9ec9a | 2018-06-13 10:22:03 +0300 | [diff] [blame] | 760 | const struct ib_gid_attr *sgid_attr; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 761 | union ib_gid dgid; |
| 762 | u32 flow_label; |
| 763 | u8 sgid_index; |
| 764 | u8 hop_limit; |
| 765 | u8 traffic_class; |
| 766 | }; |
| 767 | |
Hal Rosenstock | 513789e | 2005-07-27 11:45:34 -0700 | [diff] [blame] | 768 | struct ib_grh { |
Sean Hefty | 97f52eb | 2005-08-13 21:05:57 -0700 | [diff] [blame] | 769 | __be32 version_tclass_flow; |
| 770 | __be16 paylen; |
Hal Rosenstock | 513789e | 2005-07-27 11:45:34 -0700 | [diff] [blame] | 771 | u8 next_hdr; |
| 772 | u8 hop_limit; |
| 773 | union ib_gid sgid; |
| 774 | union ib_gid dgid; |
| 775 | }; |
| 776 | |
Somnath Kotur | c865f24 | 2015-12-23 14:56:51 +0200 | [diff] [blame] | 777 | union rdma_network_hdr { |
| 778 | struct ib_grh ibgrh; |
| 779 | struct { |
| 780 | /* The IB spec states that if it's IPv4, the header |
| 781 | * is located in the last 20 bytes of the header. |
| 782 | */ |
| 783 | u8 reserved[20]; |
| 784 | struct iphdr roce4grh; |
| 785 | }; |
| 786 | }; |
| 787 | |
Don Hiatt | 7dafbab | 2017-05-12 09:19:55 -0700 | [diff] [blame] | 788 | #define IB_QPN_MASK 0xFFFFFF |
| 789 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 790 | enum { |
| 791 | IB_MULTICAST_QPN = 0xffffff |
| 792 | }; |
| 793 | |
Harvey Harrison | f3a7c66 | 2009-02-14 22:58:35 -0800 | [diff] [blame] | 794 | #define IB_LID_PERMISSIVE cpu_to_be16(0xFFFF) |
Dennis Dalessandro | b4e6439 | 2016-01-06 10:04:31 -0800 | [diff] [blame] | 795 | #define IB_MULTICAST_LID_BASE cpu_to_be16(0xC000) |
Sean Hefty | 97f52eb | 2005-08-13 21:05:57 -0700 | [diff] [blame] | 796 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 797 | enum ib_ah_flags { |
| 798 | IB_AH_GRH = 1 |
| 799 | }; |
| 800 | |
Jack Morgenstein | bf6a9e3 | 2006-04-10 09:43:47 -0700 | [diff] [blame] | 801 | enum ib_rate { |
| 802 | IB_RATE_PORT_CURRENT = 0, |
| 803 | IB_RATE_2_5_GBPS = 2, |
| 804 | IB_RATE_5_GBPS = 5, |
| 805 | IB_RATE_10_GBPS = 3, |
| 806 | IB_RATE_20_GBPS = 6, |
| 807 | IB_RATE_30_GBPS = 4, |
| 808 | IB_RATE_40_GBPS = 7, |
| 809 | IB_RATE_60_GBPS = 8, |
| 810 | IB_RATE_80_GBPS = 9, |
Marcel Apfelbaum | 71eeba1 | 2011-10-05 14:21:47 +0300 | [diff] [blame] | 811 | IB_RATE_120_GBPS = 10, |
| 812 | IB_RATE_14_GBPS = 11, |
| 813 | IB_RATE_56_GBPS = 12, |
| 814 | IB_RATE_112_GBPS = 13, |
| 815 | IB_RATE_168_GBPS = 14, |
| 816 | IB_RATE_25_GBPS = 15, |
| 817 | IB_RATE_100_GBPS = 16, |
| 818 | IB_RATE_200_GBPS = 17, |
Michael Guralnik | a5a5d19 | 2018-12-09 11:49:50 +0200 | [diff] [blame] | 819 | IB_RATE_300_GBPS = 18, |
| 820 | IB_RATE_28_GBPS = 19, |
| 821 | IB_RATE_50_GBPS = 20, |
| 822 | IB_RATE_400_GBPS = 21, |
| 823 | IB_RATE_600_GBPS = 22, |
Jack Morgenstein | bf6a9e3 | 2006-04-10 09:43:47 -0700 | [diff] [blame] | 824 | }; |
| 825 | |
| 826 | /** |
| 827 | * ib_rate_to_mult - Convert the IB rate enum to a multiple of the |
| 828 | * base rate of 2.5 Gbit/sec. For example, IB_RATE_5_GBPS will be |
| 829 | * converted to 2, since 5 Gbit/sec is 2 * 2.5 Gbit/sec. |
| 830 | * @rate: rate to convert. |
| 831 | */ |
Roland Dreier | 8385fd8 | 2014-06-04 10:00:16 -0700 | [diff] [blame] | 832 | __attribute_const__ int ib_rate_to_mult(enum ib_rate rate); |
Jack Morgenstein | bf6a9e3 | 2006-04-10 09:43:47 -0700 | [diff] [blame] | 833 | |
| 834 | /** |
Marcel Apfelbaum | 71eeba1 | 2011-10-05 14:21:47 +0300 | [diff] [blame] | 835 | * ib_rate_to_mbps - Convert the IB rate enum to Mbps. |
| 836 | * For example, IB_RATE_2_5_GBPS will be converted to 2500. |
| 837 | * @rate: rate to convert. |
| 838 | */ |
Roland Dreier | 8385fd8 | 2014-06-04 10:00:16 -0700 | [diff] [blame] | 839 | __attribute_const__ int ib_rate_to_mbps(enum ib_rate rate); |
Marcel Apfelbaum | 71eeba1 | 2011-10-05 14:21:47 +0300 | [diff] [blame] | 840 | |
Sagi Grimberg | 17cd3a2 | 2014-02-23 14:19:04 +0200 | [diff] [blame] | 841 | |
| 842 | /** |
Sagi Grimberg | 9bee178 | 2015-07-30 10:32:35 +0300 | [diff] [blame] | 843 | * enum ib_mr_type - memory region type |
| 844 | * @IB_MR_TYPE_MEM_REG: memory region that is used for |
| 845 | * normal registration |
Sagi Grimberg | f5aa915 | 2016-02-29 19:07:32 +0200 | [diff] [blame] | 846 | * @IB_MR_TYPE_SG_GAPS: memory region that is capable to |
| 847 | * register any arbitrary sg lists (without |
| 848 | * the normal mr constraints - see |
| 849 | * ib_map_mr_sg) |
Max Gurtovoy | a0bc099 | 2019-06-11 18:52:38 +0300 | [diff] [blame] | 850 | * @IB_MR_TYPE_DM: memory region that is used for device |
| 851 | * memory registration |
| 852 | * @IB_MR_TYPE_USER: memory region that is used for the user-space |
| 853 | * application |
| 854 | * @IB_MR_TYPE_DMA: memory region that is used for DMA operations |
| 855 | * without address translations (VA=PA) |
Israel Rukshin | 26bc7ea | 2019-06-11 18:52:39 +0300 | [diff] [blame] | 856 | * @IB_MR_TYPE_INTEGRITY: memory region that is used for |
| 857 | * data integrity operations |
Sagi Grimberg | 17cd3a2 | 2014-02-23 14:19:04 +0200 | [diff] [blame] | 858 | */ |
Sagi Grimberg | 9bee178 | 2015-07-30 10:32:35 +0300 | [diff] [blame] | 859 | enum ib_mr_type { |
| 860 | IB_MR_TYPE_MEM_REG, |
Sagi Grimberg | f5aa915 | 2016-02-29 19:07:32 +0200 | [diff] [blame] | 861 | IB_MR_TYPE_SG_GAPS, |
Max Gurtovoy | a0bc099 | 2019-06-11 18:52:38 +0300 | [diff] [blame] | 862 | IB_MR_TYPE_DM, |
| 863 | IB_MR_TYPE_USER, |
| 864 | IB_MR_TYPE_DMA, |
Israel Rukshin | 26bc7ea | 2019-06-11 18:52:39 +0300 | [diff] [blame] | 865 | IB_MR_TYPE_INTEGRITY, |
Sagi Grimberg | 17cd3a2 | 2014-02-23 14:19:04 +0200 | [diff] [blame] | 866 | }; |
| 867 | |
Sagi Grimberg | 1b01d33 | 2014-02-23 14:19:05 +0200 | [diff] [blame] | 868 | enum ib_mr_status_check { |
| 869 | IB_MR_CHECK_SIG_STATUS = 1, |
| 870 | }; |
| 871 | |
| 872 | /** |
| 873 | * struct ib_mr_status - Memory region status container |
| 874 | * |
| 875 | * @fail_status: Bitmask of MR checks status. For each |
| 876 | * failed check a corresponding status bit is set. |
| 877 | * @sig_err: Additional info for IB_MR_CEHCK_SIG_STATUS |
| 878 | * failure. |
| 879 | */ |
| 880 | struct ib_mr_status { |
| 881 | u32 fail_status; |
| 882 | struct ib_sig_err sig_err; |
| 883 | }; |
| 884 | |
Marcel Apfelbaum | 71eeba1 | 2011-10-05 14:21:47 +0300 | [diff] [blame] | 885 | /** |
Jack Morgenstein | bf6a9e3 | 2006-04-10 09:43:47 -0700 | [diff] [blame] | 886 | * mult_to_ib_rate - Convert a multiple of 2.5 Gbit/sec to an IB rate |
| 887 | * enum. |
| 888 | * @mult: multiple to convert. |
| 889 | */ |
Roland Dreier | 8385fd8 | 2014-06-04 10:00:16 -0700 | [diff] [blame] | 890 | __attribute_const__ enum ib_rate mult_to_ib_rate(int mult); |
Jack Morgenstein | bf6a9e3 | 2006-04-10 09:43:47 -0700 | [diff] [blame] | 891 | |
Maor Gottlieb | fa5d010 | 2020-04-30 22:21:42 +0300 | [diff] [blame] | 892 | struct rdma_ah_init_attr { |
| 893 | struct rdma_ah_attr *ah_attr; |
| 894 | u32 flags; |
Maor Gottlieb | 51aab12 | 2020-04-30 22:21:44 +0300 | [diff] [blame] | 895 | struct net_device *xmit_slave; |
Maor Gottlieb | fa5d010 | 2020-04-30 22:21:42 +0300 | [diff] [blame] | 896 | }; |
| 897 | |
Dasaratharaman Chandramouli | 44c5848 | 2017-04-29 14:41:29 -0400 | [diff] [blame] | 898 | enum rdma_ah_attr_type { |
Don Hiatt | 87daac6 | 2018-02-01 10:57:03 -0800 | [diff] [blame] | 899 | RDMA_AH_ATTR_TYPE_UNDEFINED, |
Dasaratharaman Chandramouli | 44c5848 | 2017-04-29 14:41:29 -0400 | [diff] [blame] | 900 | RDMA_AH_ATTR_TYPE_IB, |
| 901 | RDMA_AH_ATTR_TYPE_ROCE, |
Dasaratharaman Chandramouli | 64b4646 | 2017-04-29 14:41:30 -0400 | [diff] [blame] | 902 | RDMA_AH_ATTR_TYPE_OPA, |
Dasaratharaman Chandramouli | 44c5848 | 2017-04-29 14:41:29 -0400 | [diff] [blame] | 903 | }; |
| 904 | |
| 905 | struct ib_ah_attr { |
| 906 | u16 dlid; |
| 907 | u8 src_path_bits; |
| 908 | }; |
| 909 | |
| 910 | struct roce_ah_attr { |
| 911 | u8 dmac[ETH_ALEN]; |
| 912 | }; |
| 913 | |
Dasaratharaman Chandramouli | 64b4646 | 2017-04-29 14:41:30 -0400 | [diff] [blame] | 914 | struct opa_ah_attr { |
| 915 | u32 dlid; |
| 916 | u8 src_path_bits; |
Don Hiatt | d98bb7f | 2017-08-04 13:54:16 -0700 | [diff] [blame] | 917 | bool make_grd; |
Dasaratharaman Chandramouli | 64b4646 | 2017-04-29 14:41:30 -0400 | [diff] [blame] | 918 | }; |
| 919 | |
Dasaratharaman Chandramouli | 9089885 | 2017-04-29 14:41:18 -0400 | [diff] [blame] | 920 | struct rdma_ah_attr { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 921 | struct ib_global_route grh; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 922 | u8 sl; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 923 | u8 static_rate; |
Mark Bloch | 1fb7f89 | 2021-03-01 09:04:20 +0200 | [diff] [blame] | 924 | u32 port_num; |
Dasaratharaman Chandramouli | 44c5848 | 2017-04-29 14:41:29 -0400 | [diff] [blame] | 925 | u8 ah_flags; |
| 926 | enum rdma_ah_attr_type type; |
| 927 | union { |
| 928 | struct ib_ah_attr ib; |
| 929 | struct roce_ah_attr roce; |
Dasaratharaman Chandramouli | 64b4646 | 2017-04-29 14:41:30 -0400 | [diff] [blame] | 930 | struct opa_ah_attr opa; |
Dasaratharaman Chandramouli | 44c5848 | 2017-04-29 14:41:29 -0400 | [diff] [blame] | 931 | }; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 932 | }; |
| 933 | |
| 934 | enum ib_wc_status { |
| 935 | IB_WC_SUCCESS, |
| 936 | IB_WC_LOC_LEN_ERR, |
| 937 | IB_WC_LOC_QP_OP_ERR, |
| 938 | IB_WC_LOC_EEC_OP_ERR, |
| 939 | IB_WC_LOC_PROT_ERR, |
| 940 | IB_WC_WR_FLUSH_ERR, |
| 941 | IB_WC_MW_BIND_ERR, |
| 942 | IB_WC_BAD_RESP_ERR, |
| 943 | IB_WC_LOC_ACCESS_ERR, |
| 944 | IB_WC_REM_INV_REQ_ERR, |
| 945 | IB_WC_REM_ACCESS_ERR, |
| 946 | IB_WC_REM_OP_ERR, |
| 947 | IB_WC_RETRY_EXC_ERR, |
| 948 | IB_WC_RNR_RETRY_EXC_ERR, |
| 949 | IB_WC_LOC_RDD_VIOL_ERR, |
| 950 | IB_WC_REM_INV_RD_REQ_ERR, |
| 951 | IB_WC_REM_ABORT_ERR, |
| 952 | IB_WC_INV_EECN_ERR, |
| 953 | IB_WC_INV_EEC_STATE_ERR, |
| 954 | IB_WC_FATAL_ERR, |
| 955 | IB_WC_RESP_TIMEOUT_ERR, |
| 956 | IB_WC_GENERAL_ERR |
| 957 | }; |
| 958 | |
Bart Van Assche | db7489e | 2015-08-03 10:01:52 -0700 | [diff] [blame] | 959 | const char *__attribute_const__ ib_wc_status_msg(enum ib_wc_status status); |
Sagi Grimberg | 2b1b5b6 | 2015-05-18 13:40:28 +0300 | [diff] [blame] | 960 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 961 | enum ib_wc_opcode { |
Bob Pearson | b60b9c0 | 2020-09-03 17:40:34 -0500 | [diff] [blame] | 962 | IB_WC_SEND = IB_UVERBS_WC_SEND, |
| 963 | IB_WC_RDMA_WRITE = IB_UVERBS_WC_RDMA_WRITE, |
| 964 | IB_WC_RDMA_READ = IB_UVERBS_WC_RDMA_READ, |
| 965 | IB_WC_COMP_SWAP = IB_UVERBS_WC_COMP_SWAP, |
| 966 | IB_WC_FETCH_ADD = IB_UVERBS_WC_FETCH_ADD, |
| 967 | IB_WC_BIND_MW = IB_UVERBS_WC_BIND_MW, |
| 968 | IB_WC_LOCAL_INV = IB_UVERBS_WC_LOCAL_INV, |
| 969 | IB_WC_LSO = IB_UVERBS_WC_TSO, |
Sagi Grimberg | 4c67e2b | 2015-10-13 19:11:24 +0300 | [diff] [blame] | 970 | IB_WC_REG_MR, |
Vladimir Sokolovsky | 5e80ba8 | 2010-04-14 17:23:01 +0300 | [diff] [blame] | 971 | IB_WC_MASKED_COMP_SWAP, |
| 972 | IB_WC_MASKED_FETCH_ADD, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 973 | /* |
| 974 | * Set value of IB_WC_RECV so consumers can test if a completion is a |
| 975 | * receive by testing (opcode & IB_WC_RECV). |
| 976 | */ |
| 977 | IB_WC_RECV = 1 << 7, |
| 978 | IB_WC_RECV_RDMA_WITH_IMM |
| 979 | }; |
| 980 | |
| 981 | enum ib_wc_flags { |
| 982 | IB_WC_GRH = 1, |
Steve Wise | 00f7ec3 | 2008-07-14 23:48:45 -0700 | [diff] [blame] | 983 | IB_WC_WITH_IMM = (1<<1), |
| 984 | IB_WC_WITH_INVALIDATE = (1<<2), |
Or Gerlitz | d927d50 | 2012-01-11 19:03:51 +0200 | [diff] [blame] | 985 | IB_WC_IP_CSUM_OK = (1<<3), |
Matan Barak | dd5f03b | 2013-12-12 18:03:11 +0200 | [diff] [blame] | 986 | IB_WC_WITH_SMAC = (1<<4), |
| 987 | IB_WC_WITH_VLAN = (1<<5), |
Somnath Kotur | c865f24 | 2015-12-23 14:56:51 +0200 | [diff] [blame] | 988 | IB_WC_WITH_NETWORK_HDR_TYPE = (1<<6), |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 989 | }; |
| 990 | |
| 991 | struct ib_wc { |
Christoph Hellwig | 14d3a3b | 2015-12-11 11:53:03 -0800 | [diff] [blame] | 992 | union { |
| 993 | u64 wr_id; |
| 994 | struct ib_cqe *wr_cqe; |
| 995 | }; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 996 | enum ib_wc_status status; |
| 997 | enum ib_wc_opcode opcode; |
| 998 | u32 vendor_err; |
| 999 | u32 byte_len; |
Michael S. Tsirkin | 062dbb6 | 2006-12-31 21:09:42 +0200 | [diff] [blame] | 1000 | struct ib_qp *qp; |
Steve Wise | 00f7ec3 | 2008-07-14 23:48:45 -0700 | [diff] [blame] | 1001 | union { |
| 1002 | __be32 imm_data; |
| 1003 | u32 invalidate_rkey; |
| 1004 | } ex; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1005 | u32 src_qp; |
Bodong Wang | cd2a6e7 | 2018-01-12 07:58:41 +0200 | [diff] [blame] | 1006 | u32 slid; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1007 | int wc_flags; |
| 1008 | u16 pkey_index; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1009 | u8 sl; |
| 1010 | u8 dlid_path_bits; |
Mark Bloch | 1fb7f89 | 2021-03-01 09:04:20 +0200 | [diff] [blame] | 1011 | u32 port_num; /* valid only for DR SMPs on switches */ |
Matan Barak | dd5f03b | 2013-12-12 18:03:11 +0200 | [diff] [blame] | 1012 | u8 smac[ETH_ALEN]; |
| 1013 | u16 vlan_id; |
Somnath Kotur | c865f24 | 2015-12-23 14:56:51 +0200 | [diff] [blame] | 1014 | u8 network_hdr_type; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1015 | }; |
| 1016 | |
Roland Dreier | ed23a72 | 2007-05-06 21:02:48 -0700 | [diff] [blame] | 1017 | enum ib_cq_notify_flags { |
| 1018 | IB_CQ_SOLICITED = 1 << 0, |
| 1019 | IB_CQ_NEXT_COMP = 1 << 1, |
| 1020 | IB_CQ_SOLICITED_MASK = IB_CQ_SOLICITED | IB_CQ_NEXT_COMP, |
| 1021 | IB_CQ_REPORT_MISSED_EVENTS = 1 << 2, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1022 | }; |
| 1023 | |
Sean Hefty | 96104ed | 2011-05-23 16:31:36 -0700 | [diff] [blame] | 1024 | enum ib_srq_type { |
Yishai Hadas | 175ba58 | 2020-05-19 10:27:08 +0300 | [diff] [blame] | 1025 | IB_SRQT_BASIC = IB_UVERBS_SRQT_BASIC, |
| 1026 | IB_SRQT_XRC = IB_UVERBS_SRQT_XRC, |
| 1027 | IB_SRQT_TM = IB_UVERBS_SRQT_TM, |
Sean Hefty | 96104ed | 2011-05-23 16:31:36 -0700 | [diff] [blame] | 1028 | }; |
| 1029 | |
Artemy Kovalyov | 1a56ff6 | 2017-08-17 15:52:04 +0300 | [diff] [blame] | 1030 | static inline bool ib_srq_has_cq(enum ib_srq_type srq_type) |
| 1031 | { |
Artemy Kovalyov | 9c2c849 | 2017-08-17 15:52:05 +0300 | [diff] [blame] | 1032 | return srq_type == IB_SRQT_XRC || |
| 1033 | srq_type == IB_SRQT_TM; |
Artemy Kovalyov | 1a56ff6 | 2017-08-17 15:52:04 +0300 | [diff] [blame] | 1034 | } |
| 1035 | |
Roland Dreier | d41fcc6 | 2005-08-18 12:23:08 -0700 | [diff] [blame] | 1036 | enum ib_srq_attr_mask { |
| 1037 | IB_SRQ_MAX_WR = 1 << 0, |
| 1038 | IB_SRQ_LIMIT = 1 << 1, |
| 1039 | }; |
| 1040 | |
| 1041 | struct ib_srq_attr { |
| 1042 | u32 max_wr; |
| 1043 | u32 max_sge; |
| 1044 | u32 srq_limit; |
| 1045 | }; |
| 1046 | |
| 1047 | struct ib_srq_init_attr { |
| 1048 | void (*event_handler)(struct ib_event *, void *); |
| 1049 | void *srq_context; |
| 1050 | struct ib_srq_attr attr; |
Sean Hefty | 96104ed | 2011-05-23 16:31:36 -0700 | [diff] [blame] | 1051 | enum ib_srq_type srq_type; |
Sean Hefty | 418d513 | 2011-05-23 19:42:29 -0700 | [diff] [blame] | 1052 | |
Artemy Kovalyov | 1a56ff6 | 2017-08-17 15:52:04 +0300 | [diff] [blame] | 1053 | struct { |
| 1054 | struct ib_cq *cq; |
| 1055 | union { |
| 1056 | struct { |
| 1057 | struct ib_xrcd *xrcd; |
| 1058 | } xrc; |
Artemy Kovalyov | 9c2c849 | 2017-08-17 15:52:05 +0300 | [diff] [blame] | 1059 | |
| 1060 | struct { |
| 1061 | u32 max_num_tags; |
| 1062 | } tag_matching; |
Artemy Kovalyov | 1a56ff6 | 2017-08-17 15:52:04 +0300 | [diff] [blame] | 1063 | }; |
Sean Hefty | 418d513 | 2011-05-23 19:42:29 -0700 | [diff] [blame] | 1064 | } ext; |
Roland Dreier | d41fcc6 | 2005-08-18 12:23:08 -0700 | [diff] [blame] | 1065 | }; |
| 1066 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1067 | struct ib_qp_cap { |
| 1068 | u32 max_send_wr; |
| 1069 | u32 max_recv_wr; |
| 1070 | u32 max_send_sge; |
| 1071 | u32 max_recv_sge; |
| 1072 | u32 max_inline_data; |
Christoph Hellwig | a060b56 | 2016-05-03 18:01:09 +0200 | [diff] [blame] | 1073 | |
| 1074 | /* |
| 1075 | * Maximum number of rdma_rw_ctx structures in flight at a time. |
| 1076 | * ib_create_qp() will calculate the right amount of neededed WRs |
| 1077 | * and MRs based on this. |
| 1078 | */ |
| 1079 | u32 max_rdma_ctxs; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1080 | }; |
| 1081 | |
| 1082 | enum ib_sig_type { |
| 1083 | IB_SIGNAL_ALL_WR, |
| 1084 | IB_SIGNAL_REQ_WR |
| 1085 | }; |
| 1086 | |
| 1087 | enum ib_qp_type { |
| 1088 | /* |
| 1089 | * IB_QPT_SMI and IB_QPT_GSI have to be the first two entries |
| 1090 | * here (and in that order) since the MAD layer uses them as |
| 1091 | * indices into a 2-entry table. |
| 1092 | */ |
| 1093 | IB_QPT_SMI, |
| 1094 | IB_QPT_GSI, |
| 1095 | |
Yishai Hadas | 175ba58 | 2020-05-19 10:27:08 +0300 | [diff] [blame] | 1096 | IB_QPT_RC = IB_UVERBS_QPT_RC, |
| 1097 | IB_QPT_UC = IB_UVERBS_QPT_UC, |
| 1098 | IB_QPT_UD = IB_UVERBS_QPT_UD, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1099 | IB_QPT_RAW_IPV6, |
Sean Hefty | b42b63c | 2011-05-23 19:59:25 -0700 | [diff] [blame] | 1100 | IB_QPT_RAW_ETHERTYPE, |
Yishai Hadas | 175ba58 | 2020-05-19 10:27:08 +0300 | [diff] [blame] | 1101 | IB_QPT_RAW_PACKET = IB_UVERBS_QPT_RAW_PACKET, |
| 1102 | IB_QPT_XRC_INI = IB_UVERBS_QPT_XRC_INI, |
| 1103 | IB_QPT_XRC_TGT = IB_UVERBS_QPT_XRC_TGT, |
Jack Morgenstein | 0134f16 | 2013-07-07 17:25:52 +0300 | [diff] [blame] | 1104 | IB_QPT_MAX, |
Yishai Hadas | 175ba58 | 2020-05-19 10:27:08 +0300 | [diff] [blame] | 1105 | IB_QPT_DRIVER = IB_UVERBS_QPT_DRIVER, |
Jack Morgenstein | 0134f16 | 2013-07-07 17:25:52 +0300 | [diff] [blame] | 1106 | /* Reserve a range for qp types internal to the low level driver. |
| 1107 | * These qp types will not be visible at the IB core layer, so the |
| 1108 | * IB_QPT_MAX usages should not be affected in the core layer |
| 1109 | */ |
| 1110 | IB_QPT_RESERVED1 = 0x1000, |
| 1111 | IB_QPT_RESERVED2, |
| 1112 | IB_QPT_RESERVED3, |
| 1113 | IB_QPT_RESERVED4, |
| 1114 | IB_QPT_RESERVED5, |
| 1115 | IB_QPT_RESERVED6, |
| 1116 | IB_QPT_RESERVED7, |
| 1117 | IB_QPT_RESERVED8, |
| 1118 | IB_QPT_RESERVED9, |
| 1119 | IB_QPT_RESERVED10, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1120 | }; |
| 1121 | |
Eli Cohen | b846f25 | 2008-04-16 21:09:27 -0700 | [diff] [blame] | 1122 | enum ib_qp_create_flags { |
Ron Livne | 47ee1b9 | 2008-07-14 23:48:48 -0700 | [diff] [blame] | 1123 | IB_QP_CREATE_IPOIB_UD_LSO = 1 << 0, |
Yishai Hadas | 175ba58 | 2020-05-19 10:27:08 +0300 | [diff] [blame] | 1124 | IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK = |
| 1125 | IB_UVERBS_QP_CREATE_BLOCK_MULTICAST_LOOPBACK, |
Leon Romanovsky | 8a06ce5 | 2015-12-20 12:16:10 +0200 | [diff] [blame] | 1126 | IB_QP_CREATE_CROSS_CHANNEL = 1 << 2, |
| 1127 | IB_QP_CREATE_MANAGED_SEND = 1 << 3, |
| 1128 | IB_QP_CREATE_MANAGED_RECV = 1 << 4, |
Matan Barak | 90f1d1b | 2013-11-07 15:25:12 +0200 | [diff] [blame] | 1129 | IB_QP_CREATE_NETIF_QP = 1 << 5, |
Israel Rukshin | c0a6cbb | 2019-06-11 18:52:50 +0300 | [diff] [blame] | 1130 | IB_QP_CREATE_INTEGRITY_EN = 1 << 6, |
Gary Leshner | 7f90a5a | 2020-05-11 12:06:07 -0400 | [diff] [blame] | 1131 | IB_QP_CREATE_NETDEV_USE = 1 << 7, |
Yishai Hadas | 175ba58 | 2020-05-19 10:27:08 +0300 | [diff] [blame] | 1132 | IB_QP_CREATE_SCATTER_FCS = |
| 1133 | IB_UVERBS_QP_CREATE_SCATTER_FCS, |
| 1134 | IB_QP_CREATE_CVLAN_STRIPPING = |
| 1135 | IB_UVERBS_QP_CREATE_CVLAN_STRIPPING, |
Yishai Hadas | 02984cc | 2017-06-08 16:15:06 +0300 | [diff] [blame] | 1136 | IB_QP_CREATE_SOURCE_QPN = 1 << 10, |
Yishai Hadas | 175ba58 | 2020-05-19 10:27:08 +0300 | [diff] [blame] | 1137 | IB_QP_CREATE_PCI_WRITE_END_PADDING = |
| 1138 | IB_UVERBS_QP_CREATE_PCI_WRITE_END_PADDING, |
Jack Morgenstein | d2b5706 | 2012-08-03 08:40:37 +0000 | [diff] [blame] | 1139 | /* reserve bits 26-31 for low level drivers' internal use */ |
| 1140 | IB_QP_CREATE_RESERVED_START = 1 << 26, |
| 1141 | IB_QP_CREATE_RESERVED_END = 1 << 31, |
Eli Cohen | b846f25 | 2008-04-16 21:09:27 -0700 | [diff] [blame] | 1142 | }; |
| 1143 | |
Yishai Hadas | 73c40c6 | 2013-08-01 18:49:53 +0300 | [diff] [blame] | 1144 | /* |
| 1145 | * Note: users may not call ib_close_qp or ib_destroy_qp from the event_handler |
| 1146 | * callback to destroy the passed in QP. |
| 1147 | */ |
| 1148 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1149 | struct ib_qp_init_attr { |
Chuck Lever | eb93c82e | 2018-09-04 11:45:20 -0400 | [diff] [blame] | 1150 | /* Consumer's event_handler callback must not block */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1151 | void (*event_handler)(struct ib_event *, void *); |
Chuck Lever | eb93c82e | 2018-09-04 11:45:20 -0400 | [diff] [blame] | 1152 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1153 | void *qp_context; |
| 1154 | struct ib_cq *send_cq; |
| 1155 | struct ib_cq *recv_cq; |
| 1156 | struct ib_srq *srq; |
Sean Hefty | b42b63c | 2011-05-23 19:59:25 -0700 | [diff] [blame] | 1157 | struct ib_xrcd *xrcd; /* XRC TGT QPs only */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1158 | struct ib_qp_cap cap; |
| 1159 | enum ib_sig_type sq_sig_type; |
| 1160 | enum ib_qp_type qp_type; |
Nathan Chancellor | b56511c | 2018-09-24 12:57:16 -0700 | [diff] [blame] | 1161 | u32 create_flags; |
Christoph Hellwig | a060b56 | 2016-05-03 18:01:09 +0200 | [diff] [blame] | 1162 | |
| 1163 | /* |
| 1164 | * Only needed for special QP types, or when using the RW API. |
| 1165 | */ |
Mark Bloch | 1fb7f89 | 2021-03-01 09:04:20 +0200 | [diff] [blame] | 1166 | u32 port_num; |
Yishai Hadas | a9017e2 | 2016-05-23 15:20:54 +0300 | [diff] [blame] | 1167 | struct ib_rwq_ind_table *rwq_ind_tbl; |
Yishai Hadas | 02984cc | 2017-06-08 16:15:06 +0300 | [diff] [blame] | 1168 | u32 source_qpn; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1169 | }; |
| 1170 | |
Sean Hefty | 0e0ec7e | 2011-08-08 15:31:51 -0700 | [diff] [blame] | 1171 | struct ib_qp_open_attr { |
| 1172 | void (*event_handler)(struct ib_event *, void *); |
| 1173 | void *qp_context; |
| 1174 | u32 qp_num; |
| 1175 | enum ib_qp_type qp_type; |
| 1176 | }; |
| 1177 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1178 | enum ib_rnr_timeout { |
| 1179 | IB_RNR_TIMER_655_36 = 0, |
| 1180 | IB_RNR_TIMER_000_01 = 1, |
| 1181 | IB_RNR_TIMER_000_02 = 2, |
| 1182 | IB_RNR_TIMER_000_03 = 3, |
| 1183 | IB_RNR_TIMER_000_04 = 4, |
| 1184 | IB_RNR_TIMER_000_06 = 5, |
| 1185 | IB_RNR_TIMER_000_08 = 6, |
| 1186 | IB_RNR_TIMER_000_12 = 7, |
| 1187 | IB_RNR_TIMER_000_16 = 8, |
| 1188 | IB_RNR_TIMER_000_24 = 9, |
| 1189 | IB_RNR_TIMER_000_32 = 10, |
| 1190 | IB_RNR_TIMER_000_48 = 11, |
| 1191 | IB_RNR_TIMER_000_64 = 12, |
| 1192 | IB_RNR_TIMER_000_96 = 13, |
| 1193 | IB_RNR_TIMER_001_28 = 14, |
| 1194 | IB_RNR_TIMER_001_92 = 15, |
| 1195 | IB_RNR_TIMER_002_56 = 16, |
| 1196 | IB_RNR_TIMER_003_84 = 17, |
| 1197 | IB_RNR_TIMER_005_12 = 18, |
| 1198 | IB_RNR_TIMER_007_68 = 19, |
| 1199 | IB_RNR_TIMER_010_24 = 20, |
| 1200 | IB_RNR_TIMER_015_36 = 21, |
| 1201 | IB_RNR_TIMER_020_48 = 22, |
| 1202 | IB_RNR_TIMER_030_72 = 23, |
| 1203 | IB_RNR_TIMER_040_96 = 24, |
| 1204 | IB_RNR_TIMER_061_44 = 25, |
| 1205 | IB_RNR_TIMER_081_92 = 26, |
| 1206 | IB_RNR_TIMER_122_88 = 27, |
| 1207 | IB_RNR_TIMER_163_84 = 28, |
| 1208 | IB_RNR_TIMER_245_76 = 29, |
| 1209 | IB_RNR_TIMER_327_68 = 30, |
| 1210 | IB_RNR_TIMER_491_52 = 31 |
| 1211 | }; |
| 1212 | |
| 1213 | enum ib_qp_attr_mask { |
| 1214 | IB_QP_STATE = 1, |
| 1215 | IB_QP_CUR_STATE = (1<<1), |
| 1216 | IB_QP_EN_SQD_ASYNC_NOTIFY = (1<<2), |
| 1217 | IB_QP_ACCESS_FLAGS = (1<<3), |
| 1218 | IB_QP_PKEY_INDEX = (1<<4), |
| 1219 | IB_QP_PORT = (1<<5), |
| 1220 | IB_QP_QKEY = (1<<6), |
| 1221 | IB_QP_AV = (1<<7), |
| 1222 | IB_QP_PATH_MTU = (1<<8), |
| 1223 | IB_QP_TIMEOUT = (1<<9), |
| 1224 | IB_QP_RETRY_CNT = (1<<10), |
| 1225 | IB_QP_RNR_RETRY = (1<<11), |
| 1226 | IB_QP_RQ_PSN = (1<<12), |
| 1227 | IB_QP_MAX_QP_RD_ATOMIC = (1<<13), |
| 1228 | IB_QP_ALT_PATH = (1<<14), |
| 1229 | IB_QP_MIN_RNR_TIMER = (1<<15), |
| 1230 | IB_QP_SQ_PSN = (1<<16), |
| 1231 | IB_QP_MAX_DEST_RD_ATOMIC = (1<<17), |
| 1232 | IB_QP_PATH_MIG_STATE = (1<<18), |
| 1233 | IB_QP_CAP = (1<<19), |
Matan Barak | dd5f03b | 2013-12-12 18:03:11 +0200 | [diff] [blame] | 1234 | IB_QP_DEST_QPN = (1<<20), |
Matan Barak | aa744cc | 2015-10-15 18:38:53 +0300 | [diff] [blame] | 1235 | IB_QP_RESERVED1 = (1<<21), |
| 1236 | IB_QP_RESERVED2 = (1<<22), |
| 1237 | IB_QP_RESERVED3 = (1<<23), |
| 1238 | IB_QP_RESERVED4 = (1<<24), |
Bodong Wang | 528e5a1 | 2016-12-01 13:43:14 +0200 | [diff] [blame] | 1239 | IB_QP_RATE_LIMIT = (1<<25), |
Jason Gunthorpe | 26e990b | 2020-10-03 20:20:06 -0300 | [diff] [blame] | 1240 | |
| 1241 | IB_QP_ATTR_STANDARD_BITS = GENMASK(20, 0), |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1242 | }; |
| 1243 | |
| 1244 | enum ib_qp_state { |
| 1245 | IB_QPS_RESET, |
| 1246 | IB_QPS_INIT, |
| 1247 | IB_QPS_RTR, |
| 1248 | IB_QPS_RTS, |
| 1249 | IB_QPS_SQD, |
| 1250 | IB_QPS_SQE, |
| 1251 | IB_QPS_ERR |
| 1252 | }; |
| 1253 | |
| 1254 | enum ib_mig_state { |
| 1255 | IB_MIG_MIGRATED, |
| 1256 | IB_MIG_REARM, |
| 1257 | IB_MIG_ARMED |
| 1258 | }; |
| 1259 | |
Shani Michaeli | 7083e42 | 2013-02-06 16:19:12 +0000 | [diff] [blame] | 1260 | enum ib_mw_type { |
| 1261 | IB_MW_TYPE_1 = 1, |
| 1262 | IB_MW_TYPE_2 = 2 |
| 1263 | }; |
| 1264 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1265 | struct ib_qp_attr { |
| 1266 | enum ib_qp_state qp_state; |
| 1267 | enum ib_qp_state cur_qp_state; |
| 1268 | enum ib_mtu path_mtu; |
| 1269 | enum ib_mig_state path_mig_state; |
| 1270 | u32 qkey; |
| 1271 | u32 rq_psn; |
| 1272 | u32 sq_psn; |
| 1273 | u32 dest_qp_num; |
| 1274 | int qp_access_flags; |
| 1275 | struct ib_qp_cap cap; |
Dasaratharaman Chandramouli | 9089885 | 2017-04-29 14:41:18 -0400 | [diff] [blame] | 1276 | struct rdma_ah_attr ah_attr; |
| 1277 | struct rdma_ah_attr alt_ah_attr; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1278 | u16 pkey_index; |
| 1279 | u16 alt_pkey_index; |
| 1280 | u8 en_sqd_async_notify; |
| 1281 | u8 sq_draining; |
| 1282 | u8 max_rd_atomic; |
| 1283 | u8 max_dest_rd_atomic; |
| 1284 | u8 min_rnr_timer; |
Mark Bloch | 1fb7f89 | 2021-03-01 09:04:20 +0200 | [diff] [blame] | 1285 | u32 port_num; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1286 | u8 timeout; |
| 1287 | u8 retry_cnt; |
| 1288 | u8 rnr_retry; |
Mark Bloch | 1fb7f89 | 2021-03-01 09:04:20 +0200 | [diff] [blame] | 1289 | u32 alt_port_num; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1290 | u8 alt_timeout; |
Bodong Wang | 528e5a1 | 2016-12-01 13:43:14 +0200 | [diff] [blame] | 1291 | u32 rate_limit; |
Maor Gottlieb | 51aab12 | 2020-04-30 22:21:44 +0300 | [diff] [blame] | 1292 | struct net_device *xmit_slave; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1293 | }; |
| 1294 | |
| 1295 | enum ib_wr_opcode { |
Jason Gunthorpe | 9a59739 | 2018-08-14 15:33:02 -0700 | [diff] [blame] | 1296 | /* These are shared with userspace */ |
| 1297 | IB_WR_RDMA_WRITE = IB_UVERBS_WR_RDMA_WRITE, |
| 1298 | IB_WR_RDMA_WRITE_WITH_IMM = IB_UVERBS_WR_RDMA_WRITE_WITH_IMM, |
| 1299 | IB_WR_SEND = IB_UVERBS_WR_SEND, |
| 1300 | IB_WR_SEND_WITH_IMM = IB_UVERBS_WR_SEND_WITH_IMM, |
| 1301 | IB_WR_RDMA_READ = IB_UVERBS_WR_RDMA_READ, |
| 1302 | IB_WR_ATOMIC_CMP_AND_SWP = IB_UVERBS_WR_ATOMIC_CMP_AND_SWP, |
| 1303 | IB_WR_ATOMIC_FETCH_AND_ADD = IB_UVERBS_WR_ATOMIC_FETCH_AND_ADD, |
Bob Pearson | b60b9c0 | 2020-09-03 17:40:34 -0500 | [diff] [blame] | 1304 | IB_WR_BIND_MW = IB_UVERBS_WR_BIND_MW, |
Jason Gunthorpe | 9a59739 | 2018-08-14 15:33:02 -0700 | [diff] [blame] | 1305 | IB_WR_LSO = IB_UVERBS_WR_TSO, |
| 1306 | IB_WR_SEND_WITH_INV = IB_UVERBS_WR_SEND_WITH_INV, |
| 1307 | IB_WR_RDMA_READ_WITH_INV = IB_UVERBS_WR_RDMA_READ_WITH_INV, |
| 1308 | IB_WR_LOCAL_INV = IB_UVERBS_WR_LOCAL_INV, |
| 1309 | IB_WR_MASKED_ATOMIC_CMP_AND_SWP = |
| 1310 | IB_UVERBS_WR_MASKED_ATOMIC_CMP_AND_SWP, |
| 1311 | IB_WR_MASKED_ATOMIC_FETCH_AND_ADD = |
| 1312 | IB_UVERBS_WR_MASKED_ATOMIC_FETCH_AND_ADD, |
| 1313 | |
| 1314 | /* These are kernel only and can not be issued by userspace */ |
| 1315 | IB_WR_REG_MR = 0x20, |
Max Gurtovoy | 38ca87c | 2019-06-11 18:52:46 +0300 | [diff] [blame] | 1316 | IB_WR_REG_MR_INTEGRITY, |
Jason Gunthorpe | 9a59739 | 2018-08-14 15:33:02 -0700 | [diff] [blame] | 1317 | |
Jack Morgenstein | 0134f16 | 2013-07-07 17:25:52 +0300 | [diff] [blame] | 1318 | /* reserve values for low level drivers' internal use. |
| 1319 | * These values will not be used at all in the ib core layer. |
| 1320 | */ |
| 1321 | IB_WR_RESERVED1 = 0xf0, |
| 1322 | IB_WR_RESERVED2, |
| 1323 | IB_WR_RESERVED3, |
| 1324 | IB_WR_RESERVED4, |
| 1325 | IB_WR_RESERVED5, |
| 1326 | IB_WR_RESERVED6, |
| 1327 | IB_WR_RESERVED7, |
| 1328 | IB_WR_RESERVED8, |
| 1329 | IB_WR_RESERVED9, |
| 1330 | IB_WR_RESERVED10, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1331 | }; |
| 1332 | |
| 1333 | enum ib_send_flags { |
| 1334 | IB_SEND_FENCE = 1, |
| 1335 | IB_SEND_SIGNALED = (1<<1), |
| 1336 | IB_SEND_SOLICITED = (1<<2), |
Eli Cohen | e0605d9 | 2008-01-30 18:30:57 +0200 | [diff] [blame] | 1337 | IB_SEND_INLINE = (1<<3), |
Jack Morgenstein | 0134f16 | 2013-07-07 17:25:52 +0300 | [diff] [blame] | 1338 | IB_SEND_IP_CSUM = (1<<4), |
| 1339 | |
| 1340 | /* reserve bits 26-31 for low level drivers' internal use */ |
| 1341 | IB_SEND_RESERVED_START = (1 << 26), |
| 1342 | IB_SEND_RESERVED_END = (1 << 31), |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1343 | }; |
| 1344 | |
| 1345 | struct ib_sge { |
| 1346 | u64 addr; |
| 1347 | u32 length; |
| 1348 | u32 lkey; |
| 1349 | }; |
| 1350 | |
Christoph Hellwig | 14d3a3b | 2015-12-11 11:53:03 -0800 | [diff] [blame] | 1351 | struct ib_cqe { |
| 1352 | void (*done)(struct ib_cq *cq, struct ib_wc *wc); |
| 1353 | }; |
| 1354 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1355 | struct ib_send_wr { |
| 1356 | struct ib_send_wr *next; |
Christoph Hellwig | 14d3a3b | 2015-12-11 11:53:03 -0800 | [diff] [blame] | 1357 | union { |
| 1358 | u64 wr_id; |
| 1359 | struct ib_cqe *wr_cqe; |
| 1360 | }; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1361 | struct ib_sge *sg_list; |
| 1362 | int num_sge; |
| 1363 | enum ib_wr_opcode opcode; |
| 1364 | int send_flags; |
Roland Dreier | 0f39cf3 | 2008-04-16 21:09:32 -0700 | [diff] [blame] | 1365 | union { |
| 1366 | __be32 imm_data; |
| 1367 | u32 invalidate_rkey; |
| 1368 | } ex; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1369 | }; |
| 1370 | |
Christoph Hellwig | e622f2f | 2015-10-08 09:16:33 +0100 | [diff] [blame] | 1371 | struct ib_rdma_wr { |
| 1372 | struct ib_send_wr wr; |
| 1373 | u64 remote_addr; |
| 1374 | u32 rkey; |
| 1375 | }; |
| 1376 | |
Bart Van Assche | f696bf6 | 2018-07-18 09:25:14 -0700 | [diff] [blame] | 1377 | static inline const struct ib_rdma_wr *rdma_wr(const struct ib_send_wr *wr) |
Christoph Hellwig | e622f2f | 2015-10-08 09:16:33 +0100 | [diff] [blame] | 1378 | { |
| 1379 | return container_of(wr, struct ib_rdma_wr, wr); |
| 1380 | } |
| 1381 | |
| 1382 | struct ib_atomic_wr { |
| 1383 | struct ib_send_wr wr; |
| 1384 | u64 remote_addr; |
| 1385 | u64 compare_add; |
| 1386 | u64 swap; |
| 1387 | u64 compare_add_mask; |
| 1388 | u64 swap_mask; |
| 1389 | u32 rkey; |
| 1390 | }; |
| 1391 | |
Bart Van Assche | f696bf6 | 2018-07-18 09:25:14 -0700 | [diff] [blame] | 1392 | static inline const struct ib_atomic_wr *atomic_wr(const struct ib_send_wr *wr) |
Christoph Hellwig | e622f2f | 2015-10-08 09:16:33 +0100 | [diff] [blame] | 1393 | { |
| 1394 | return container_of(wr, struct ib_atomic_wr, wr); |
| 1395 | } |
| 1396 | |
| 1397 | struct ib_ud_wr { |
| 1398 | struct ib_send_wr wr; |
| 1399 | struct ib_ah *ah; |
| 1400 | void *header; |
| 1401 | int hlen; |
| 1402 | int mss; |
| 1403 | u32 remote_qpn; |
| 1404 | u32 remote_qkey; |
| 1405 | u16 pkey_index; /* valid for GSI only */ |
Mark Bloch | 1fb7f89 | 2021-03-01 09:04:20 +0200 | [diff] [blame] | 1406 | u32 port_num; /* valid for DR SMPs on switch only */ |
Christoph Hellwig | e622f2f | 2015-10-08 09:16:33 +0100 | [diff] [blame] | 1407 | }; |
| 1408 | |
Bart Van Assche | f696bf6 | 2018-07-18 09:25:14 -0700 | [diff] [blame] | 1409 | static inline const struct ib_ud_wr *ud_wr(const struct ib_send_wr *wr) |
Christoph Hellwig | e622f2f | 2015-10-08 09:16:33 +0100 | [diff] [blame] | 1410 | { |
| 1411 | return container_of(wr, struct ib_ud_wr, wr); |
| 1412 | } |
| 1413 | |
Sagi Grimberg | 4c67e2b | 2015-10-13 19:11:24 +0300 | [diff] [blame] | 1414 | struct ib_reg_wr { |
| 1415 | struct ib_send_wr wr; |
| 1416 | struct ib_mr *mr; |
| 1417 | u32 key; |
| 1418 | int access; |
| 1419 | }; |
| 1420 | |
Bart Van Assche | f696bf6 | 2018-07-18 09:25:14 -0700 | [diff] [blame] | 1421 | static inline const struct ib_reg_wr *reg_wr(const struct ib_send_wr *wr) |
Sagi Grimberg | 4c67e2b | 2015-10-13 19:11:24 +0300 | [diff] [blame] | 1422 | { |
| 1423 | return container_of(wr, struct ib_reg_wr, wr); |
| 1424 | } |
| 1425 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1426 | struct ib_recv_wr { |
| 1427 | struct ib_recv_wr *next; |
Christoph Hellwig | 14d3a3b | 2015-12-11 11:53:03 -0800 | [diff] [blame] | 1428 | union { |
| 1429 | u64 wr_id; |
| 1430 | struct ib_cqe *wr_cqe; |
| 1431 | }; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1432 | struct ib_sge *sg_list; |
| 1433 | int num_sge; |
| 1434 | }; |
| 1435 | |
| 1436 | enum ib_access_flags { |
Jason Gunthorpe | 4fca037 | 2018-07-11 16:20:44 -0600 | [diff] [blame] | 1437 | IB_ACCESS_LOCAL_WRITE = IB_UVERBS_ACCESS_LOCAL_WRITE, |
| 1438 | IB_ACCESS_REMOTE_WRITE = IB_UVERBS_ACCESS_REMOTE_WRITE, |
| 1439 | IB_ACCESS_REMOTE_READ = IB_UVERBS_ACCESS_REMOTE_READ, |
| 1440 | IB_ACCESS_REMOTE_ATOMIC = IB_UVERBS_ACCESS_REMOTE_ATOMIC, |
| 1441 | IB_ACCESS_MW_BIND = IB_UVERBS_ACCESS_MW_BIND, |
| 1442 | IB_ZERO_BASED = IB_UVERBS_ACCESS_ZERO_BASED, |
| 1443 | IB_ACCESS_ON_DEMAND = IB_UVERBS_ACCESS_ON_DEMAND, |
| 1444 | IB_ACCESS_HUGETLB = IB_UVERBS_ACCESS_HUGETLB, |
Michael Guralnik | 2233c66 | 2020-01-08 20:05:38 +0200 | [diff] [blame] | 1445 | IB_ACCESS_RELAXED_ORDERING = IB_UVERBS_ACCESS_RELAXED_ORDERING, |
Jason Gunthorpe | 4fca037 | 2018-07-11 16:20:44 -0600 | [diff] [blame] | 1446 | |
Michael Guralnik | 68d384b | 2020-01-08 20:05:36 +0200 | [diff] [blame] | 1447 | IB_ACCESS_OPTIONAL = IB_UVERBS_ACCESS_OPTIONAL_RANGE, |
| 1448 | IB_ACCESS_SUPPORTED = |
| 1449 | ((IB_ACCESS_HUGETLB << 1) - 1) | IB_ACCESS_OPTIONAL, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1450 | }; |
| 1451 | |
Christoph Hellwig | b7d3e0a | 2015-12-23 19:12:47 +0100 | [diff] [blame] | 1452 | /* |
| 1453 | * XXX: these are apparently used for ->rereg_user_mr, no idea why they |
| 1454 | * are hidden here instead of a uapi header! |
| 1455 | */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1456 | enum ib_mr_rereg_flags { |
| 1457 | IB_MR_REREG_TRANS = 1, |
| 1458 | IB_MR_REREG_PD = (1<<1), |
Matan Barak | 7e6edb9 | 2014-07-31 11:01:28 +0300 | [diff] [blame] | 1459 | IB_MR_REREG_ACCESS = (1<<2), |
| 1460 | IB_MR_REREG_SUPPORTED = ((IB_MR_REREG_ACCESS << 1) - 1) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1461 | }; |
| 1462 | |
Haggai Eran | 882214e | 2014-12-11 17:04:18 +0200 | [diff] [blame] | 1463 | struct ib_umem; |
| 1464 | |
Matan Barak | 3832125 | 2017-04-04 13:31:42 +0300 | [diff] [blame] | 1465 | enum rdma_remove_reason { |
Yishai Hadas | 1c77483 | 2018-06-20 17:11:39 +0300 | [diff] [blame] | 1466 | /* |
| 1467 | * Userspace requested uobject deletion or initial try |
| 1468 | * to remove uobject via cleanup. Call could fail |
| 1469 | */ |
Matan Barak | 3832125 | 2017-04-04 13:31:42 +0300 | [diff] [blame] | 1470 | RDMA_REMOVE_DESTROY, |
| 1471 | /* Context deletion. This call should delete the actual object itself */ |
| 1472 | RDMA_REMOVE_CLOSE, |
| 1473 | /* Driver is being hot-unplugged. This call should delete the actual object itself */ |
| 1474 | RDMA_REMOVE_DRIVER_REMOVE, |
Jason Gunthorpe | 87ad80a | 2018-07-25 21:40:12 -0600 | [diff] [blame] | 1475 | /* uobj is being cleaned-up before being committed */ |
| 1476 | RDMA_REMOVE_ABORT, |
Leon Romanovsky | efa968e | 2020-11-04 16:45:55 +0200 | [diff] [blame] | 1477 | /* The driver failed to destroy the uobject and is being disconnected */ |
| 1478 | RDMA_REMOVE_DRIVER_FAILURE, |
Matan Barak | 3832125 | 2017-04-04 13:31:42 +0300 | [diff] [blame] | 1479 | }; |
| 1480 | |
Parav Pandit | 43579b5 | 2017-01-10 00:02:14 +0000 | [diff] [blame] | 1481 | struct ib_rdmacg_object { |
| 1482 | #ifdef CONFIG_CGROUP_RDMA |
| 1483 | struct rdma_cgroup *cg; /* owner rdma cgroup */ |
| 1484 | #endif |
| 1485 | }; |
| 1486 | |
Roland Dreier | e2773c0 | 2005-07-07 17:57:10 -0700 | [diff] [blame] | 1487 | struct ib_ucontext { |
| 1488 | struct ib_device *device; |
Matan Barak | 771addf | 2017-04-04 13:31:41 +0300 | [diff] [blame] | 1489 | struct ib_uverbs_file *ufile; |
Shachar Raindel | 8ada2c1 | 2014-12-11 17:04:17 +0200 | [diff] [blame] | 1490 | |
Parav Pandit | 43579b5 | 2017-01-10 00:02:14 +0000 | [diff] [blame] | 1491 | struct ib_rdmacg_object cg_obj; |
Leon Romanovsky | 6061521 | 2018-11-28 13:16:43 +0200 | [diff] [blame] | 1492 | /* |
| 1493 | * Implementation details of the RDMA core, don't use in drivers: |
| 1494 | */ |
| 1495 | struct rdma_restrack_entry res; |
Michal Kalderon | 3411f9f | 2019-10-30 11:44:11 +0200 | [diff] [blame] | 1496 | struct xarray mmap_xa; |
Roland Dreier | e2773c0 | 2005-07-07 17:57:10 -0700 | [diff] [blame] | 1497 | }; |
| 1498 | |
| 1499 | struct ib_uobject { |
| 1500 | u64 user_handle; /* handle given to us by userspace */ |
Jason Gunthorpe | 6a5e9c8 | 2018-07-04 11:32:07 +0300 | [diff] [blame] | 1501 | /* ufile & ucontext owning this object */ |
| 1502 | struct ib_uverbs_file *ufile; |
| 1503 | /* FIXME, save memory: ufile->context == context */ |
Roland Dreier | e2773c0 | 2005-07-07 17:57:10 -0700 | [diff] [blame] | 1504 | struct ib_ucontext *context; /* associated user context */ |
Roland Dreier | 9ead190 | 2006-06-17 20:44:49 -0700 | [diff] [blame] | 1505 | void *object; /* containing object */ |
Roland Dreier | e2773c0 | 2005-07-07 17:57:10 -0700 | [diff] [blame] | 1506 | struct list_head list; /* link to context's list */ |
Parav Pandit | 43579b5 | 2017-01-10 00:02:14 +0000 | [diff] [blame] | 1507 | struct ib_rdmacg_object cg_obj; /* rdmacg object */ |
Roland Dreier | b3d636b | 2008-04-16 21:01:06 -0700 | [diff] [blame] | 1508 | int id; /* index into kernel idr */ |
Roland Dreier | 9ead190 | 2006-06-17 20:44:49 -0700 | [diff] [blame] | 1509 | struct kref ref; |
Matan Barak | 3832125 | 2017-04-04 13:31:42 +0300 | [diff] [blame] | 1510 | atomic_t usecnt; /* protects exclusive access */ |
Mike Marciniszyn | d144da8 | 2015-11-02 12:13:25 -0500 | [diff] [blame] | 1511 | struct rcu_head rcu; /* kfree_rcu() overhead */ |
Matan Barak | 3832125 | 2017-04-04 13:31:42 +0300 | [diff] [blame] | 1512 | |
Jason Gunthorpe | 6b0d08f | 2018-08-09 20:14:37 -0600 | [diff] [blame] | 1513 | const struct uverbs_api_object *uapi_object; |
Roland Dreier | e2773c0 | 2005-07-07 17:57:10 -0700 | [diff] [blame] | 1514 | }; |
| 1515 | |
Roland Dreier | e2773c0 | 2005-07-07 17:57:10 -0700 | [diff] [blame] | 1516 | struct ib_udata { |
Yann Droneaud | 309243e | 2013-12-11 23:01:44 +0100 | [diff] [blame] | 1517 | const void __user *inbuf; |
Roland Dreier | e2773c0 | 2005-07-07 17:57:10 -0700 | [diff] [blame] | 1518 | void __user *outbuf; |
| 1519 | size_t inlen; |
| 1520 | size_t outlen; |
| 1521 | }; |
| 1522 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1523 | struct ib_pd { |
Jason Gunthorpe | 96249d7 | 2015-08-05 14:14:45 -0600 | [diff] [blame] | 1524 | u32 local_dma_lkey; |
Christoph Hellwig | ed082d3 | 2016-09-05 12:56:17 +0200 | [diff] [blame] | 1525 | u32 flags; |
Roland Dreier | e2773c0 | 2005-07-07 17:57:10 -0700 | [diff] [blame] | 1526 | struct ib_device *device; |
| 1527 | struct ib_uobject *uobject; |
| 1528 | atomic_t usecnt; /* count all resources */ |
Christoph Hellwig | 50d4633 | 2016-09-05 12:56:16 +0200 | [diff] [blame] | 1529 | |
Christoph Hellwig | ed082d3 | 2016-09-05 12:56:17 +0200 | [diff] [blame] | 1530 | u32 unsafe_global_rkey; |
| 1531 | |
Christoph Hellwig | 50d4633 | 2016-09-05 12:56:16 +0200 | [diff] [blame] | 1532 | /* |
| 1533 | * Implementation details of the RDMA core, don't use in drivers: |
| 1534 | */ |
| 1535 | struct ib_mr *__internal_mr; |
Leon Romanovsky | 02d8883 | 2018-01-28 11:17:20 +0200 | [diff] [blame] | 1536 | struct rdma_restrack_entry res; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1537 | }; |
| 1538 | |
Sean Hefty | 59991f9 | 2011-05-23 17:52:46 -0700 | [diff] [blame] | 1539 | struct ib_xrcd { |
| 1540 | struct ib_device *device; |
Sean Hefty | d3d72d9 | 2011-05-26 23:06:44 -0700 | [diff] [blame] | 1541 | atomic_t usecnt; /* count all exposed resources */ |
Sean Hefty | 53d0bd1 | 2011-05-24 08:33:46 -0700 | [diff] [blame] | 1542 | struct inode *inode; |
Maor Gottlieb | 6f3ca6f | 2020-07-06 15:27:16 +0300 | [diff] [blame] | 1543 | struct rw_semaphore tgt_qps_rwsem; |
| 1544 | struct xarray tgt_qps; |
Sean Hefty | 59991f9 | 2011-05-23 17:52:46 -0700 | [diff] [blame] | 1545 | }; |
| 1546 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1547 | struct ib_ah { |
| 1548 | struct ib_device *device; |
| 1549 | struct ib_pd *pd; |
Roland Dreier | e2773c0 | 2005-07-07 17:57:10 -0700 | [diff] [blame] | 1550 | struct ib_uobject *uobject; |
Jason Gunthorpe | 1a1f460 | 2018-06-13 10:22:08 +0300 | [diff] [blame] | 1551 | const struct ib_gid_attr *sgid_attr; |
Dasaratharaman Chandramouli | 44c5848 | 2017-04-29 14:41:29 -0400 | [diff] [blame] | 1552 | enum rdma_ah_attr_type type; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1553 | }; |
| 1554 | |
| 1555 | typedef void (*ib_comp_handler)(struct ib_cq *cq, void *cq_context); |
| 1556 | |
Christoph Hellwig | 14d3a3b | 2015-12-11 11:53:03 -0800 | [diff] [blame] | 1557 | enum ib_poll_context { |
Jack Morgenstein | f794809 | 2018-08-27 08:35:55 +0300 | [diff] [blame] | 1558 | IB_POLL_SOFTIRQ, /* poll from softirq context */ |
| 1559 | IB_POLL_WORKQUEUE, /* poll from workqueue */ |
| 1560 | IB_POLL_UNBOUND_WORKQUEUE, /* poll from unbound workqueue */ |
Yamin Friedman | c7ff819 | 2020-05-27 11:34:53 +0300 | [diff] [blame] | 1561 | IB_POLL_LAST_POOL_TYPE = IB_POLL_UNBOUND_WORKQUEUE, |
| 1562 | |
| 1563 | IB_POLL_DIRECT, /* caller context, no hw completions */ |
Christoph Hellwig | 14d3a3b | 2015-12-11 11:53:03 -0800 | [diff] [blame] | 1564 | }; |
| 1565 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1566 | struct ib_cq { |
Roland Dreier | e2773c0 | 2005-07-07 17:57:10 -0700 | [diff] [blame] | 1567 | struct ib_device *device; |
Jason Gunthorpe | 5bd48c1 | 2020-01-08 19:21:58 +0200 | [diff] [blame] | 1568 | struct ib_ucq_object *uobject; |
Roland Dreier | e2773c0 | 2005-07-07 17:57:10 -0700 | [diff] [blame] | 1569 | ib_comp_handler comp_handler; |
| 1570 | void (*event_handler)(struct ib_event *, void *); |
Dotan Barak | 4deccd6 | 2008-07-14 23:48:44 -0700 | [diff] [blame] | 1571 | void *cq_context; |
Roland Dreier | e2773c0 | 2005-07-07 17:57:10 -0700 | [diff] [blame] | 1572 | int cqe; |
Yamin Friedman | c7ff819 | 2020-05-27 11:34:53 +0300 | [diff] [blame] | 1573 | unsigned int cqe_used; |
Roland Dreier | e2773c0 | 2005-07-07 17:57:10 -0700 | [diff] [blame] | 1574 | atomic_t usecnt; /* count number of work queues */ |
Christoph Hellwig | 14d3a3b | 2015-12-11 11:53:03 -0800 | [diff] [blame] | 1575 | enum ib_poll_context poll_ctx; |
| 1576 | struct ib_wc *wc; |
Yamin Friedman | c7ff819 | 2020-05-27 11:34:53 +0300 | [diff] [blame] | 1577 | struct list_head pool_entry; |
Christoph Hellwig | 14d3a3b | 2015-12-11 11:53:03 -0800 | [diff] [blame] | 1578 | union { |
| 1579 | struct irq_poll iop; |
| 1580 | struct work_struct work; |
| 1581 | }; |
Jack Morgenstein | f794809 | 2018-08-27 08:35:55 +0300 | [diff] [blame] | 1582 | struct workqueue_struct *comp_wq; |
Yamin Friedman | da66297 | 2019-07-08 13:59:03 +0300 | [diff] [blame] | 1583 | struct dim *dim; |
Chuck Lever | 3e5901c | 2019-12-18 15:18:15 -0500 | [diff] [blame] | 1584 | |
| 1585 | /* updated only by trace points */ |
| 1586 | ktime_t timestamp; |
Yamin Friedman | 3446cbd | 2020-05-27 11:34:52 +0300 | [diff] [blame] | 1587 | u8 interrupt:1; |
| 1588 | u8 shared:1; |
Yamin Friedman | c7ff819 | 2020-05-27 11:34:53 +0300 | [diff] [blame] | 1589 | unsigned int comp_vector; |
Chuck Lever | 3e5901c | 2019-12-18 15:18:15 -0500 | [diff] [blame] | 1590 | |
Leon Romanovsky | 02d8883 | 2018-01-28 11:17:20 +0200 | [diff] [blame] | 1591 | /* |
| 1592 | * Implementation details of the RDMA core, don't use in drivers: |
| 1593 | */ |
| 1594 | struct rdma_restrack_entry res; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1595 | }; |
| 1596 | |
| 1597 | struct ib_srq { |
Roland Dreier | d41fcc6 | 2005-08-18 12:23:08 -0700 | [diff] [blame] | 1598 | struct ib_device *device; |
| 1599 | struct ib_pd *pd; |
Jason Gunthorpe | 9fbe334 | 2020-01-08 19:22:00 +0200 | [diff] [blame] | 1600 | struct ib_usrq_object *uobject; |
Roland Dreier | d41fcc6 | 2005-08-18 12:23:08 -0700 | [diff] [blame] | 1601 | void (*event_handler)(struct ib_event *, void *); |
| 1602 | void *srq_context; |
Sean Hefty | 96104ed | 2011-05-23 16:31:36 -0700 | [diff] [blame] | 1603 | enum ib_srq_type srq_type; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1604 | atomic_t usecnt; |
Sean Hefty | 418d513 | 2011-05-23 19:42:29 -0700 | [diff] [blame] | 1605 | |
Artemy Kovalyov | 1a56ff6 | 2017-08-17 15:52:04 +0300 | [diff] [blame] | 1606 | struct { |
| 1607 | struct ib_cq *cq; |
| 1608 | union { |
| 1609 | struct { |
| 1610 | struct ib_xrcd *xrcd; |
| 1611 | u32 srq_num; |
| 1612 | } xrc; |
| 1613 | }; |
Sean Hefty | 418d513 | 2011-05-23 19:42:29 -0700 | [diff] [blame] | 1614 | } ext; |
Neta Ostrovsky | 48f8a70 | 2021-04-18 16:41:24 +0300 | [diff] [blame] | 1615 | |
| 1616 | /* |
| 1617 | * Implementation details of the RDMA core, don't use in drivers: |
| 1618 | */ |
| 1619 | struct rdma_restrack_entry res; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1620 | }; |
| 1621 | |
Noa Osherovich | ebaaee2 | 2017-01-18 15:39:54 +0200 | [diff] [blame] | 1622 | enum ib_raw_packet_caps { |
| 1623 | /* Strip cvlan from incoming packet and report it in the matching work |
| 1624 | * completion is supported. |
| 1625 | */ |
| 1626 | IB_RAW_PACKET_CAP_CVLAN_STRIPPING = (1 << 0), |
| 1627 | /* Scatter FCS field of an incoming packet to host memory is supported. |
| 1628 | */ |
| 1629 | IB_RAW_PACKET_CAP_SCATTER_FCS = (1 << 1), |
| 1630 | /* Checksum offloads are supported (for both send and receive). */ |
| 1631 | IB_RAW_PACKET_CAP_IP_CSUM = (1 << 2), |
Maor Gottlieb | 7d9336d | 2017-05-30 10:29:10 +0300 | [diff] [blame] | 1632 | /* When a packet is received for an RQ with no receive WQEs, the |
| 1633 | * packet processing is delayed. |
| 1634 | */ |
| 1635 | IB_RAW_PACKET_CAP_DELAY_DROP = (1 << 3), |
Noa Osherovich | ebaaee2 | 2017-01-18 15:39:54 +0200 | [diff] [blame] | 1636 | }; |
| 1637 | |
Yishai Hadas | 5fd251c | 2016-05-23 15:20:48 +0300 | [diff] [blame] | 1638 | enum ib_wq_type { |
Yishai Hadas | 175ba58 | 2020-05-19 10:27:08 +0300 | [diff] [blame] | 1639 | IB_WQT_RQ = IB_UVERBS_WQT_RQ, |
Yishai Hadas | 5fd251c | 2016-05-23 15:20:48 +0300 | [diff] [blame] | 1640 | }; |
| 1641 | |
| 1642 | enum ib_wq_state { |
| 1643 | IB_WQS_RESET, |
| 1644 | IB_WQS_RDY, |
| 1645 | IB_WQS_ERR |
| 1646 | }; |
| 1647 | |
| 1648 | struct ib_wq { |
| 1649 | struct ib_device *device; |
Jason Gunthorpe | e04dd13 | 2020-01-08 19:22:01 +0200 | [diff] [blame] | 1650 | struct ib_uwq_object *uobject; |
Yishai Hadas | 5fd251c | 2016-05-23 15:20:48 +0300 | [diff] [blame] | 1651 | void *wq_context; |
| 1652 | void (*event_handler)(struct ib_event *, void *); |
| 1653 | struct ib_pd *pd; |
| 1654 | struct ib_cq *cq; |
| 1655 | u32 wq_num; |
| 1656 | enum ib_wq_state state; |
| 1657 | enum ib_wq_type wq_type; |
| 1658 | atomic_t usecnt; |
| 1659 | }; |
| 1660 | |
Noa Osherovich | 10bac72 | 2017-01-18 15:39:55 +0200 | [diff] [blame] | 1661 | enum ib_wq_flags { |
Yishai Hadas | 175ba58 | 2020-05-19 10:27:08 +0300 | [diff] [blame] | 1662 | IB_WQ_FLAGS_CVLAN_STRIPPING = IB_UVERBS_WQ_FLAGS_CVLAN_STRIPPING, |
| 1663 | IB_WQ_FLAGS_SCATTER_FCS = IB_UVERBS_WQ_FLAGS_SCATTER_FCS, |
| 1664 | IB_WQ_FLAGS_DELAY_DROP = IB_UVERBS_WQ_FLAGS_DELAY_DROP, |
| 1665 | IB_WQ_FLAGS_PCI_WRITE_END_PADDING = |
| 1666 | IB_UVERBS_WQ_FLAGS_PCI_WRITE_END_PADDING, |
Noa Osherovich | 10bac72 | 2017-01-18 15:39:55 +0200 | [diff] [blame] | 1667 | }; |
| 1668 | |
Yishai Hadas | 5fd251c | 2016-05-23 15:20:48 +0300 | [diff] [blame] | 1669 | struct ib_wq_init_attr { |
| 1670 | void *wq_context; |
| 1671 | enum ib_wq_type wq_type; |
| 1672 | u32 max_wr; |
| 1673 | u32 max_sge; |
| 1674 | struct ib_cq *cq; |
| 1675 | void (*event_handler)(struct ib_event *, void *); |
Noa Osherovich | 10bac72 | 2017-01-18 15:39:55 +0200 | [diff] [blame] | 1676 | u32 create_flags; /* Use enum ib_wq_flags */ |
Yishai Hadas | 5fd251c | 2016-05-23 15:20:48 +0300 | [diff] [blame] | 1677 | }; |
| 1678 | |
| 1679 | enum ib_wq_attr_mask { |
Noa Osherovich | 10bac72 | 2017-01-18 15:39:55 +0200 | [diff] [blame] | 1680 | IB_WQ_STATE = 1 << 0, |
| 1681 | IB_WQ_CUR_STATE = 1 << 1, |
| 1682 | IB_WQ_FLAGS = 1 << 2, |
Yishai Hadas | 5fd251c | 2016-05-23 15:20:48 +0300 | [diff] [blame] | 1683 | }; |
| 1684 | |
| 1685 | struct ib_wq_attr { |
| 1686 | enum ib_wq_state wq_state; |
| 1687 | enum ib_wq_state curr_wq_state; |
Noa Osherovich | 10bac72 | 2017-01-18 15:39:55 +0200 | [diff] [blame] | 1688 | u32 flags; /* Use enum ib_wq_flags */ |
| 1689 | u32 flags_mask; /* Use enum ib_wq_flags */ |
Yishai Hadas | 5fd251c | 2016-05-23 15:20:48 +0300 | [diff] [blame] | 1690 | }; |
| 1691 | |
Yishai Hadas | 6d39786 | 2016-05-23 15:20:51 +0300 | [diff] [blame] | 1692 | struct ib_rwq_ind_table { |
| 1693 | struct ib_device *device; |
| 1694 | struct ib_uobject *uobject; |
| 1695 | atomic_t usecnt; |
| 1696 | u32 ind_tbl_num; |
| 1697 | u32 log_ind_tbl_size; |
| 1698 | struct ib_wq **ind_tbl; |
| 1699 | }; |
| 1700 | |
| 1701 | struct ib_rwq_ind_table_init_attr { |
| 1702 | u32 log_ind_tbl_size; |
| 1703 | /* Each entry is a pointer to Receive Work Queue */ |
| 1704 | struct ib_wq **ind_tbl; |
| 1705 | }; |
| 1706 | |
Daniel Jurgens | d291f1a | 2017-05-19 15:48:52 +0300 | [diff] [blame] | 1707 | enum port_pkey_state { |
| 1708 | IB_PORT_PKEY_NOT_VALID = 0, |
| 1709 | IB_PORT_PKEY_VALID = 1, |
| 1710 | IB_PORT_PKEY_LISTED = 2, |
| 1711 | }; |
| 1712 | |
| 1713 | struct ib_qp_security; |
| 1714 | |
| 1715 | struct ib_port_pkey { |
| 1716 | enum port_pkey_state state; |
| 1717 | u16 pkey_index; |
Mark Bloch | 1fb7f89 | 2021-03-01 09:04:20 +0200 | [diff] [blame] | 1718 | u32 port_num; |
Daniel Jurgens | d291f1a | 2017-05-19 15:48:52 +0300 | [diff] [blame] | 1719 | struct list_head qp_list; |
| 1720 | struct list_head to_error_list; |
| 1721 | struct ib_qp_security *sec; |
| 1722 | }; |
| 1723 | |
| 1724 | struct ib_ports_pkeys { |
| 1725 | struct ib_port_pkey main; |
| 1726 | struct ib_port_pkey alt; |
| 1727 | }; |
| 1728 | |
| 1729 | struct ib_qp_security { |
| 1730 | struct ib_qp *qp; |
| 1731 | struct ib_device *dev; |
| 1732 | /* Hold this mutex when changing port and pkey settings. */ |
| 1733 | struct mutex mutex; |
| 1734 | struct ib_ports_pkeys *ports_pkeys; |
| 1735 | /* A list of all open shared QP handles. Required to enforce security |
| 1736 | * properly for all users of a shared QP. |
| 1737 | */ |
| 1738 | struct list_head shared_qp_list; |
| 1739 | void *security; |
| 1740 | bool destroying; |
| 1741 | atomic_t error_list_count; |
| 1742 | struct completion error_complete; |
| 1743 | int error_comps_pending; |
| 1744 | }; |
| 1745 | |
Bart Van Assche | 632bc3f | 2016-07-21 13:03:30 -0700 | [diff] [blame] | 1746 | /* |
| 1747 | * @max_write_sge: Maximum SGE elements per RDMA WRITE request. |
| 1748 | * @max_read_sge: Maximum SGE elements per RDMA READ request. |
| 1749 | */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1750 | struct ib_qp { |
| 1751 | struct ib_device *device; |
| 1752 | struct ib_pd *pd; |
| 1753 | struct ib_cq *send_cq; |
| 1754 | struct ib_cq *recv_cq; |
Christoph Hellwig | fffb038 | 2016-05-03 18:01:07 +0200 | [diff] [blame] | 1755 | spinlock_t mr_lock; |
| 1756 | int mrs_used; |
Christoph Hellwig | a060b56 | 2016-05-03 18:01:09 +0200 | [diff] [blame] | 1757 | struct list_head rdma_mrs; |
Christoph Hellwig | 0e353e3 | 2016-05-03 18:01:12 +0200 | [diff] [blame] | 1758 | struct list_head sig_mrs; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1759 | struct ib_srq *srq; |
Sean Hefty | b42b63c | 2011-05-23 19:59:25 -0700 | [diff] [blame] | 1760 | struct ib_xrcd *xrcd; /* XRC TGT QPs only */ |
Sean Hefty | d3d72d9 | 2011-05-26 23:06:44 -0700 | [diff] [blame] | 1761 | struct list_head xrcd_list; |
Christoph Hellwig | fffb038 | 2016-05-03 18:01:07 +0200 | [diff] [blame] | 1762 | |
Hadar Hen Zion | 319a441 | 2013-08-07 14:01:59 +0300 | [diff] [blame] | 1763 | /* count times opened, mcast attaches, flow attaches */ |
| 1764 | atomic_t usecnt; |
Sean Hefty | 0e0ec7e | 2011-08-08 15:31:51 -0700 | [diff] [blame] | 1765 | struct list_head open_list; |
| 1766 | struct ib_qp *real_qp; |
Jason Gunthorpe | 620d3f8 | 2020-01-08 19:21:59 +0200 | [diff] [blame] | 1767 | struct ib_uqp_object *uobject; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1768 | void (*event_handler)(struct ib_event *, void *); |
| 1769 | void *qp_context; |
Jason Gunthorpe | 1a1f460 | 2018-06-13 10:22:08 +0300 | [diff] [blame] | 1770 | /* sgid_attrs associated with the AV's */ |
| 1771 | const struct ib_gid_attr *av_sgid_attr; |
| 1772 | const struct ib_gid_attr *alt_path_sgid_attr; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1773 | u32 qp_num; |
Bart Van Assche | 632bc3f | 2016-07-21 13:03:30 -0700 | [diff] [blame] | 1774 | u32 max_write_sge; |
| 1775 | u32 max_read_sge; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1776 | enum ib_qp_type qp_type; |
Yishai Hadas | a9017e2 | 2016-05-23 15:20:54 +0300 | [diff] [blame] | 1777 | struct ib_rwq_ind_table *rwq_ind_tbl; |
Daniel Jurgens | d291f1a | 2017-05-19 15:48:52 +0300 | [diff] [blame] | 1778 | struct ib_qp_security *qp_sec; |
Mark Bloch | 1fb7f89 | 2021-03-01 09:04:20 +0200 | [diff] [blame] | 1779 | u32 port; |
Leon Romanovsky | 02d8883 | 2018-01-28 11:17:20 +0200 | [diff] [blame] | 1780 | |
Max Gurtovoy | 185eddc | 2019-06-11 18:52:51 +0300 | [diff] [blame] | 1781 | bool integrity_en; |
Leon Romanovsky | 02d8883 | 2018-01-28 11:17:20 +0200 | [diff] [blame] | 1782 | /* |
| 1783 | * Implementation details of the RDMA core, don't use in drivers: |
| 1784 | */ |
| 1785 | struct rdma_restrack_entry res; |
Mark Zhang | 99fa331 | 2019-07-02 13:02:35 +0300 | [diff] [blame] | 1786 | |
| 1787 | /* The counter the qp is bind to */ |
| 1788 | struct rdma_counter *counter; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1789 | }; |
| 1790 | |
Ariel Levkovich | bee76d7 | 2018-04-05 18:53:24 +0300 | [diff] [blame] | 1791 | struct ib_dm { |
| 1792 | struct ib_device *device; |
| 1793 | u32 length; |
| 1794 | u32 flags; |
| 1795 | struct ib_uobject *uobject; |
| 1796 | atomic_t usecnt; |
| 1797 | }; |
| 1798 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1799 | struct ib_mr { |
Roland Dreier | e2773c0 | 2005-07-07 17:57:10 -0700 | [diff] [blame] | 1800 | struct ib_device *device; |
| 1801 | struct ib_pd *pd; |
Roland Dreier | e2773c0 | 2005-07-07 17:57:10 -0700 | [diff] [blame] | 1802 | u32 lkey; |
| 1803 | u32 rkey; |
Sagi Grimberg | 4c67e2b | 2015-10-13 19:11:24 +0300 | [diff] [blame] | 1804 | u64 iova; |
Parav Pandit | edd3155 | 2017-09-24 21:46:31 +0300 | [diff] [blame] | 1805 | u64 length; |
Sagi Grimberg | 4c67e2b | 2015-10-13 19:11:24 +0300 | [diff] [blame] | 1806 | unsigned int page_size; |
Max Gurtovoy | a0bc099 | 2019-06-11 18:52:38 +0300 | [diff] [blame] | 1807 | enum ib_mr_type type; |
Steve Wise | d4a85c3 | 2016-05-03 18:01:08 +0200 | [diff] [blame] | 1808 | bool need_inval; |
Christoph Hellwig | fffb038 | 2016-05-03 18:01:07 +0200 | [diff] [blame] | 1809 | union { |
| 1810 | struct ib_uobject *uobject; /* user */ |
| 1811 | struct list_head qp_entry; /* FR */ |
| 1812 | }; |
Steve Wise | fccec5b | 2018-03-01 13:58:13 -0800 | [diff] [blame] | 1813 | |
Ariel Levkovich | be934cc | 2018-04-05 18:53:25 +0300 | [diff] [blame] | 1814 | struct ib_dm *dm; |
Max Gurtovoy | 7c717d3 | 2019-06-11 18:52:41 +0300 | [diff] [blame] | 1815 | struct ib_sig_attrs *sig_attrs; /* only for IB_MR_TYPE_INTEGRITY MRs */ |
Steve Wise | fccec5b | 2018-03-01 13:58:13 -0800 | [diff] [blame] | 1816 | /* |
| 1817 | * Implementation details of the RDMA core, don't use in drivers: |
| 1818 | */ |
| 1819 | struct rdma_restrack_entry res; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1820 | }; |
| 1821 | |
| 1822 | struct ib_mw { |
| 1823 | struct ib_device *device; |
| 1824 | struct ib_pd *pd; |
Roland Dreier | e2773c0 | 2005-07-07 17:57:10 -0700 | [diff] [blame] | 1825 | struct ib_uobject *uobject; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1826 | u32 rkey; |
Shani Michaeli | 7083e42 | 2013-02-06 16:19:12 +0000 | [diff] [blame] | 1827 | enum ib_mw_type type; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1828 | }; |
| 1829 | |
Hadar Hen Zion | 319a441 | 2013-08-07 14:01:59 +0300 | [diff] [blame] | 1830 | /* Supported steering options */ |
| 1831 | enum ib_flow_attr_type { |
| 1832 | /* steering according to rule specifications */ |
| 1833 | IB_FLOW_ATTR_NORMAL = 0x0, |
| 1834 | /* default unicast and multicast rule - |
| 1835 | * receive all Eth traffic which isn't steered to any QP |
| 1836 | */ |
| 1837 | IB_FLOW_ATTR_ALL_DEFAULT = 0x1, |
| 1838 | /* default multicast rule - |
| 1839 | * receive all Eth multicast traffic which isn't steered to any QP |
| 1840 | */ |
| 1841 | IB_FLOW_ATTR_MC_DEFAULT = 0x2, |
| 1842 | /* sniffer rule - receive all port traffic */ |
| 1843 | IB_FLOW_ATTR_SNIFFER = 0x3 |
| 1844 | }; |
| 1845 | |
| 1846 | /* Supported steering header types */ |
| 1847 | enum ib_flow_spec_type { |
| 1848 | /* L2 headers*/ |
Moses Reuben | 76bd23b | 2016-11-14 19:04:48 +0200 | [diff] [blame] | 1849 | IB_FLOW_SPEC_ETH = 0x20, |
| 1850 | IB_FLOW_SPEC_IB = 0x22, |
Hadar Hen Zion | 319a441 | 2013-08-07 14:01:59 +0300 | [diff] [blame] | 1851 | /* L3 header*/ |
Moses Reuben | 76bd23b | 2016-11-14 19:04:48 +0200 | [diff] [blame] | 1852 | IB_FLOW_SPEC_IPV4 = 0x30, |
| 1853 | IB_FLOW_SPEC_IPV6 = 0x31, |
Matan Barak | 56ab0b3 | 2018-03-28 09:27:49 +0300 | [diff] [blame] | 1854 | IB_FLOW_SPEC_ESP = 0x34, |
Hadar Hen Zion | 319a441 | 2013-08-07 14:01:59 +0300 | [diff] [blame] | 1855 | /* L4 headers*/ |
Moses Reuben | 76bd23b | 2016-11-14 19:04:48 +0200 | [diff] [blame] | 1856 | IB_FLOW_SPEC_TCP = 0x40, |
| 1857 | IB_FLOW_SPEC_UDP = 0x41, |
Moses Reuben | 0dbf333 | 2016-11-14 19:04:47 +0200 | [diff] [blame] | 1858 | IB_FLOW_SPEC_VXLAN_TUNNEL = 0x50, |
Ariel Levkovich | d90e5e5 | 2018-05-13 14:33:30 +0300 | [diff] [blame] | 1859 | IB_FLOW_SPEC_GRE = 0x51, |
Ariel Levkovich | b04f0f0 | 2018-05-13 14:33:32 +0300 | [diff] [blame] | 1860 | IB_FLOW_SPEC_MPLS = 0x60, |
Moses Reuben | fbf4686 | 2016-11-14 19:04:51 +0200 | [diff] [blame] | 1861 | IB_FLOW_SPEC_INNER = 0x100, |
Moses Reuben | 460d019 | 2017-01-18 14:59:48 +0200 | [diff] [blame] | 1862 | /* Actions */ |
| 1863 | IB_FLOW_SPEC_ACTION_TAG = 0x1000, |
Slava Shwartsman | 483a396 | 2017-04-03 13:13:51 +0300 | [diff] [blame] | 1864 | IB_FLOW_SPEC_ACTION_DROP = 0x1001, |
Matan Barak | 9b82844 | 2018-03-28 09:27:46 +0300 | [diff] [blame] | 1865 | IB_FLOW_SPEC_ACTION_HANDLE = 0x1002, |
Raed Salem | 7eea23a | 2018-05-31 16:43:36 +0300 | [diff] [blame] | 1866 | IB_FLOW_SPEC_ACTION_COUNT = 0x1003, |
Hadar Hen Zion | 319a441 | 2013-08-07 14:01:59 +0300 | [diff] [blame] | 1867 | }; |
Matan Barak | 240ae00 | 2013-11-07 15:25:13 +0200 | [diff] [blame] | 1868 | #define IB_FLOW_SPEC_LAYER_MASK 0xF0 |
Raed Salem | 7eea23a | 2018-05-31 16:43:36 +0300 | [diff] [blame] | 1869 | #define IB_FLOW_SPEC_SUPPORT_LAYERS 10 |
Matan Barak | 22878db | 2013-09-01 18:39:52 +0300 | [diff] [blame] | 1870 | |
Marina Varshaver | a3100a7 | 2016-02-18 18:31:05 +0200 | [diff] [blame] | 1871 | enum ib_flow_flags { |
| 1872 | IB_FLOW_ATTR_FLAGS_DONT_TRAP = 1UL << 1, /* Continue match, no steal */ |
Boris Pismenny | 21e82d3 | 2018-03-28 09:27:47 +0300 | [diff] [blame] | 1873 | IB_FLOW_ATTR_FLAGS_EGRESS = 1UL << 2, /* Egress flow */ |
| 1874 | IB_FLOW_ATTR_FLAGS_RESERVED = 1UL << 3 /* Must be last */ |
Marina Varshaver | a3100a7 | 2016-02-18 18:31:05 +0200 | [diff] [blame] | 1875 | }; |
| 1876 | |
Hadar Hen Zion | 319a441 | 2013-08-07 14:01:59 +0300 | [diff] [blame] | 1877 | struct ib_flow_eth_filter { |
| 1878 | u8 dst_mac[6]; |
| 1879 | u8 src_mac[6]; |
| 1880 | __be16 ether_type; |
| 1881 | __be16 vlan_tag; |
Maor Gottlieb | 15dfbd6 | 2016-08-30 16:58:32 +0300 | [diff] [blame] | 1882 | /* Must be last */ |
Gustavo A. R. Silva | 5b36132 | 2020-02-12 19:04:25 -0600 | [diff] [blame] | 1883 | u8 real_sz[]; |
Hadar Hen Zion | 319a441 | 2013-08-07 14:01:59 +0300 | [diff] [blame] | 1884 | }; |
| 1885 | |
| 1886 | struct ib_flow_spec_eth { |
Moses Reuben | fbf4686 | 2016-11-14 19:04:51 +0200 | [diff] [blame] | 1887 | u32 type; |
Hadar Hen Zion | 319a441 | 2013-08-07 14:01:59 +0300 | [diff] [blame] | 1888 | u16 size; |
| 1889 | struct ib_flow_eth_filter val; |
| 1890 | struct ib_flow_eth_filter mask; |
| 1891 | }; |
| 1892 | |
Matan Barak | 240ae00 | 2013-11-07 15:25:13 +0200 | [diff] [blame] | 1893 | struct ib_flow_ib_filter { |
| 1894 | __be16 dlid; |
| 1895 | __u8 sl; |
Maor Gottlieb | 15dfbd6 | 2016-08-30 16:58:32 +0300 | [diff] [blame] | 1896 | /* Must be last */ |
Gustavo A. R. Silva | 5b36132 | 2020-02-12 19:04:25 -0600 | [diff] [blame] | 1897 | u8 real_sz[]; |
Matan Barak | 240ae00 | 2013-11-07 15:25:13 +0200 | [diff] [blame] | 1898 | }; |
| 1899 | |
| 1900 | struct ib_flow_spec_ib { |
Moses Reuben | fbf4686 | 2016-11-14 19:04:51 +0200 | [diff] [blame] | 1901 | u32 type; |
Matan Barak | 240ae00 | 2013-11-07 15:25:13 +0200 | [diff] [blame] | 1902 | u16 size; |
| 1903 | struct ib_flow_ib_filter val; |
| 1904 | struct ib_flow_ib_filter mask; |
| 1905 | }; |
| 1906 | |
Maor Gottlieb | 989a3a8 | 2016-08-30 16:58:33 +0300 | [diff] [blame] | 1907 | /* IPv4 header flags */ |
| 1908 | enum ib_ipv4_flags { |
| 1909 | IB_IPV4_DONT_FRAG = 0x2, /* Don't enable packet fragmentation */ |
| 1910 | IB_IPV4_MORE_FRAG = 0X4 /* For All fragmented packets except the |
| 1911 | last have this flag set */ |
| 1912 | }; |
| 1913 | |
Hadar Hen Zion | 319a441 | 2013-08-07 14:01:59 +0300 | [diff] [blame] | 1914 | struct ib_flow_ipv4_filter { |
| 1915 | __be32 src_ip; |
| 1916 | __be32 dst_ip; |
Maor Gottlieb | 989a3a8 | 2016-08-30 16:58:33 +0300 | [diff] [blame] | 1917 | u8 proto; |
| 1918 | u8 tos; |
| 1919 | u8 ttl; |
| 1920 | u8 flags; |
Maor Gottlieb | 15dfbd6 | 2016-08-30 16:58:32 +0300 | [diff] [blame] | 1921 | /* Must be last */ |
Gustavo A. R. Silva | 5b36132 | 2020-02-12 19:04:25 -0600 | [diff] [blame] | 1922 | u8 real_sz[]; |
Hadar Hen Zion | 319a441 | 2013-08-07 14:01:59 +0300 | [diff] [blame] | 1923 | }; |
| 1924 | |
| 1925 | struct ib_flow_spec_ipv4 { |
Moses Reuben | fbf4686 | 2016-11-14 19:04:51 +0200 | [diff] [blame] | 1926 | u32 type; |
Hadar Hen Zion | 319a441 | 2013-08-07 14:01:59 +0300 | [diff] [blame] | 1927 | u16 size; |
| 1928 | struct ib_flow_ipv4_filter val; |
| 1929 | struct ib_flow_ipv4_filter mask; |
| 1930 | }; |
| 1931 | |
Maor Gottlieb | 4c2aae7 | 2016-06-17 15:14:50 +0300 | [diff] [blame] | 1932 | struct ib_flow_ipv6_filter { |
| 1933 | u8 src_ip[16]; |
| 1934 | u8 dst_ip[16]; |
Maor Gottlieb | a72c6a2 | 2016-08-30 16:58:34 +0300 | [diff] [blame] | 1935 | __be32 flow_label; |
| 1936 | u8 next_hdr; |
| 1937 | u8 traffic_class; |
| 1938 | u8 hop_limit; |
Maor Gottlieb | 15dfbd6 | 2016-08-30 16:58:32 +0300 | [diff] [blame] | 1939 | /* Must be last */ |
Gustavo A. R. Silva | 5b36132 | 2020-02-12 19:04:25 -0600 | [diff] [blame] | 1940 | u8 real_sz[]; |
Maor Gottlieb | 4c2aae7 | 2016-06-17 15:14:50 +0300 | [diff] [blame] | 1941 | }; |
| 1942 | |
| 1943 | struct ib_flow_spec_ipv6 { |
Moses Reuben | fbf4686 | 2016-11-14 19:04:51 +0200 | [diff] [blame] | 1944 | u32 type; |
Maor Gottlieb | 4c2aae7 | 2016-06-17 15:14:50 +0300 | [diff] [blame] | 1945 | u16 size; |
| 1946 | struct ib_flow_ipv6_filter val; |
| 1947 | struct ib_flow_ipv6_filter mask; |
| 1948 | }; |
| 1949 | |
Hadar Hen Zion | 319a441 | 2013-08-07 14:01:59 +0300 | [diff] [blame] | 1950 | struct ib_flow_tcp_udp_filter { |
| 1951 | __be16 dst_port; |
| 1952 | __be16 src_port; |
Maor Gottlieb | 15dfbd6 | 2016-08-30 16:58:32 +0300 | [diff] [blame] | 1953 | /* Must be last */ |
Gustavo A. R. Silva | 5b36132 | 2020-02-12 19:04:25 -0600 | [diff] [blame] | 1954 | u8 real_sz[]; |
Hadar Hen Zion | 319a441 | 2013-08-07 14:01:59 +0300 | [diff] [blame] | 1955 | }; |
| 1956 | |
| 1957 | struct ib_flow_spec_tcp_udp { |
Moses Reuben | fbf4686 | 2016-11-14 19:04:51 +0200 | [diff] [blame] | 1958 | u32 type; |
Hadar Hen Zion | 319a441 | 2013-08-07 14:01:59 +0300 | [diff] [blame] | 1959 | u16 size; |
| 1960 | struct ib_flow_tcp_udp_filter val; |
| 1961 | struct ib_flow_tcp_udp_filter mask; |
| 1962 | }; |
| 1963 | |
Moses Reuben | 0dbf333 | 2016-11-14 19:04:47 +0200 | [diff] [blame] | 1964 | struct ib_flow_tunnel_filter { |
| 1965 | __be32 tunnel_id; |
Gustavo A. R. Silva | 5b36132 | 2020-02-12 19:04:25 -0600 | [diff] [blame] | 1966 | u8 real_sz[]; |
Moses Reuben | 0dbf333 | 2016-11-14 19:04:47 +0200 | [diff] [blame] | 1967 | }; |
| 1968 | |
| 1969 | /* ib_flow_spec_tunnel describes the Vxlan tunnel |
| 1970 | * the tunnel_id from val has the vni value |
| 1971 | */ |
| 1972 | struct ib_flow_spec_tunnel { |
Moses Reuben | fbf4686 | 2016-11-14 19:04:51 +0200 | [diff] [blame] | 1973 | u32 type; |
Moses Reuben | 0dbf333 | 2016-11-14 19:04:47 +0200 | [diff] [blame] | 1974 | u16 size; |
| 1975 | struct ib_flow_tunnel_filter val; |
| 1976 | struct ib_flow_tunnel_filter mask; |
| 1977 | }; |
| 1978 | |
Matan Barak | 56ab0b3 | 2018-03-28 09:27:49 +0300 | [diff] [blame] | 1979 | struct ib_flow_esp_filter { |
| 1980 | __be32 spi; |
| 1981 | __be32 seq; |
| 1982 | /* Must be last */ |
Gustavo A. R. Silva | 5b36132 | 2020-02-12 19:04:25 -0600 | [diff] [blame] | 1983 | u8 real_sz[]; |
Matan Barak | 56ab0b3 | 2018-03-28 09:27:49 +0300 | [diff] [blame] | 1984 | }; |
| 1985 | |
| 1986 | struct ib_flow_spec_esp { |
| 1987 | u32 type; |
| 1988 | u16 size; |
| 1989 | struct ib_flow_esp_filter val; |
| 1990 | struct ib_flow_esp_filter mask; |
| 1991 | }; |
| 1992 | |
Ariel Levkovich | d90e5e5 | 2018-05-13 14:33:30 +0300 | [diff] [blame] | 1993 | struct ib_flow_gre_filter { |
| 1994 | __be16 c_ks_res0_ver; |
| 1995 | __be16 protocol; |
| 1996 | __be32 key; |
| 1997 | /* Must be last */ |
Gustavo A. R. Silva | 5b36132 | 2020-02-12 19:04:25 -0600 | [diff] [blame] | 1998 | u8 real_sz[]; |
Ariel Levkovich | d90e5e5 | 2018-05-13 14:33:30 +0300 | [diff] [blame] | 1999 | }; |
| 2000 | |
| 2001 | struct ib_flow_spec_gre { |
| 2002 | u32 type; |
| 2003 | u16 size; |
| 2004 | struct ib_flow_gre_filter val; |
| 2005 | struct ib_flow_gre_filter mask; |
| 2006 | }; |
| 2007 | |
Ariel Levkovich | b04f0f0 | 2018-05-13 14:33:32 +0300 | [diff] [blame] | 2008 | struct ib_flow_mpls_filter { |
| 2009 | __be32 tag; |
| 2010 | /* Must be last */ |
Gustavo A. R. Silva | 5b36132 | 2020-02-12 19:04:25 -0600 | [diff] [blame] | 2011 | u8 real_sz[]; |
Ariel Levkovich | b04f0f0 | 2018-05-13 14:33:32 +0300 | [diff] [blame] | 2012 | }; |
| 2013 | |
| 2014 | struct ib_flow_spec_mpls { |
| 2015 | u32 type; |
| 2016 | u16 size; |
| 2017 | struct ib_flow_mpls_filter val; |
| 2018 | struct ib_flow_mpls_filter mask; |
| 2019 | }; |
| 2020 | |
Moses Reuben | 460d019 | 2017-01-18 14:59:48 +0200 | [diff] [blame] | 2021 | struct ib_flow_spec_action_tag { |
| 2022 | enum ib_flow_spec_type type; |
| 2023 | u16 size; |
| 2024 | u32 tag_id; |
| 2025 | }; |
| 2026 | |
Slava Shwartsman | 483a396 | 2017-04-03 13:13:51 +0300 | [diff] [blame] | 2027 | struct ib_flow_spec_action_drop { |
| 2028 | enum ib_flow_spec_type type; |
| 2029 | u16 size; |
| 2030 | }; |
| 2031 | |
Matan Barak | 9b82844 | 2018-03-28 09:27:46 +0300 | [diff] [blame] | 2032 | struct ib_flow_spec_action_handle { |
| 2033 | enum ib_flow_spec_type type; |
| 2034 | u16 size; |
| 2035 | struct ib_flow_action *act; |
| 2036 | }; |
| 2037 | |
Raed Salem | 7eea23a | 2018-05-31 16:43:36 +0300 | [diff] [blame] | 2038 | enum ib_counters_description { |
| 2039 | IB_COUNTER_PACKETS, |
| 2040 | IB_COUNTER_BYTES, |
| 2041 | }; |
| 2042 | |
| 2043 | struct ib_flow_spec_action_count { |
| 2044 | enum ib_flow_spec_type type; |
| 2045 | u16 size; |
| 2046 | struct ib_counters *counters; |
| 2047 | }; |
| 2048 | |
Hadar Hen Zion | 319a441 | 2013-08-07 14:01:59 +0300 | [diff] [blame] | 2049 | union ib_flow_spec { |
| 2050 | struct { |
Moses Reuben | fbf4686 | 2016-11-14 19:04:51 +0200 | [diff] [blame] | 2051 | u32 type; |
Hadar Hen Zion | 319a441 | 2013-08-07 14:01:59 +0300 | [diff] [blame] | 2052 | u16 size; |
| 2053 | }; |
| 2054 | struct ib_flow_spec_eth eth; |
Matan Barak | 240ae00 | 2013-11-07 15:25:13 +0200 | [diff] [blame] | 2055 | struct ib_flow_spec_ib ib; |
Hadar Hen Zion | 319a441 | 2013-08-07 14:01:59 +0300 | [diff] [blame] | 2056 | struct ib_flow_spec_ipv4 ipv4; |
| 2057 | struct ib_flow_spec_tcp_udp tcp_udp; |
Maor Gottlieb | 4c2aae7 | 2016-06-17 15:14:50 +0300 | [diff] [blame] | 2058 | struct ib_flow_spec_ipv6 ipv6; |
Moses Reuben | 0dbf333 | 2016-11-14 19:04:47 +0200 | [diff] [blame] | 2059 | struct ib_flow_spec_tunnel tunnel; |
Matan Barak | 56ab0b3 | 2018-03-28 09:27:49 +0300 | [diff] [blame] | 2060 | struct ib_flow_spec_esp esp; |
Ariel Levkovich | d90e5e5 | 2018-05-13 14:33:30 +0300 | [diff] [blame] | 2061 | struct ib_flow_spec_gre gre; |
Ariel Levkovich | b04f0f0 | 2018-05-13 14:33:32 +0300 | [diff] [blame] | 2062 | struct ib_flow_spec_mpls mpls; |
Moses Reuben | 460d019 | 2017-01-18 14:59:48 +0200 | [diff] [blame] | 2063 | struct ib_flow_spec_action_tag flow_tag; |
Slava Shwartsman | 483a396 | 2017-04-03 13:13:51 +0300 | [diff] [blame] | 2064 | struct ib_flow_spec_action_drop drop; |
Matan Barak | 9b82844 | 2018-03-28 09:27:46 +0300 | [diff] [blame] | 2065 | struct ib_flow_spec_action_handle action; |
Raed Salem | 7eea23a | 2018-05-31 16:43:36 +0300 | [diff] [blame] | 2066 | struct ib_flow_spec_action_count flow_count; |
Hadar Hen Zion | 319a441 | 2013-08-07 14:01:59 +0300 | [diff] [blame] | 2067 | }; |
| 2068 | |
| 2069 | struct ib_flow_attr { |
| 2070 | enum ib_flow_attr_type type; |
| 2071 | u16 size; |
| 2072 | u16 priority; |
| 2073 | u32 flags; |
| 2074 | u8 num_of_specs; |
Mark Bloch | 1fb7f89 | 2021-03-01 09:04:20 +0200 | [diff] [blame] | 2075 | u32 port; |
Matthew Wilcox | 7654cb1 | 2018-06-07 07:57:16 -0700 | [diff] [blame] | 2076 | union ib_flow_spec flows[]; |
Hadar Hen Zion | 319a441 | 2013-08-07 14:01:59 +0300 | [diff] [blame] | 2077 | }; |
| 2078 | |
| 2079 | struct ib_flow { |
| 2080 | struct ib_qp *qp; |
Yishai Hadas | 6cd080a | 2018-07-23 15:25:08 +0300 | [diff] [blame] | 2081 | struct ib_device *device; |
Hadar Hen Zion | 319a441 | 2013-08-07 14:01:59 +0300 | [diff] [blame] | 2082 | struct ib_uobject *uobject; |
| 2083 | }; |
| 2084 | |
Matan Barak | 2eb9bea | 2018-03-28 09:27:45 +0300 | [diff] [blame] | 2085 | enum ib_flow_action_type { |
| 2086 | IB_FLOW_ACTION_UNSPECIFIED, |
| 2087 | IB_FLOW_ACTION_ESP = 1, |
| 2088 | }; |
| 2089 | |
| 2090 | struct ib_flow_action_attrs_esp_keymats { |
| 2091 | enum ib_uverbs_flow_action_esp_keymat protocol; |
| 2092 | union { |
| 2093 | struct ib_uverbs_flow_action_esp_keymat_aes_gcm aes_gcm; |
| 2094 | } keymat; |
| 2095 | }; |
| 2096 | |
| 2097 | struct ib_flow_action_attrs_esp_replays { |
| 2098 | enum ib_uverbs_flow_action_esp_replay protocol; |
| 2099 | union { |
| 2100 | struct ib_uverbs_flow_action_esp_replay_bmp bmp; |
| 2101 | } replay; |
| 2102 | }; |
| 2103 | |
| 2104 | enum ib_flow_action_attrs_esp_flags { |
| 2105 | /* All user-space flags at the top: Use enum ib_uverbs_flow_action_esp_flags |
| 2106 | * This is done in order to share the same flags between user-space and |
| 2107 | * kernel and spare an unnecessary translation. |
| 2108 | */ |
| 2109 | |
| 2110 | /* Kernel flags */ |
| 2111 | IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED = 1ULL << 32, |
Matan Barak | 7d12f8d | 2018-03-28 09:27:48 +0300 | [diff] [blame] | 2112 | IB_FLOW_ACTION_ESP_FLAGS_MOD_ESP_ATTRS = 1ULL << 33, |
Matan Barak | 2eb9bea | 2018-03-28 09:27:45 +0300 | [diff] [blame] | 2113 | }; |
| 2114 | |
| 2115 | struct ib_flow_spec_list { |
| 2116 | struct ib_flow_spec_list *next; |
| 2117 | union ib_flow_spec spec; |
| 2118 | }; |
| 2119 | |
| 2120 | struct ib_flow_action_attrs_esp { |
| 2121 | struct ib_flow_action_attrs_esp_keymats *keymat; |
| 2122 | struct ib_flow_action_attrs_esp_replays *replay; |
| 2123 | struct ib_flow_spec_list *encap; |
| 2124 | /* Used only if IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED is enabled. |
| 2125 | * Value of 0 is a valid value. |
| 2126 | */ |
| 2127 | u32 esn; |
| 2128 | u32 spi; |
| 2129 | u32 seq; |
| 2130 | u32 tfc_pad; |
| 2131 | /* Use enum ib_flow_action_attrs_esp_flags */ |
| 2132 | u64 flags; |
| 2133 | u64 hard_limit_pkts; |
| 2134 | }; |
| 2135 | |
| 2136 | struct ib_flow_action { |
| 2137 | struct ib_device *device; |
| 2138 | struct ib_uobject *uobject; |
| 2139 | enum ib_flow_action_type type; |
| 2140 | atomic_t usecnt; |
| 2141 | }; |
| 2142 | |
Leon Romanovsky | e26e7b8 | 2019-10-29 08:27:45 +0200 | [diff] [blame] | 2143 | struct ib_mad; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2144 | |
| 2145 | enum ib_process_mad_flags { |
| 2146 | IB_MAD_IGNORE_MKEY = 1, |
| 2147 | IB_MAD_IGNORE_BKEY = 2, |
| 2148 | IB_MAD_IGNORE_ALL = IB_MAD_IGNORE_MKEY | IB_MAD_IGNORE_BKEY |
| 2149 | }; |
| 2150 | |
| 2151 | enum ib_mad_result { |
| 2152 | IB_MAD_RESULT_FAILURE = 0, /* (!SUCCESS is the important flag) */ |
| 2153 | IB_MAD_RESULT_SUCCESS = 1 << 0, /* MAD was successfully processed */ |
| 2154 | IB_MAD_RESULT_REPLY = 1 << 1, /* Reply packet needs to be sent */ |
| 2155 | IB_MAD_RESULT_CONSUMED = 1 << 2 /* Packet consumed: stop processing */ |
| 2156 | }; |
| 2157 | |
Jack Wang | 21d6454 | 2017-01-17 10:11:12 +0100 | [diff] [blame] | 2158 | struct ib_port_cache { |
Daniel Jurgens | 883c71f | 2017-05-19 15:48:51 +0300 | [diff] [blame] | 2159 | u64 subnet_prefix; |
Jack Wang | 21d6454 | 2017-01-17 10:11:12 +0100 | [diff] [blame] | 2160 | struct ib_pkey_cache *pkey; |
| 2161 | struct ib_gid_table *gid; |
| 2162 | u8 lmc; |
| 2163 | enum ib_port_state port_state; |
| 2164 | }; |
| 2165 | |
Ira Weiny | 7738613 | 2015-05-13 20:02:58 -0400 | [diff] [blame] | 2166 | struct ib_port_immutable { |
| 2167 | int pkey_tbl_len; |
| 2168 | int gid_tbl_len; |
Ira Weiny | f9b22e3 | 2015-05-13 20:02:59 -0400 | [diff] [blame] | 2169 | u32 core_cap_flags; |
Ira Weiny | 337877a | 2015-06-06 14:38:29 -0400 | [diff] [blame] | 2170 | u32 max_mad_size; |
Ira Weiny | 7738613 | 2015-05-13 20:02:58 -0400 | [diff] [blame] | 2171 | }; |
| 2172 | |
Jason Gunthorpe | 8ceb135 | 2019-02-12 21:12:48 -0700 | [diff] [blame] | 2173 | struct ib_port_data { |
Jason Gunthorpe | 324e227 | 2019-02-12 21:12:51 -0700 | [diff] [blame] | 2174 | struct ib_device *ib_dev; |
| 2175 | |
Jason Gunthorpe | 8ceb135 | 2019-02-12 21:12:48 -0700 | [diff] [blame] | 2176 | struct ib_port_immutable immutable; |
| 2177 | |
| 2178 | spinlock_t pkey_list_lock; |
Anand Khoje | 84dcd8c | 2021-06-16 21:15:08 +0530 | [diff] [blame] | 2179 | |
| 2180 | spinlock_t netdev_lock; |
| 2181 | |
Jason Gunthorpe | 8ceb135 | 2019-02-12 21:12:48 -0700 | [diff] [blame] | 2182 | struct list_head pkey_list; |
Jason Gunthorpe | 8faea9f | 2019-02-12 21:12:49 -0700 | [diff] [blame] | 2183 | |
| 2184 | struct ib_port_cache cache; |
Jason Gunthorpe | c2261dd | 2019-02-12 21:12:50 -0700 | [diff] [blame] | 2185 | |
Jason Gunthorpe | 324e227 | 2019-02-12 21:12:51 -0700 | [diff] [blame] | 2186 | struct net_device __rcu *netdev; |
| 2187 | struct hlist_node ndev_hash_link; |
Mark Zhang | 413d334 | 2019-07-02 13:02:34 +0300 | [diff] [blame] | 2188 | struct rdma_port_counter port_counter; |
Jason Gunthorpe | d8a5883 | 2021-06-11 19:00:21 +0300 | [diff] [blame] | 2189 | struct ib_port *sysfs; |
Jason Gunthorpe | 8ceb135 | 2019-02-12 21:12:48 -0700 | [diff] [blame] | 2190 | }; |
| 2191 | |
Vishwanathapura, Niranjana | 2fc7757 | 2017-04-12 20:29:20 -0700 | [diff] [blame] | 2192 | /* rdma netdev type - specifies protocol type */ |
| 2193 | enum rdma_netdev_t { |
Niranjana Vishwanathapura | f0ad83ac | 2017-04-10 11:22:25 +0300 | [diff] [blame] | 2194 | RDMA_NETDEV_OPA_VNIC, |
| 2195 | RDMA_NETDEV_IPOIB, |
Vishwanathapura, Niranjana | 2fc7757 | 2017-04-12 20:29:20 -0700 | [diff] [blame] | 2196 | }; |
| 2197 | |
| 2198 | /** |
| 2199 | * struct rdma_netdev - rdma netdev |
| 2200 | * For cases where netstack interfacing is required. |
| 2201 | */ |
| 2202 | struct rdma_netdev { |
| 2203 | void *clnt_priv; |
| 2204 | struct ib_device *hca; |
Mark Bloch | 1fb7f89 | 2021-03-01 09:04:20 +0200 | [diff] [blame] | 2205 | u32 port_num; |
Gary Leshner | d99dc60 | 2020-05-11 12:05:48 -0400 | [diff] [blame] | 2206 | int mtu; |
Vishwanathapura, Niranjana | 2fc7757 | 2017-04-12 20:29:20 -0700 | [diff] [blame] | 2207 | |
Jason Gunthorpe | 9f49a5b | 2018-07-29 11:34:56 +0300 | [diff] [blame] | 2208 | /* |
| 2209 | * cleanup function must be specified. |
| 2210 | * FIXME: This is only used for OPA_VNIC and that usage should be |
| 2211 | * removed too. |
| 2212 | */ |
Niranjana Vishwanathapura | 8e95960 | 2017-06-30 13:14:46 -0700 | [diff] [blame] | 2213 | void (*free_rdma_netdev)(struct net_device *netdev); |
| 2214 | |
Vishwanathapura, Niranjana | 2fc7757 | 2017-04-12 20:29:20 -0700 | [diff] [blame] | 2215 | /* control functions */ |
| 2216 | void (*set_id)(struct net_device *netdev, int id); |
Niranjana Vishwanathapura | f0ad83ac | 2017-04-10 11:22:25 +0300 | [diff] [blame] | 2217 | /* send packet */ |
| 2218 | int (*send)(struct net_device *dev, struct sk_buff *skb, |
| 2219 | struct ib_ah *address, u32 dqpn); |
| 2220 | /* multicast */ |
| 2221 | int (*attach_mcast)(struct net_device *dev, struct ib_device *hca, |
| 2222 | union ib_gid *gid, u16 mlid, |
| 2223 | int set_qkey, u32 qkey); |
| 2224 | int (*detach_mcast)(struct net_device *dev, struct ib_device *hca, |
| 2225 | union ib_gid *gid, u16 mlid); |
Mike Marciniszyn | 042a00f | 2021-03-29 09:54:08 -0400 | [diff] [blame] | 2226 | /* timeout */ |
| 2227 | void (*tx_timeout)(struct net_device *dev, unsigned int txqueue); |
Vishwanathapura, Niranjana | 2fc7757 | 2017-04-12 20:29:20 -0700 | [diff] [blame] | 2228 | }; |
| 2229 | |
Denis Drozdov | f6a8a19 | 2018-08-14 14:08:51 +0300 | [diff] [blame] | 2230 | struct rdma_netdev_alloc_params { |
| 2231 | size_t sizeof_priv; |
| 2232 | unsigned int txqs; |
| 2233 | unsigned int rxqs; |
| 2234 | void *param; |
| 2235 | |
Mark Bloch | 1fb7f89 | 2021-03-01 09:04:20 +0200 | [diff] [blame] | 2236 | int (*initialize_rdma_netdev)(struct ib_device *device, u32 port_num, |
Denis Drozdov | f6a8a19 | 2018-08-14 14:08:51 +0300 | [diff] [blame] | 2237 | struct net_device *netdev, void *param); |
| 2238 | }; |
| 2239 | |
Erez Alfasi | a3de94e | 2019-10-16 09:23:05 +0300 | [diff] [blame] | 2240 | struct ib_odp_counters { |
| 2241 | atomic64_t faults; |
| 2242 | atomic64_t invalidations; |
Maor Gottlieb | d473f4d | 2020-06-21 13:41:47 +0300 | [diff] [blame] | 2243 | atomic64_t prefetch; |
Erez Alfasi | a3de94e | 2019-10-16 09:23:05 +0300 | [diff] [blame] | 2244 | }; |
| 2245 | |
Raed Salem | fa9b180 | 2018-05-31 16:43:31 +0300 | [diff] [blame] | 2246 | struct ib_counters { |
| 2247 | struct ib_device *device; |
| 2248 | struct ib_uobject *uobject; |
| 2249 | /* num of objects attached */ |
| 2250 | atomic_t usecnt; |
| 2251 | }; |
| 2252 | |
Raed Salem | 51d7a53 | 2018-05-31 16:43:33 +0300 | [diff] [blame] | 2253 | struct ib_counters_read_attr { |
| 2254 | u64 *counters_buff; |
| 2255 | u32 ncounters; |
| 2256 | u32 flags; /* use enum ib_read_counters_flags */ |
| 2257 | }; |
| 2258 | |
Matan Barak | 2eb9bea | 2018-03-28 09:27:45 +0300 | [diff] [blame] | 2259 | struct uverbs_attr_bundle; |
Kamal Heib | dd05cb8 | 2019-04-29 14:59:06 +0300 | [diff] [blame] | 2260 | struct iw_cm_id; |
| 2261 | struct iw_cm_conn_param; |
Matan Barak | 2eb9bea | 2018-03-28 09:27:45 +0300 | [diff] [blame] | 2262 | |
Leon Romanovsky | 30471d4 | 2019-02-03 14:55:50 +0200 | [diff] [blame] | 2263 | #define INIT_RDMA_OBJ_SIZE(ib_struct, drv_struct, member) \ |
| 2264 | .size_##ib_struct = \ |
| 2265 | (sizeof(struct drv_struct) + \ |
| 2266 | BUILD_BUG_ON_ZERO(offsetof(struct drv_struct, member)) + \ |
| 2267 | BUILD_BUG_ON_ZERO( \ |
| 2268 | !__same_type(((struct drv_struct *)NULL)->member, \ |
| 2269 | struct ib_struct))) |
| 2270 | |
Leon Romanovsky | 514aee6 | 2021-07-23 14:39:50 +0300 | [diff] [blame] | 2271 | #define rdma_zalloc_drv_obj_gfp(ib_dev, ib_type, gfp) \ |
| 2272 | ((struct ib_type *)rdma_zalloc_obj(ib_dev, ib_dev->ops.size_##ib_type, \ |
| 2273 | gfp, false)) |
| 2274 | |
| 2275 | #define rdma_zalloc_drv_obj_numa(ib_dev, ib_type) \ |
| 2276 | ((struct ib_type *)rdma_zalloc_obj(ib_dev, ib_dev->ops.size_##ib_type, \ |
| 2277 | GFP_KERNEL, true)) |
Leon Romanovsky | f631603 | 2019-03-28 15:12:58 +0200 | [diff] [blame] | 2278 | |
Leon Romanovsky | 30471d4 | 2019-02-03 14:55:50 +0200 | [diff] [blame] | 2279 | #define rdma_zalloc_drv_obj(ib_dev, ib_type) \ |
Leon Romanovsky | f631603 | 2019-03-28 15:12:58 +0200 | [diff] [blame] | 2280 | rdma_zalloc_drv_obj_gfp(ib_dev, ib_type, GFP_KERNEL) |
Leon Romanovsky | 30471d4 | 2019-02-03 14:55:50 +0200 | [diff] [blame] | 2281 | |
| 2282 | #define DECLARE_RDMA_OBJ_SIZE(ib_struct) size_t size_##ib_struct |
| 2283 | |
Michal Kalderon | 3411f9f | 2019-10-30 11:44:11 +0200 | [diff] [blame] | 2284 | struct rdma_user_mmap_entry { |
| 2285 | struct kref ref; |
| 2286 | struct ib_ucontext *ucontext; |
| 2287 | unsigned long start_pgoff; |
| 2288 | size_t npages; |
| 2289 | bool driver_removed; |
| 2290 | }; |
| 2291 | |
| 2292 | /* Return the offset (in bytes) the user should pass to libc's mmap() */ |
| 2293 | static inline u64 |
| 2294 | rdma_user_mmap_get_offset(const struct rdma_user_mmap_entry *entry) |
| 2295 | { |
| 2296 | return (u64)entry->start_pgoff << PAGE_SHIFT; |
| 2297 | } |
| 2298 | |
Kamal Heib | 521ed0d | 2018-12-10 21:09:30 +0200 | [diff] [blame] | 2299 | /** |
| 2300 | * struct ib_device_ops - InfiniBand device operations |
| 2301 | * This structure defines all the InfiniBand device operations, providers will |
| 2302 | * need to define the supported operations, otherwise they will be set to null. |
| 2303 | */ |
| 2304 | struct ib_device_ops { |
Jason Gunthorpe | 7a15414 | 2019-06-05 14:39:26 -0300 | [diff] [blame] | 2305 | struct module *owner; |
Jason Gunthorpe | b9560a4 | 2019-06-05 14:39:24 -0300 | [diff] [blame] | 2306 | enum rdma_driver_id driver_id; |
Jason Gunthorpe | 72c6ec1 | 2019-06-05 14:39:25 -0300 | [diff] [blame] | 2307 | u32 uverbs_abi_ver; |
Jason Gunthorpe | 8f71bb0 | 2019-06-13 21:38:19 -0300 | [diff] [blame] | 2308 | unsigned int uverbs_no_driver_id_binding:1; |
Jason Gunthorpe | b9560a4 | 2019-06-05 14:39:24 -0300 | [diff] [blame] | 2309 | |
Jason Gunthorpe | 915e4af | 2021-06-11 19:00:34 +0300 | [diff] [blame] | 2310 | /* |
| 2311 | * NOTE: New drivers should not make use of device_group; instead new |
| 2312 | * device parameter should be exposed via netlink command. This |
| 2313 | * mechanism exists only for existing drivers. |
| 2314 | */ |
| 2315 | const struct attribute_group *device_group; |
Jason Gunthorpe | d7407d1 | 2021-06-11 19:00:32 +0300 | [diff] [blame] | 2316 | const struct attribute_group **port_groups; |
| 2317 | |
Kamal Heib | 521ed0d | 2018-12-10 21:09:30 +0200 | [diff] [blame] | 2318 | int (*post_send)(struct ib_qp *qp, const struct ib_send_wr *send_wr, |
| 2319 | const struct ib_send_wr **bad_send_wr); |
| 2320 | int (*post_recv)(struct ib_qp *qp, const struct ib_recv_wr *recv_wr, |
| 2321 | const struct ib_recv_wr **bad_recv_wr); |
| 2322 | void (*drain_rq)(struct ib_qp *qp); |
| 2323 | void (*drain_sq)(struct ib_qp *qp); |
| 2324 | int (*poll_cq)(struct ib_cq *cq, int num_entries, struct ib_wc *wc); |
| 2325 | int (*peek_cq)(struct ib_cq *cq, int wc_cnt); |
| 2326 | int (*req_notify_cq)(struct ib_cq *cq, enum ib_cq_notify_flags flags); |
Kamal Heib | 521ed0d | 2018-12-10 21:09:30 +0200 | [diff] [blame] | 2327 | int (*post_srq_recv)(struct ib_srq *srq, |
| 2328 | const struct ib_recv_wr *recv_wr, |
| 2329 | const struct ib_recv_wr **bad_recv_wr); |
| 2330 | int (*process_mad)(struct ib_device *device, int process_mad_flags, |
Mark Bloch | 1fb7f89 | 2021-03-01 09:04:20 +0200 | [diff] [blame] | 2331 | u32 port_num, const struct ib_wc *in_wc, |
Kamal Heib | 521ed0d | 2018-12-10 21:09:30 +0200 | [diff] [blame] | 2332 | const struct ib_grh *in_grh, |
Leon Romanovsky | e26e7b8 | 2019-10-29 08:27:45 +0200 | [diff] [blame] | 2333 | const struct ib_mad *in_mad, struct ib_mad *out_mad, |
| 2334 | size_t *out_mad_size, u16 *out_mad_pkey_index); |
Kamal Heib | 521ed0d | 2018-12-10 21:09:30 +0200 | [diff] [blame] | 2335 | int (*query_device)(struct ib_device *device, |
| 2336 | struct ib_device_attr *device_attr, |
| 2337 | struct ib_udata *udata); |
| 2338 | int (*modify_device)(struct ib_device *device, int device_modify_mask, |
| 2339 | struct ib_device_modify *device_modify); |
| 2340 | void (*get_dev_fw_str)(struct ib_device *device, char *str); |
| 2341 | const struct cpumask *(*get_vector_affinity)(struct ib_device *ibdev, |
| 2342 | int comp_vector); |
Mark Bloch | 1fb7f89 | 2021-03-01 09:04:20 +0200 | [diff] [blame] | 2343 | int (*query_port)(struct ib_device *device, u32 port_num, |
Kamal Heib | 521ed0d | 2018-12-10 21:09:30 +0200 | [diff] [blame] | 2344 | struct ib_port_attr *port_attr); |
Mark Bloch | 1fb7f89 | 2021-03-01 09:04:20 +0200 | [diff] [blame] | 2345 | int (*modify_port)(struct ib_device *device, u32 port_num, |
Kamal Heib | 521ed0d | 2018-12-10 21:09:30 +0200 | [diff] [blame] | 2346 | int port_modify_mask, |
| 2347 | struct ib_port_modify *port_modify); |
| 2348 | /** |
| 2349 | * The following mandatory functions are used only at device |
| 2350 | * registration. Keep functions such as these at the end of this |
| 2351 | * structure to avoid cache line misses when accessing struct ib_device |
| 2352 | * in fast paths. |
| 2353 | */ |
Mark Bloch | 1fb7f89 | 2021-03-01 09:04:20 +0200 | [diff] [blame] | 2354 | int (*get_port_immutable)(struct ib_device *device, u32 port_num, |
Kamal Heib | 521ed0d | 2018-12-10 21:09:30 +0200 | [diff] [blame] | 2355 | struct ib_port_immutable *immutable); |
| 2356 | enum rdma_link_layer (*get_link_layer)(struct ib_device *device, |
Mark Bloch | 1fb7f89 | 2021-03-01 09:04:20 +0200 | [diff] [blame] | 2357 | u32 port_num); |
Kamal Heib | 521ed0d | 2018-12-10 21:09:30 +0200 | [diff] [blame] | 2358 | /** |
| 2359 | * When calling get_netdev, the HW vendor's driver should return the |
| 2360 | * net device of device @device at port @port_num or NULL if such |
| 2361 | * a net device doesn't exist. The vendor driver should call dev_hold |
| 2362 | * on this net device. The HW vendor's device driver must guarantee |
| 2363 | * that this function returns NULL before the net device has finished |
| 2364 | * NETDEV_UNREGISTER state. |
| 2365 | */ |
Mark Bloch | 1fb7f89 | 2021-03-01 09:04:20 +0200 | [diff] [blame] | 2366 | struct net_device *(*get_netdev)(struct ib_device *device, |
| 2367 | u32 port_num); |
Kamal Heib | 521ed0d | 2018-12-10 21:09:30 +0200 | [diff] [blame] | 2368 | /** |
| 2369 | * rdma netdev operation |
| 2370 | * |
| 2371 | * Driver implementing alloc_rdma_netdev or rdma_netdev_get_params |
| 2372 | * must return -EOPNOTSUPP if it doesn't support the specified type. |
| 2373 | */ |
| 2374 | struct net_device *(*alloc_rdma_netdev)( |
Mark Bloch | 1fb7f89 | 2021-03-01 09:04:20 +0200 | [diff] [blame] | 2375 | struct ib_device *device, u32 port_num, enum rdma_netdev_t type, |
Kamal Heib | 521ed0d | 2018-12-10 21:09:30 +0200 | [diff] [blame] | 2376 | const char *name, unsigned char name_assign_type, |
| 2377 | void (*setup)(struct net_device *)); |
| 2378 | |
Mark Bloch | 1fb7f89 | 2021-03-01 09:04:20 +0200 | [diff] [blame] | 2379 | int (*rdma_netdev_get_params)(struct ib_device *device, u32 port_num, |
Kamal Heib | 521ed0d | 2018-12-10 21:09:30 +0200 | [diff] [blame] | 2380 | enum rdma_netdev_t type, |
| 2381 | struct rdma_netdev_alloc_params *params); |
| 2382 | /** |
| 2383 | * query_gid should be return GID value for @device, when @port_num |
| 2384 | * link layer is either IB or iWarp. It is no-op if @port_num port |
| 2385 | * is RoCE link layer. |
| 2386 | */ |
Mark Bloch | 1fb7f89 | 2021-03-01 09:04:20 +0200 | [diff] [blame] | 2387 | int (*query_gid)(struct ib_device *device, u32 port_num, int index, |
Kamal Heib | 521ed0d | 2018-12-10 21:09:30 +0200 | [diff] [blame] | 2388 | union ib_gid *gid); |
| 2389 | /** |
| 2390 | * When calling add_gid, the HW vendor's driver should add the gid |
| 2391 | * of device of port at gid index available at @attr. Meta-info of |
| 2392 | * that gid (for example, the network device related to this gid) is |
| 2393 | * available at @attr. @context allows the HW vendor driver to store |
| 2394 | * extra information together with a GID entry. The HW vendor driver may |
| 2395 | * allocate memory to contain this information and store it in @context |
| 2396 | * when a new GID entry is written to. Params are consistent until the |
| 2397 | * next call of add_gid or delete_gid. The function should return 0 on |
| 2398 | * success or error otherwise. The function could be called |
| 2399 | * concurrently for different ports. This function is only called when |
| 2400 | * roce_gid_table is used. |
| 2401 | */ |
| 2402 | int (*add_gid)(const struct ib_gid_attr *attr, void **context); |
| 2403 | /** |
| 2404 | * When calling del_gid, the HW vendor's driver should delete the |
| 2405 | * gid of device @device at gid index gid_index of port port_num |
| 2406 | * available in @attr. |
| 2407 | * Upon the deletion of a GID entry, the HW vendor must free any |
| 2408 | * allocated memory. The caller will clear @context afterwards. |
| 2409 | * This function is only called when roce_gid_table is used. |
| 2410 | */ |
| 2411 | int (*del_gid)(const struct ib_gid_attr *attr, void **context); |
Mark Bloch | 1fb7f89 | 2021-03-01 09:04:20 +0200 | [diff] [blame] | 2412 | int (*query_pkey)(struct ib_device *device, u32 port_num, u16 index, |
Kamal Heib | 521ed0d | 2018-12-10 21:09:30 +0200 | [diff] [blame] | 2413 | u16 *pkey); |
Leon Romanovsky | a2a074e | 2019-02-12 20:39:16 +0200 | [diff] [blame] | 2414 | int (*alloc_ucontext)(struct ib_ucontext *context, |
| 2415 | struct ib_udata *udata); |
| 2416 | void (*dealloc_ucontext)(struct ib_ucontext *context); |
Kamal Heib | 521ed0d | 2018-12-10 21:09:30 +0200 | [diff] [blame] | 2417 | int (*mmap)(struct ib_ucontext *context, struct vm_area_struct *vma); |
Michal Kalderon | 3411f9f | 2019-10-30 11:44:11 +0200 | [diff] [blame] | 2418 | /** |
| 2419 | * This will be called once refcount of an entry in mmap_xa reaches |
| 2420 | * zero. The type of the memory that was mapped may differ between |
| 2421 | * entries and is opaque to the rdma_user_mmap interface. |
| 2422 | * Therefore needs to be implemented by the driver in mmap_free. |
| 2423 | */ |
| 2424 | void (*mmap_free)(struct rdma_user_mmap_entry *entry); |
Kamal Heib | 521ed0d | 2018-12-10 21:09:30 +0200 | [diff] [blame] | 2425 | void (*disassociate_ucontext)(struct ib_ucontext *ibcontext); |
Shamir Rabinovitch | ff23dfa | 2019-03-31 19:10:07 +0300 | [diff] [blame] | 2426 | int (*alloc_pd)(struct ib_pd *pd, struct ib_udata *udata); |
Leon Romanovsky | 91a7c58 | 2020-09-07 15:09:13 +0300 | [diff] [blame] | 2427 | int (*dealloc_pd)(struct ib_pd *pd, struct ib_udata *udata); |
Maor Gottlieb | fa5d010 | 2020-04-30 22:21:42 +0300 | [diff] [blame] | 2428 | int (*create_ah)(struct ib_ah *ah, struct rdma_ah_init_attr *attr, |
| 2429 | struct ib_udata *udata); |
Jason Gunthorpe | 676a80a | 2020-10-03 20:20:11 -0300 | [diff] [blame] | 2430 | int (*create_user_ah)(struct ib_ah *ah, struct rdma_ah_init_attr *attr, |
| 2431 | struct ib_udata *udata); |
Kamal Heib | 521ed0d | 2018-12-10 21:09:30 +0200 | [diff] [blame] | 2432 | int (*modify_ah)(struct ib_ah *ah, struct rdma_ah_attr *ah_attr); |
| 2433 | int (*query_ah)(struct ib_ah *ah, struct rdma_ah_attr *ah_attr); |
Leon Romanovsky | 9a9ebf8 | 2020-09-07 15:09:14 +0300 | [diff] [blame] | 2434 | int (*destroy_ah)(struct ib_ah *ah, u32 flags); |
Leon Romanovsky | 68e326d | 2019-04-03 16:42:43 +0300 | [diff] [blame] | 2435 | int (*create_srq)(struct ib_srq *srq, |
| 2436 | struct ib_srq_init_attr *srq_init_attr, |
| 2437 | struct ib_udata *udata); |
Kamal Heib | 521ed0d | 2018-12-10 21:09:30 +0200 | [diff] [blame] | 2438 | int (*modify_srq)(struct ib_srq *srq, struct ib_srq_attr *srq_attr, |
| 2439 | enum ib_srq_attr_mask srq_attr_mask, |
| 2440 | struct ib_udata *udata); |
| 2441 | int (*query_srq)(struct ib_srq *srq, struct ib_srq_attr *srq_attr); |
Leon Romanovsky | 119181d | 2020-09-07 15:09:16 +0300 | [diff] [blame] | 2442 | int (*destroy_srq)(struct ib_srq *srq, struct ib_udata *udata); |
Leon Romanovsky | 514aee6 | 2021-07-23 14:39:50 +0300 | [diff] [blame] | 2443 | int (*create_qp)(struct ib_qp *qp, struct ib_qp_init_attr *qp_init_attr, |
| 2444 | struct ib_udata *udata); |
Kamal Heib | 521ed0d | 2018-12-10 21:09:30 +0200 | [diff] [blame] | 2445 | int (*modify_qp)(struct ib_qp *qp, struct ib_qp_attr *qp_attr, |
| 2446 | int qp_attr_mask, struct ib_udata *udata); |
| 2447 | int (*query_qp)(struct ib_qp *qp, struct ib_qp_attr *qp_attr, |
| 2448 | int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr); |
Shamir Rabinovitch | c4367a2 | 2019-03-31 19:10:05 +0300 | [diff] [blame] | 2449 | int (*destroy_qp)(struct ib_qp *qp, struct ib_udata *udata); |
Leon Romanovsky | e39afe3 | 2019-05-28 14:37:29 +0300 | [diff] [blame] | 2450 | int (*create_cq)(struct ib_cq *cq, const struct ib_cq_init_attr *attr, |
| 2451 | struct ib_udata *udata); |
Kamal Heib | 521ed0d | 2018-12-10 21:09:30 +0200 | [diff] [blame] | 2452 | int (*modify_cq)(struct ib_cq *cq, u16 cq_count, u16 cq_period); |
Leon Romanovsky | 43d781b | 2020-09-07 15:09:18 +0300 | [diff] [blame] | 2453 | int (*destroy_cq)(struct ib_cq *cq, struct ib_udata *udata); |
Kamal Heib | 521ed0d | 2018-12-10 21:09:30 +0200 | [diff] [blame] | 2454 | int (*resize_cq)(struct ib_cq *cq, int cqe, struct ib_udata *udata); |
| 2455 | struct ib_mr *(*get_dma_mr)(struct ib_pd *pd, int mr_access_flags); |
| 2456 | struct ib_mr *(*reg_user_mr)(struct ib_pd *pd, u64 start, u64 length, |
| 2457 | u64 virt_addr, int mr_access_flags, |
| 2458 | struct ib_udata *udata); |
Jianxin Xiong | 3bc489e | 2020-12-15 13:27:14 -0800 | [diff] [blame] | 2459 | struct ib_mr *(*reg_user_mr_dmabuf)(struct ib_pd *pd, u64 offset, |
| 2460 | u64 length, u64 virt_addr, int fd, |
| 2461 | int mr_access_flags, |
| 2462 | struct ib_udata *udata); |
Jason Gunthorpe | 6e0954b | 2020-11-30 09:58:37 +0200 | [diff] [blame] | 2463 | struct ib_mr *(*rereg_user_mr)(struct ib_mr *mr, int flags, u64 start, |
| 2464 | u64 length, u64 virt_addr, |
| 2465 | int mr_access_flags, struct ib_pd *pd, |
| 2466 | struct ib_udata *udata); |
Shamir Rabinovitch | c4367a2 | 2019-03-31 19:10:05 +0300 | [diff] [blame] | 2467 | int (*dereg_mr)(struct ib_mr *mr, struct ib_udata *udata); |
Kamal Heib | 521ed0d | 2018-12-10 21:09:30 +0200 | [diff] [blame] | 2468 | struct ib_mr *(*alloc_mr)(struct ib_pd *pd, enum ib_mr_type mr_type, |
Gal Pressman | 42a3b15 | 2020-07-06 15:03:43 +0300 | [diff] [blame] | 2469 | u32 max_num_sg); |
Israel Rukshin | 26bc7ea | 2019-06-11 18:52:39 +0300 | [diff] [blame] | 2470 | struct ib_mr *(*alloc_mr_integrity)(struct ib_pd *pd, |
| 2471 | u32 max_num_data_sg, |
| 2472 | u32 max_num_meta_sg); |
Moni Shoua | ad8a449 | 2018-12-11 13:37:52 +0200 | [diff] [blame] | 2473 | int (*advise_mr)(struct ib_pd *pd, |
| 2474 | enum ib_uverbs_advise_mr_advice advice, u32 flags, |
| 2475 | struct ib_sge *sg_list, u32 num_sge, |
| 2476 | struct uverbs_attr_bundle *attrs); |
Avihai Horon | 1477d44 | 2021-06-09 14:05:03 +0300 | [diff] [blame] | 2477 | |
| 2478 | /* |
| 2479 | * Kernel users should universally support relaxed ordering (RO), as |
| 2480 | * they are designed to read data only after observing the CQE and use |
| 2481 | * the DMA API correctly. |
| 2482 | * |
| 2483 | * Some drivers implicitly enable RO if platform supports it. |
| 2484 | */ |
Kamal Heib | 521ed0d | 2018-12-10 21:09:30 +0200 | [diff] [blame] | 2485 | int (*map_mr_sg)(struct ib_mr *mr, struct scatterlist *sg, int sg_nents, |
| 2486 | unsigned int *sg_offset); |
| 2487 | int (*check_mr_status)(struct ib_mr *mr, u32 check_mask, |
| 2488 | struct ib_mr_status *mr_status); |
Leon Romanovsky | d18bb3e | 2020-09-02 11:16:22 +0300 | [diff] [blame] | 2489 | int (*alloc_mw)(struct ib_mw *mw, struct ib_udata *udata); |
Kamal Heib | 521ed0d | 2018-12-10 21:09:30 +0200 | [diff] [blame] | 2490 | int (*dealloc_mw)(struct ib_mw *mw); |
Kamal Heib | 521ed0d | 2018-12-10 21:09:30 +0200 | [diff] [blame] | 2491 | int (*attach_mcast)(struct ib_qp *qp, union ib_gid *gid, u16 lid); |
| 2492 | int (*detach_mcast)(struct ib_qp *qp, union ib_gid *gid, u16 lid); |
Leon Romanovsky | 28ad5f65 | 2020-06-30 13:18:54 +0300 | [diff] [blame] | 2493 | int (*alloc_xrcd)(struct ib_xrcd *xrcd, struct ib_udata *udata); |
Leon Romanovsky | d0c45c8 | 2020-09-07 15:09:19 +0300 | [diff] [blame] | 2494 | int (*dealloc_xrcd)(struct ib_xrcd *xrcd, struct ib_udata *udata); |
Kamal Heib | 521ed0d | 2018-12-10 21:09:30 +0200 | [diff] [blame] | 2495 | struct ib_flow *(*create_flow)(struct ib_qp *qp, |
| 2496 | struct ib_flow_attr *flow_attr, |
Leon Romanovsky | d667374 | 2020-07-30 11:12:35 +0300 | [diff] [blame] | 2497 | struct ib_udata *udata); |
Kamal Heib | 521ed0d | 2018-12-10 21:09:30 +0200 | [diff] [blame] | 2498 | int (*destroy_flow)(struct ib_flow *flow_id); |
| 2499 | struct ib_flow_action *(*create_flow_action_esp)( |
| 2500 | struct ib_device *device, |
| 2501 | const struct ib_flow_action_attrs_esp *attr, |
| 2502 | struct uverbs_attr_bundle *attrs); |
| 2503 | int (*destroy_flow_action)(struct ib_flow_action *action); |
| 2504 | int (*modify_flow_action_esp)( |
| 2505 | struct ib_flow_action *action, |
| 2506 | const struct ib_flow_action_attrs_esp *attr, |
| 2507 | struct uverbs_attr_bundle *attrs); |
Mark Bloch | 1fb7f89 | 2021-03-01 09:04:20 +0200 | [diff] [blame] | 2508 | int (*set_vf_link_state)(struct ib_device *device, int vf, u32 port, |
Kamal Heib | 521ed0d | 2018-12-10 21:09:30 +0200 | [diff] [blame] | 2509 | int state); |
Mark Bloch | 1fb7f89 | 2021-03-01 09:04:20 +0200 | [diff] [blame] | 2510 | int (*get_vf_config)(struct ib_device *device, int vf, u32 port, |
Kamal Heib | 521ed0d | 2018-12-10 21:09:30 +0200 | [diff] [blame] | 2511 | struct ifla_vf_info *ivf); |
Mark Bloch | 1fb7f89 | 2021-03-01 09:04:20 +0200 | [diff] [blame] | 2512 | int (*get_vf_stats)(struct ib_device *device, int vf, u32 port, |
Kamal Heib | 521ed0d | 2018-12-10 21:09:30 +0200 | [diff] [blame] | 2513 | struct ifla_vf_stats *stats); |
Mark Bloch | 1fb7f89 | 2021-03-01 09:04:20 +0200 | [diff] [blame] | 2514 | int (*get_vf_guid)(struct ib_device *device, int vf, u32 port, |
Danit Goldberg | bfcb3c5d | 2019-11-06 15:08:32 +0200 | [diff] [blame] | 2515 | struct ifla_vf_guid *node_guid, |
| 2516 | struct ifla_vf_guid *port_guid); |
Mark Bloch | 1fb7f89 | 2021-03-01 09:04:20 +0200 | [diff] [blame] | 2517 | int (*set_vf_guid)(struct ib_device *device, int vf, u32 port, u64 guid, |
Kamal Heib | 521ed0d | 2018-12-10 21:09:30 +0200 | [diff] [blame] | 2518 | int type); |
| 2519 | struct ib_wq *(*create_wq)(struct ib_pd *pd, |
| 2520 | struct ib_wq_init_attr *init_attr, |
| 2521 | struct ib_udata *udata); |
Leon Romanovsky | add5353 | 2020-09-07 15:09:20 +0300 | [diff] [blame] | 2522 | int (*destroy_wq)(struct ib_wq *wq, struct ib_udata *udata); |
Kamal Heib | 521ed0d | 2018-12-10 21:09:30 +0200 | [diff] [blame] | 2523 | int (*modify_wq)(struct ib_wq *wq, struct ib_wq_attr *attr, |
| 2524 | u32 wq_attr_mask, struct ib_udata *udata); |
Leon Romanovsky | c0a6b5e | 2020-09-02 11:16:23 +0300 | [diff] [blame] | 2525 | int (*create_rwq_ind_table)(struct ib_rwq_ind_table *ib_rwq_ind_table, |
| 2526 | struct ib_rwq_ind_table_init_attr *init_attr, |
| 2527 | struct ib_udata *udata); |
Kamal Heib | 521ed0d | 2018-12-10 21:09:30 +0200 | [diff] [blame] | 2528 | int (*destroy_rwq_ind_table)(struct ib_rwq_ind_table *wq_ind_table); |
| 2529 | struct ib_dm *(*alloc_dm)(struct ib_device *device, |
| 2530 | struct ib_ucontext *context, |
| 2531 | struct ib_dm_alloc_attr *attr, |
| 2532 | struct uverbs_attr_bundle *attrs); |
Shamir Rabinovitch | c4367a2 | 2019-03-31 19:10:05 +0300 | [diff] [blame] | 2533 | int (*dealloc_dm)(struct ib_dm *dm, struct uverbs_attr_bundle *attrs); |
Kamal Heib | 521ed0d | 2018-12-10 21:09:30 +0200 | [diff] [blame] | 2534 | struct ib_mr *(*reg_dm_mr)(struct ib_pd *pd, struct ib_dm *dm, |
| 2535 | struct ib_dm_mr_attr *attr, |
| 2536 | struct uverbs_attr_bundle *attrs); |
Leon Romanovsky | 3b023e1 | 2020-06-30 13:18:52 +0300 | [diff] [blame] | 2537 | int (*create_counters)(struct ib_counters *counters, |
| 2538 | struct uverbs_attr_bundle *attrs); |
Leon Romanovsky | 71ff3f6 | 2020-09-07 15:09:21 +0300 | [diff] [blame] | 2539 | int (*destroy_counters)(struct ib_counters *counters); |
Kamal Heib | 521ed0d | 2018-12-10 21:09:30 +0200 | [diff] [blame] | 2540 | int (*read_counters)(struct ib_counters *counters, |
| 2541 | struct ib_counters_read_attr *counters_read_attr, |
| 2542 | struct uverbs_attr_bundle *attrs); |
Max Gurtovoy | 2cdfcdd | 2019-06-11 18:52:40 +0300 | [diff] [blame] | 2543 | int (*map_mr_sg_pi)(struct ib_mr *mr, struct scatterlist *data_sg, |
| 2544 | int data_sg_nents, unsigned int *data_sg_offset, |
| 2545 | struct scatterlist *meta_sg, int meta_sg_nents, |
| 2546 | unsigned int *meta_sg_offset); |
| 2547 | |
Kamal Heib | 521ed0d | 2018-12-10 21:09:30 +0200 | [diff] [blame] | 2548 | /** |
Jason Gunthorpe | 4b5f4d3 | 2021-06-11 19:00:20 +0300 | [diff] [blame] | 2549 | * alloc_hw_[device,port]_stats - Allocate a struct rdma_hw_stats and |
| 2550 | * fill in the driver initialized data. The struct is kfree()'ed by |
| 2551 | * the sysfs core when the device is removed. A lifespan of -1 in the |
| 2552 | * return struct tells the core to set a default lifespan. |
Kamal Heib | 521ed0d | 2018-12-10 21:09:30 +0200 | [diff] [blame] | 2553 | */ |
Jason Gunthorpe | 4b5f4d3 | 2021-06-11 19:00:20 +0300 | [diff] [blame] | 2554 | struct rdma_hw_stats *(*alloc_hw_device_stats)(struct ib_device *device); |
| 2555 | struct rdma_hw_stats *(*alloc_hw_port_stats)(struct ib_device *device, |
| 2556 | u32 port_num); |
Kamal Heib | 521ed0d | 2018-12-10 21:09:30 +0200 | [diff] [blame] | 2557 | /** |
| 2558 | * get_hw_stats - Fill in the counter value(s) in the stats struct. |
| 2559 | * @index - The index in the value array we wish to have updated, or |
| 2560 | * num_counters if we want all stats updated |
| 2561 | * Return codes - |
| 2562 | * < 0 - Error, no counters updated |
| 2563 | * index - Updated the single counter pointed to by index |
| 2564 | * num_counters - Updated all counters (will reset the timestamp |
| 2565 | * and prevent further calls for lifespan milliseconds) |
| 2566 | * Drivers are allowed to update all counters in leiu of just the |
| 2567 | * one given in index at their option |
| 2568 | */ |
| 2569 | int (*get_hw_stats)(struct ib_device *device, |
Mark Bloch | 1fb7f89 | 2021-03-01 09:04:20 +0200 | [diff] [blame] | 2570 | struct rdma_hw_stats *stats, u32 port, int index); |
Jason Gunthorpe | d7407d1 | 2021-06-11 19:00:32 +0300 | [diff] [blame] | 2571 | |
Leon Romanovsky | 02da375 | 2019-01-30 12:49:02 +0200 | [diff] [blame] | 2572 | /** |
| 2573 | * Allows rdma drivers to add their own restrack attributes. |
| 2574 | */ |
Maor Gottlieb | f443452 | 2020-06-23 14:30:36 +0300 | [diff] [blame] | 2575 | int (*fill_res_mr_entry)(struct sk_buff *msg, struct ib_mr *ibmr); |
Maor Gottlieb | 65959522 | 2020-06-23 14:30:40 +0300 | [diff] [blame] | 2576 | int (*fill_res_mr_entry_raw)(struct sk_buff *msg, struct ib_mr *ibmr); |
Maor Gottlieb | 9e2a187 | 2020-06-23 14:30:37 +0300 | [diff] [blame] | 2577 | int (*fill_res_cq_entry)(struct sk_buff *msg, struct ib_cq *ibcq); |
Maor Gottlieb | 65959522 | 2020-06-23 14:30:40 +0300 | [diff] [blame] | 2578 | int (*fill_res_cq_entry_raw)(struct sk_buff *msg, struct ib_cq *ibcq); |
Maor Gottlieb | 5cc3411 | 2020-06-23 14:30:38 +0300 | [diff] [blame] | 2579 | int (*fill_res_qp_entry)(struct sk_buff *msg, struct ib_qp *ibqp); |
Maor Gottlieb | 65959522 | 2020-06-23 14:30:40 +0300 | [diff] [blame] | 2580 | int (*fill_res_qp_entry_raw)(struct sk_buff *msg, struct ib_qp *ibqp); |
Maor Gottlieb | 211cd94 | 2020-06-23 14:30:39 +0300 | [diff] [blame] | 2581 | int (*fill_res_cm_id_entry)(struct sk_buff *msg, struct rdma_cm_id *id); |
Leon Romanovsky | 21a428a | 2019-02-03 14:55:51 +0200 | [diff] [blame] | 2582 | |
Jason Gunthorpe | d089989 | 2019-02-12 21:12:53 -0700 | [diff] [blame] | 2583 | /* Device lifecycle callbacks */ |
| 2584 | /* |
Jason Gunthorpe | ca22354 | 2019-02-12 21:12:56 -0700 | [diff] [blame] | 2585 | * Called after the device becomes registered, before clients are |
| 2586 | * attached |
| 2587 | */ |
| 2588 | int (*enable_driver)(struct ib_device *dev); |
| 2589 | /* |
Jason Gunthorpe | d089989 | 2019-02-12 21:12:53 -0700 | [diff] [blame] | 2590 | * This is called as part of ib_dealloc_device(). |
| 2591 | */ |
| 2592 | void (*dealloc_driver)(struct ib_device *dev); |
| 2593 | |
Kamal Heib | dd05cb8 | 2019-04-29 14:59:06 +0300 | [diff] [blame] | 2594 | /* iWarp CM callbacks */ |
| 2595 | void (*iw_add_ref)(struct ib_qp *qp); |
| 2596 | void (*iw_rem_ref)(struct ib_qp *qp); |
| 2597 | struct ib_qp *(*iw_get_qp)(struct ib_device *device, int qpn); |
| 2598 | int (*iw_connect)(struct iw_cm_id *cm_id, |
| 2599 | struct iw_cm_conn_param *conn_param); |
| 2600 | int (*iw_accept)(struct iw_cm_id *cm_id, |
| 2601 | struct iw_cm_conn_param *conn_param); |
| 2602 | int (*iw_reject)(struct iw_cm_id *cm_id, const void *pdata, |
| 2603 | u8 pdata_len); |
| 2604 | int (*iw_create_listen)(struct iw_cm_id *cm_id, int backlog); |
| 2605 | int (*iw_destroy_listen)(struct iw_cm_id *cm_id); |
Mark Zhang | 99fa331 | 2019-07-02 13:02:35 +0300 | [diff] [blame] | 2606 | /** |
| 2607 | * counter_bind_qp - Bind a QP to a counter. |
| 2608 | * @counter - The counter to be bound. If counter->id is zero then |
| 2609 | * the driver needs to allocate a new counter and set counter->id |
| 2610 | */ |
| 2611 | int (*counter_bind_qp)(struct rdma_counter *counter, struct ib_qp *qp); |
| 2612 | /** |
| 2613 | * counter_unbind_qp - Unbind the qp from the dynamically-allocated |
| 2614 | * counter and bind it onto the default one |
| 2615 | */ |
| 2616 | int (*counter_unbind_qp)(struct ib_qp *qp); |
| 2617 | /** |
| 2618 | * counter_dealloc -De-allocate the hw counter |
| 2619 | */ |
| 2620 | int (*counter_dealloc)(struct rdma_counter *counter); |
Mark Zhang | c4ffee7 | 2019-07-02 13:02:40 +0300 | [diff] [blame] | 2621 | /** |
| 2622 | * counter_alloc_stats - Allocate a struct rdma_hw_stats and fill in |
| 2623 | * the driver initialized data. |
| 2624 | */ |
| 2625 | struct rdma_hw_stats *(*counter_alloc_stats)( |
| 2626 | struct rdma_counter *counter); |
| 2627 | /** |
| 2628 | * counter_update_stats - Query the stats value of this counter |
| 2629 | */ |
| 2630 | int (*counter_update_stats)(struct rdma_counter *counter); |
Kamal Heib | dd05cb8 | 2019-04-29 14:59:06 +0300 | [diff] [blame] | 2631 | |
Erez Alfasi | 4061ff7 | 2019-10-16 09:23:08 +0300 | [diff] [blame] | 2632 | /** |
| 2633 | * Allows rdma drivers to add their own restrack attributes |
| 2634 | * dumped via 'rdma stat' iproute2 command. |
| 2635 | */ |
Maor Gottlieb | f443452 | 2020-06-23 14:30:36 +0300 | [diff] [blame] | 2636 | int (*fill_stat_mr_entry)(struct sk_buff *msg, struct ib_mr *ibmr); |
Erez Alfasi | 4061ff7 | 2019-10-16 09:23:08 +0300 | [diff] [blame] | 2637 | |
Yishai Hadas | 1c8fb1e | 2020-06-30 12:39:12 +0300 | [diff] [blame] | 2638 | /* query driver for its ucontext properties */ |
| 2639 | int (*query_ucontext)(struct ib_ucontext *context, |
| 2640 | struct uverbs_attr_bundle *attrs); |
| 2641 | |
Leon Romanovsky | 514aee6 | 2021-07-23 14:39:50 +0300 | [diff] [blame] | 2642 | /* |
| 2643 | * Provide NUMA node. This API exists for rdmavt/hfi1 only. |
| 2644 | * Everyone else relies on Linux memory management model. |
| 2645 | */ |
| 2646 | int (*get_numa_node)(struct ib_device *dev); |
| 2647 | |
Leon Romanovsky | d345691 | 2019-04-03 16:42:42 +0300 | [diff] [blame] | 2648 | DECLARE_RDMA_OBJ_SIZE(ib_ah); |
Leon Romanovsky | 3b023e1 | 2020-06-30 13:18:52 +0300 | [diff] [blame] | 2649 | DECLARE_RDMA_OBJ_SIZE(ib_counters); |
Leon Romanovsky | e39afe3 | 2019-05-28 14:37:29 +0300 | [diff] [blame] | 2650 | DECLARE_RDMA_OBJ_SIZE(ib_cq); |
Leon Romanovsky | d18bb3e | 2020-09-02 11:16:22 +0300 | [diff] [blame] | 2651 | DECLARE_RDMA_OBJ_SIZE(ib_mw); |
Leon Romanovsky | 21a428a | 2019-02-03 14:55:51 +0200 | [diff] [blame] | 2652 | DECLARE_RDMA_OBJ_SIZE(ib_pd); |
Leon Romanovsky | 514aee6 | 2021-07-23 14:39:50 +0300 | [diff] [blame] | 2653 | DECLARE_RDMA_OBJ_SIZE(ib_qp); |
Leon Romanovsky | c0a6b5e | 2020-09-02 11:16:23 +0300 | [diff] [blame] | 2654 | DECLARE_RDMA_OBJ_SIZE(ib_rwq_ind_table); |
Leon Romanovsky | 68e326d | 2019-04-03 16:42:43 +0300 | [diff] [blame] | 2655 | DECLARE_RDMA_OBJ_SIZE(ib_srq); |
Leon Romanovsky | a2a074e | 2019-02-12 20:39:16 +0200 | [diff] [blame] | 2656 | DECLARE_RDMA_OBJ_SIZE(ib_ucontext); |
Leon Romanovsky | 28ad5f65 | 2020-06-30 13:18:54 +0300 | [diff] [blame] | 2657 | DECLARE_RDMA_OBJ_SIZE(ib_xrcd); |
Kamal Heib | 521ed0d | 2018-12-10 21:09:30 +0200 | [diff] [blame] | 2658 | }; |
| 2659 | |
Parav Pandit | cebe556 | 2019-02-26 13:56:11 +0200 | [diff] [blame] | 2660 | struct ib_core_device { |
| 2661 | /* device must be the first element in structure until, |
| 2662 | * union of ib_core_device and device exists in ib_device. |
| 2663 | */ |
| 2664 | struct device dev; |
Parav Pandit | 4e0f7b9 | 2019-02-26 13:56:13 +0200 | [diff] [blame] | 2665 | possible_net_t rdma_net; |
Parav Pandit | cebe556 | 2019-02-26 13:56:11 +0200 | [diff] [blame] | 2666 | struct kobject *ports_kobj; |
| 2667 | struct list_head port_list; |
| 2668 | struct ib_device *owner; /* reach back to owner ib_device */ |
| 2669 | }; |
Leon Romanovsky | 41eda65 | 2019-02-18 22:25:47 +0200 | [diff] [blame] | 2670 | |
Parav Pandit | cebe556 | 2019-02-26 13:56:11 +0200 | [diff] [blame] | 2671 | struct rdma_restrack_root; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2672 | struct ib_device { |
Bart Van Assche | 0957c29 | 2017-03-07 22:56:53 +0000 | [diff] [blame] | 2673 | /* Do not access @dma_device directly from ULP nor from HW drivers. */ |
| 2674 | struct device *dma_device; |
Kamal Heib | 3023a1e | 2018-12-10 21:09:48 +0200 | [diff] [blame] | 2675 | struct ib_device_ops ops; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2676 | char name[IB_DEVICE_NAME_MAX]; |
Jason Gunthorpe | 324e227 | 2019-02-12 21:12:51 -0700 | [diff] [blame] | 2677 | struct rcu_head rcu_head; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2678 | |
| 2679 | struct list_head event_handler_list; |
Parav Pandit | 6b57cea | 2019-12-12 13:30:22 +0200 | [diff] [blame] | 2680 | /* Protects event_handler_list */ |
| 2681 | struct rw_semaphore event_handler_rwsem; |
| 2682 | |
| 2683 | /* Protects QP's event_handler calls and open_qp list */ |
Parav Pandit | 40adf68 | 2019-12-12 13:30:24 +0200 | [diff] [blame] | 2684 | spinlock_t qp_open_list_lock; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2685 | |
Jason Gunthorpe | 921eab1 | 2019-02-06 22:41:54 -0700 | [diff] [blame] | 2686 | struct rw_semaphore client_data_rwsem; |
Jason Gunthorpe | 0df91bb | 2019-02-06 22:41:53 -0700 | [diff] [blame] | 2687 | struct xarray client_data; |
Jason Gunthorpe | d089989 | 2019-02-12 21:12:53 -0700 | [diff] [blame] | 2688 | struct mutex unregistration_lock; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2689 | |
Parav Pandit | 17e1064 | 2019-12-12 13:30:23 +0200 | [diff] [blame] | 2690 | /* Synchronize GID, Pkey cache entries, subnet prefix, LMC */ |
| 2691 | rwlock_t cache_lock; |
Ira Weiny | 7738613 | 2015-05-13 20:02:58 -0400 | [diff] [blame] | 2692 | /** |
Jason Gunthorpe | 8ceb135 | 2019-02-12 21:12:48 -0700 | [diff] [blame] | 2693 | * port_data is indexed by port number |
Ira Weiny | 7738613 | 2015-05-13 20:02:58 -0400 | [diff] [blame] | 2694 | */ |
Jason Gunthorpe | 8ceb135 | 2019-02-12 21:12:48 -0700 | [diff] [blame] | 2695 | struct ib_port_data *port_data; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2696 | |
Michael S. Tsirkin | f4fd0b2 | 2007-05-03 13:48:47 +0300 | [diff] [blame] | 2697 | int num_comp_vectors; |
| 2698 | |
Parav Pandit | cebe556 | 2019-02-26 13:56:11 +0200 | [diff] [blame] | 2699 | union { |
| 2700 | struct device dev; |
| 2701 | struct ib_core_device coredev; |
| 2702 | }; |
| 2703 | |
Jason Gunthorpe | b7066b3 | 2021-06-11 19:00:26 +0300 | [diff] [blame] | 2704 | /* First group is for device attributes, |
| 2705 | * Second group is for driver provided attributes (optional). |
| 2706 | * Third group is for the hw_stats |
| 2707 | * It is a NULL terminated array. |
Parav Pandit | d4122f5 | 2018-10-11 22:31:53 +0300 | [diff] [blame] | 2708 | */ |
Jason Gunthorpe | b7066b3 | 2021-06-11 19:00:26 +0300 | [diff] [blame] | 2709 | const struct attribute_group *groups[4]; |
Parav Pandit | adee9f3 | 2018-09-05 09:47:58 +0300 | [diff] [blame] | 2710 | |
Alexander Chiang | 17a55f7 | 2010-02-02 19:09:16 +0000 | [diff] [blame] | 2711 | u64 uverbs_cmd_mask; |
Roland Dreier | 274c089 | 2005-09-29 14:17:48 -0700 | [diff] [blame] | 2712 | |
Yuval Shaia | bd99fde | 2016-08-25 10:57:07 -0700 | [diff] [blame] | 2713 | char node_desc[IB_DEVICE_NODE_DESC_MAX]; |
Sean Hefty | cf311cd | 2006-01-10 07:39:34 -0800 | [diff] [blame] | 2714 | __be64 node_guid; |
Steve Wise | 96f15c0 | 2008-07-14 23:48:53 -0700 | [diff] [blame] | 2715 | u32 local_dma_lkey; |
Hal Rosenstock | 4139032 | 2015-06-29 09:57:00 -0400 | [diff] [blame] | 2716 | u16 is_switch:1; |
Gal Pressman | 6780c4f | 2019-01-22 10:08:22 +0200 | [diff] [blame] | 2717 | /* Indicates kernel verbs support, should not be used in drivers */ |
| 2718 | u16 kverbs_provider:1; |
Yamin Friedman | da66297 | 2019-07-08 13:59:03 +0300 | [diff] [blame] | 2719 | /* CQ adaptive moderation (RDMA DIM) */ |
| 2720 | u16 use_cq_dim:1; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2721 | u8 node_type; |
Mark Bloch | 1fb7f89 | 2021-03-01 09:04:20 +0200 | [diff] [blame] | 2722 | u32 phys_port_cnt; |
Ira Weiny | 3e153a9 | 2015-12-18 10:59:44 +0200 | [diff] [blame] | 2723 | struct ib_device_attr attrs; |
Jason Gunthorpe | 467f432 | 2021-06-11 19:00:22 +0300 | [diff] [blame] | 2724 | struct hw_stats_device_data *hw_stats_data; |
Ira Weiny | 7738613 | 2015-05-13 20:02:58 -0400 | [diff] [blame] | 2725 | |
Parav Pandit | 43579b5 | 2017-01-10 00:02:14 +0000 | [diff] [blame] | 2726 | #ifdef CONFIG_CGROUP_RDMA |
| 2727 | struct rdmacg_device cg_device; |
| 2728 | #endif |
| 2729 | |
Leon Romanovsky | ecc82c5 | 2017-06-18 14:39:59 +0300 | [diff] [blame] | 2730 | u32 index; |
Yamin Friedman | c7ff819 | 2020-05-27 11:34:53 +0300 | [diff] [blame] | 2731 | |
| 2732 | spinlock_t cq_pools_lock; |
| 2733 | struct list_head cq_pools[IB_POLL_LAST_POOL_TYPE + 1]; |
| 2734 | |
Leon Romanovsky | 41eda65 | 2019-02-18 22:25:47 +0200 | [diff] [blame] | 2735 | struct rdma_restrack_root *res; |
Leon Romanovsky | ecc82c5 | 2017-06-18 14:39:59 +0300 | [diff] [blame] | 2736 | |
Jason Gunthorpe | 0cbf432 | 2018-11-12 22:59:50 +0200 | [diff] [blame] | 2737 | const struct uapi_definition *driver_def; |
Jason Gunthorpe | d79af72 | 2019-01-10 14:02:24 -0700 | [diff] [blame] | 2738 | |
Parav Pandit | 01b6711 | 2018-11-16 03:50:57 +0200 | [diff] [blame] | 2739 | /* |
Jason Gunthorpe | d79af72 | 2019-01-10 14:02:24 -0700 | [diff] [blame] | 2740 | * Positive refcount indicates that the device is currently |
| 2741 | * registered and cannot be unregistered. |
Parav Pandit | 01b6711 | 2018-11-16 03:50:57 +0200 | [diff] [blame] | 2742 | */ |
| 2743 | refcount_t refcount; |
| 2744 | struct completion unreg_completion; |
Jason Gunthorpe | d089989 | 2019-02-12 21:12:53 -0700 | [diff] [blame] | 2745 | struct work_struct unregistration_work; |
Steve Wise | 3856ec4 | 2019-02-15 11:03:53 -0800 | [diff] [blame] | 2746 | |
| 2747 | const struct rdma_link_ops *link_ops; |
Parav Pandit | 4e0f7b9 | 2019-02-26 13:56:13 +0200 | [diff] [blame] | 2748 | |
| 2749 | /* Protects compat_devs xarray modifications */ |
| 2750 | struct mutex compat_devs_mutex; |
| 2751 | /* Maintains compat devices for each net namespace */ |
| 2752 | struct xarray compat_devs; |
Kamal Heib | dd05cb8 | 2019-04-29 14:59:06 +0300 | [diff] [blame] | 2753 | |
| 2754 | /* Used by iWarp CM */ |
| 2755 | char iw_ifname[IFNAMSIZ]; |
| 2756 | u32 iw_driver_flags; |
Maor Gottlieb | bd3920e | 2020-04-30 22:21:43 +0300 | [diff] [blame] | 2757 | u32 lag_flags; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2758 | }; |
| 2759 | |
Leon Romanovsky | 514aee6 | 2021-07-23 14:39:50 +0300 | [diff] [blame] | 2760 | static inline void *rdma_zalloc_obj(struct ib_device *dev, size_t size, |
| 2761 | gfp_t gfp, bool is_numa_aware) |
| 2762 | { |
| 2763 | if (is_numa_aware && dev->ops.get_numa_node) |
| 2764 | return kzalloc_node(size, gfp, dev->ops.get_numa_node(dev)); |
| 2765 | |
| 2766 | return kzalloc(size, gfp); |
| 2767 | } |
| 2768 | |
Jason Gunthorpe | 0e2d00e | 2019-06-13 21:38:18 -0300 | [diff] [blame] | 2769 | struct ib_client_nl_info; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2770 | struct ib_client { |
Jason Gunthorpe | e59178d | 2019-02-06 22:41:52 -0700 | [diff] [blame] | 2771 | const char *name; |
Jason Gunthorpe | 11a0ae4 | 2020-04-21 20:24:40 +0300 | [diff] [blame] | 2772 | int (*add)(struct ib_device *ibdev); |
Haggai Eran | 7c1eb45 | 2015-07-30 17:50:14 +0300 | [diff] [blame] | 2773 | void (*remove)(struct ib_device *, void *client_data); |
Leon Romanovsky | dc1435c | 2019-05-17 15:43:10 +0300 | [diff] [blame] | 2774 | void (*rename)(struct ib_device *dev, void *client_data); |
Jason Gunthorpe | 0e2d00e | 2019-06-13 21:38:18 -0300 | [diff] [blame] | 2775 | int (*get_nl_info)(struct ib_device *ibdev, void *client_data, |
| 2776 | struct ib_client_nl_info *res); |
| 2777 | int (*get_global_nl_info)(struct ib_client_nl_info *res); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2778 | |
Yotam Kenneth | 9268f72 | 2015-07-30 17:50:15 +0300 | [diff] [blame] | 2779 | /* Returns the net_dev belonging to this ib_client and matching the |
| 2780 | * given parameters. |
| 2781 | * @dev: An RDMA device that the net_dev use for communication. |
| 2782 | * @port: A physical port number on the RDMA device. |
| 2783 | * @pkey: P_Key that the net_dev uses if applicable. |
| 2784 | * @gid: A GID that the net_dev uses to communicate. |
| 2785 | * @addr: An IP address the net_dev is configured with. |
| 2786 | * @client_data: The device's client data set by ib_set_client_data(). |
| 2787 | * |
| 2788 | * An ib_client that implements a net_dev on top of RDMA devices |
| 2789 | * (such as IP over IB) should implement this callback, allowing the |
| 2790 | * rdma_cm module to find the right net_dev for a given request. |
| 2791 | * |
| 2792 | * The caller is responsible for calling dev_put on the returned |
| 2793 | * netdev. */ |
| 2794 | struct net_device *(*get_net_dev_by_params)( |
| 2795 | struct ib_device *dev, |
Mark Bloch | 1fb7f89 | 2021-03-01 09:04:20 +0200 | [diff] [blame] | 2796 | u32 port, |
Yotam Kenneth | 9268f72 | 2015-07-30 17:50:15 +0300 | [diff] [blame] | 2797 | u16 pkey, |
| 2798 | const union ib_gid *gid, |
| 2799 | const struct sockaddr *addr, |
| 2800 | void *client_data); |
Jason Gunthorpe | 621e55f | 2019-07-31 11:18:40 +0300 | [diff] [blame] | 2801 | |
| 2802 | refcount_t uses; |
| 2803 | struct completion uses_zero; |
Jason Gunthorpe | e59178d | 2019-02-06 22:41:52 -0700 | [diff] [blame] | 2804 | u32 client_id; |
Gal Pressman | 6780c4f | 2019-01-22 10:08:22 +0200 | [diff] [blame] | 2805 | |
| 2806 | /* kverbs are not required by the client */ |
| 2807 | u8 no_kverbs_req:1; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2808 | }; |
| 2809 | |
Shiraz Saleem | a808273 | 2019-05-06 08:53:33 -0500 | [diff] [blame] | 2810 | /* |
| 2811 | * IB block DMA iterator |
| 2812 | * |
| 2813 | * Iterates the DMA-mapped SGL in contiguous memory blocks aligned |
| 2814 | * to a HW supported page size. |
| 2815 | */ |
| 2816 | struct ib_block_iter { |
| 2817 | /* internal states */ |
| 2818 | struct scatterlist *__sg; /* sg holding the current aligned block */ |
| 2819 | dma_addr_t __dma_addr; /* unaligned DMA address of this block */ |
| 2820 | unsigned int __sg_nents; /* number of SG entries */ |
| 2821 | unsigned int __sg_advance; /* number of bytes to advance in sg in next step */ |
| 2822 | unsigned int __pg_bit; /* alignment of current block */ |
| 2823 | }; |
| 2824 | |
Leon Romanovsky | 459cc69 | 2019-01-30 12:49:11 +0200 | [diff] [blame] | 2825 | struct ib_device *_ib_alloc_device(size_t size); |
| 2826 | #define ib_alloc_device(drv_struct, member) \ |
| 2827 | container_of(_ib_alloc_device(sizeof(struct drv_struct) + \ |
| 2828 | BUILD_BUG_ON_ZERO(offsetof( \ |
| 2829 | struct drv_struct, member))), \ |
| 2830 | struct drv_struct, member) |
| 2831 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2832 | void ib_dealloc_device(struct ib_device *device); |
| 2833 | |
Leon Romanovsky | 9abb0d1 | 2017-06-27 16:49:53 +0300 | [diff] [blame] | 2834 | void ib_get_device_fw_str(struct ib_device *device, char *str); |
Ira Weiny | 5fa76c2 | 2016-06-15 02:21:56 -0400 | [diff] [blame] | 2835 | |
Jason Gunthorpe | e0477b3 | 2020-10-08 11:27:52 +0300 | [diff] [blame] | 2836 | int ib_register_device(struct ib_device *device, const char *name, |
| 2837 | struct device *dma_device); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2838 | void ib_unregister_device(struct ib_device *device); |
Jason Gunthorpe | d089989 | 2019-02-12 21:12:53 -0700 | [diff] [blame] | 2839 | void ib_unregister_driver(enum rdma_driver_id driver_id); |
| 2840 | void ib_unregister_device_and_put(struct ib_device *device); |
| 2841 | void ib_unregister_device_queued(struct ib_device *ib_dev); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2842 | |
| 2843 | int ib_register_client (struct ib_client *client); |
| 2844 | void ib_unregister_client(struct ib_client *client); |
| 2845 | |
Shiraz Saleem | a808273 | 2019-05-06 08:53:33 -0500 | [diff] [blame] | 2846 | void __rdma_block_iter_start(struct ib_block_iter *biter, |
| 2847 | struct scatterlist *sglist, |
| 2848 | unsigned int nents, |
| 2849 | unsigned long pgsz); |
| 2850 | bool __rdma_block_iter_next(struct ib_block_iter *biter); |
| 2851 | |
| 2852 | /** |
| 2853 | * rdma_block_iter_dma_address - get the aligned dma address of the current |
| 2854 | * block held by the block iterator. |
| 2855 | * @biter: block iterator holding the memory block |
| 2856 | */ |
| 2857 | static inline dma_addr_t |
| 2858 | rdma_block_iter_dma_address(struct ib_block_iter *biter) |
| 2859 | { |
| 2860 | return biter->__dma_addr & ~(BIT_ULL(biter->__pg_bit) - 1); |
| 2861 | } |
| 2862 | |
| 2863 | /** |
| 2864 | * rdma_for_each_block - iterate over contiguous memory blocks of the sg list |
| 2865 | * @sglist: sglist to iterate over |
| 2866 | * @biter: block iterator holding the memory block |
| 2867 | * @nents: maximum number of sg entries to iterate over |
| 2868 | * @pgsz: best HW supported page size to use |
| 2869 | * |
| 2870 | * Callers may use rdma_block_iter_dma_address() to get each |
| 2871 | * blocks aligned DMA address. |
| 2872 | */ |
| 2873 | #define rdma_for_each_block(sglist, biter, nents, pgsz) \ |
| 2874 | for (__rdma_block_iter_start(biter, sglist, nents, \ |
| 2875 | pgsz); \ |
| 2876 | __rdma_block_iter_next(biter);) |
| 2877 | |
Jason Gunthorpe | 0df91bb | 2019-02-06 22:41:53 -0700 | [diff] [blame] | 2878 | /** |
| 2879 | * ib_get_client_data - Get IB client context |
| 2880 | * @device:Device to get context for |
| 2881 | * @client:Client to get context for |
| 2882 | * |
| 2883 | * ib_get_client_data() returns the client context data set with |
| 2884 | * ib_set_client_data(). This can only be called while the client is |
| 2885 | * registered to the device, once the ib_client remove() callback returns this |
| 2886 | * cannot be called. |
| 2887 | */ |
| 2888 | static inline void *ib_get_client_data(struct ib_device *device, |
| 2889 | struct ib_client *client) |
| 2890 | { |
| 2891 | return xa_load(&device->client_data, client->client_id); |
| 2892 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2893 | void ib_set_client_data(struct ib_device *device, struct ib_client *client, |
| 2894 | void *data); |
Kamal Heib | 521ed0d | 2018-12-10 21:09:30 +0200 | [diff] [blame] | 2895 | void ib_set_device_ops(struct ib_device *device, |
| 2896 | const struct ib_device_ops *ops); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2897 | |
Jason Gunthorpe | 5f9794d | 2018-09-16 20:43:08 +0300 | [diff] [blame] | 2898 | int rdma_user_mmap_io(struct ib_ucontext *ucontext, struct vm_area_struct *vma, |
Michal Kalderon | c043ff2 | 2019-10-30 11:44:12 +0200 | [diff] [blame] | 2899 | unsigned long pfn, unsigned long size, pgprot_t prot, |
| 2900 | struct rdma_user_mmap_entry *entry); |
Michal Kalderon | 3411f9f | 2019-10-30 11:44:11 +0200 | [diff] [blame] | 2901 | int rdma_user_mmap_entry_insert(struct ib_ucontext *ucontext, |
| 2902 | struct rdma_user_mmap_entry *entry, |
| 2903 | size_t length); |
Yishai Hadas | 7a763d1 | 2019-12-12 12:02:36 +0200 | [diff] [blame] | 2904 | int rdma_user_mmap_entry_insert_range(struct ib_ucontext *ucontext, |
| 2905 | struct rdma_user_mmap_entry *entry, |
| 2906 | size_t length, u32 min_pgoff, |
| 2907 | u32 max_pgoff); |
| 2908 | |
Michal Kalderon | 3411f9f | 2019-10-30 11:44:11 +0200 | [diff] [blame] | 2909 | struct rdma_user_mmap_entry * |
| 2910 | rdma_user_mmap_entry_get_pgoff(struct ib_ucontext *ucontext, |
| 2911 | unsigned long pgoff); |
| 2912 | struct rdma_user_mmap_entry * |
| 2913 | rdma_user_mmap_entry_get(struct ib_ucontext *ucontext, |
| 2914 | struct vm_area_struct *vma); |
| 2915 | void rdma_user_mmap_entry_put(struct rdma_user_mmap_entry *entry); |
| 2916 | |
| 2917 | void rdma_user_mmap_entry_remove(struct rdma_user_mmap_entry *entry); |
Jason Gunthorpe | 5f9794d | 2018-09-16 20:43:08 +0300 | [diff] [blame] | 2918 | |
Roland Dreier | e2773c0 | 2005-07-07 17:57:10 -0700 | [diff] [blame] | 2919 | static inline int ib_copy_from_udata(void *dest, struct ib_udata *udata, size_t len) |
| 2920 | { |
| 2921 | return copy_from_user(dest, udata->inbuf, len) ? -EFAULT : 0; |
| 2922 | } |
| 2923 | |
| 2924 | static inline int ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len) |
| 2925 | { |
Yann Droneaud | 43c61165 | 2015-02-05 22:10:18 +0100 | [diff] [blame] | 2926 | return copy_to_user(udata->outbuf, src, len) ? -EFAULT : 0; |
Roland Dreier | e2773c0 | 2005-07-07 17:57:10 -0700 | [diff] [blame] | 2927 | } |
| 2928 | |
Matan Barak | c66db31 | 2018-03-19 15:02:36 +0200 | [diff] [blame] | 2929 | static inline bool ib_is_buffer_cleared(const void __user *p, |
| 2930 | size_t len) |
Matan Barak | 301a721 | 2015-12-15 20:30:10 +0200 | [diff] [blame] | 2931 | { |
Markus Elfring | 92d27ae | 2016-08-22 18:23:24 +0200 | [diff] [blame] | 2932 | bool ret; |
Matan Barak | 301a721 | 2015-12-15 20:30:10 +0200 | [diff] [blame] | 2933 | u8 *buf; |
| 2934 | |
| 2935 | if (len > USHRT_MAX) |
| 2936 | return false; |
| 2937 | |
Markus Elfring | 92d27ae | 2016-08-22 18:23:24 +0200 | [diff] [blame] | 2938 | buf = memdup_user(p, len); |
| 2939 | if (IS_ERR(buf)) |
Matan Barak | 301a721 | 2015-12-15 20:30:10 +0200 | [diff] [blame] | 2940 | return false; |
| 2941 | |
Matan Barak | 301a721 | 2015-12-15 20:30:10 +0200 | [diff] [blame] | 2942 | ret = !memchr_inv(buf, 0, len); |
Matan Barak | 301a721 | 2015-12-15 20:30:10 +0200 | [diff] [blame] | 2943 | kfree(buf); |
| 2944 | return ret; |
| 2945 | } |
| 2946 | |
Matan Barak | c66db31 | 2018-03-19 15:02:36 +0200 | [diff] [blame] | 2947 | static inline bool ib_is_udata_cleared(struct ib_udata *udata, |
| 2948 | size_t offset, |
| 2949 | size_t len) |
| 2950 | { |
| 2951 | return ib_is_buffer_cleared(udata->inbuf + offset, len); |
| 2952 | } |
| 2953 | |
Roland Dreier | 8a51866 | 2006-02-13 12:48:12 -0800 | [diff] [blame] | 2954 | /** |
| 2955 | * ib_modify_qp_is_ok - Check that the supplied attribute mask |
| 2956 | * contains all required attributes and no attributes not allowed for |
| 2957 | * the given QP state transition. |
| 2958 | * @cur_state: Current QP state |
| 2959 | * @next_state: Next QP state |
| 2960 | * @type: QP type |
| 2961 | * @mask: Mask of supplied QP attributes |
| 2962 | * |
| 2963 | * This function is a helper function that a low-level driver's |
| 2964 | * modify_qp method can use to validate the consumer's input. It |
| 2965 | * checks that cur_state and next_state are valid QP states, that a |
| 2966 | * transition from cur_state to next_state is allowed by the IB spec, |
| 2967 | * and that the attribute mask supplied is allowed for the transition. |
| 2968 | */ |
Leon Romanovsky | 19b1f54 | 2018-03-11 13:51:35 +0200 | [diff] [blame] | 2969 | bool ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state, |
Kamal Heib | d31131b | 2018-10-02 16:11:21 +0300 | [diff] [blame] | 2970 | enum ib_qp_type type, enum ib_qp_attr_mask mask); |
Roland Dreier | 8a51866 | 2006-02-13 12:48:12 -0800 | [diff] [blame] | 2971 | |
Leon Romanovsky | dcc9881 | 2017-08-17 15:50:36 +0300 | [diff] [blame] | 2972 | void ib_register_event_handler(struct ib_event_handler *event_handler); |
| 2973 | void ib_unregister_event_handler(struct ib_event_handler *event_handler); |
Parav Pandit | 6b57cea | 2019-12-12 13:30:22 +0200 | [diff] [blame] | 2974 | void ib_dispatch_event(const struct ib_event *event); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2975 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2976 | int ib_query_port(struct ib_device *device, |
Mark Bloch | 1fb7f89 | 2021-03-01 09:04:20 +0200 | [diff] [blame] | 2977 | u32 port_num, struct ib_port_attr *port_attr); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2978 | |
Eli Cohen | a3f5ada | 2010-09-27 17:51:10 -0700 | [diff] [blame] | 2979 | enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device, |
Mark Bloch | 1fb7f89 | 2021-03-01 09:04:20 +0200 | [diff] [blame] | 2980 | u32 port_num); |
Eli Cohen | a3f5ada | 2010-09-27 17:51:10 -0700 | [diff] [blame] | 2981 | |
Ira Weiny | 0cf18d7 | 2015-05-13 20:02:55 -0400 | [diff] [blame] | 2982 | /** |
Hal Rosenstock | 4139032 | 2015-06-29 09:57:00 -0400 | [diff] [blame] | 2983 | * rdma_cap_ib_switch - Check if the device is IB switch |
| 2984 | * @device: Device to check |
| 2985 | * |
| 2986 | * Device driver is responsible for setting is_switch bit on |
| 2987 | * in ib_device structure at init time. |
| 2988 | * |
| 2989 | * Return: true if the device is IB switch. |
| 2990 | */ |
| 2991 | static inline bool rdma_cap_ib_switch(const struct ib_device *device) |
| 2992 | { |
| 2993 | return device->is_switch; |
| 2994 | } |
| 2995 | |
| 2996 | /** |
Ira Weiny | 0cf18d7 | 2015-05-13 20:02:55 -0400 | [diff] [blame] | 2997 | * rdma_start_port - Return the first valid port number for the device |
| 2998 | * specified |
| 2999 | * |
| 3000 | * @device: Device to be checked |
| 3001 | * |
| 3002 | * Return start port number |
| 3003 | */ |
Mark Bloch | 1fb7f89 | 2021-03-01 09:04:20 +0200 | [diff] [blame] | 3004 | static inline u32 rdma_start_port(const struct ib_device *device) |
Ira Weiny | 0cf18d7 | 2015-05-13 20:02:55 -0400 | [diff] [blame] | 3005 | { |
Hal Rosenstock | 4139032 | 2015-06-29 09:57:00 -0400 | [diff] [blame] | 3006 | return rdma_cap_ib_switch(device) ? 0 : 1; |
Ira Weiny | 0cf18d7 | 2015-05-13 20:02:55 -0400 | [diff] [blame] | 3007 | } |
| 3008 | |
| 3009 | /** |
Jason Gunthorpe | ea1075e | 2019-02-12 21:12:47 -0700 | [diff] [blame] | 3010 | * rdma_for_each_port - Iterate over all valid port numbers of the IB device |
| 3011 | * @device - The struct ib_device * to iterate over |
| 3012 | * @iter - The unsigned int to store the port number |
| 3013 | */ |
| 3014 | #define rdma_for_each_port(device, iter) \ |
Mark Bloch | 1fb7f89 | 2021-03-01 09:04:20 +0200 | [diff] [blame] | 3015 | for (iter = rdma_start_port(device + \ |
| 3016 | BUILD_BUG_ON_ZERO(!__same_type(u32, \ |
| 3017 | iter))); \ |
| 3018 | iter <= rdma_end_port(device); iter++) |
Jason Gunthorpe | ea1075e | 2019-02-12 21:12:47 -0700 | [diff] [blame] | 3019 | |
| 3020 | /** |
Ira Weiny | 0cf18d7 | 2015-05-13 20:02:55 -0400 | [diff] [blame] | 3021 | * rdma_end_port - Return the last valid port number for the device |
| 3022 | * specified |
| 3023 | * |
| 3024 | * @device: Device to be checked |
| 3025 | * |
| 3026 | * Return last port number |
| 3027 | */ |
Mark Bloch | 1fb7f89 | 2021-03-01 09:04:20 +0200 | [diff] [blame] | 3028 | static inline u32 rdma_end_port(const struct ib_device *device) |
Ira Weiny | 0cf18d7 | 2015-05-13 20:02:55 -0400 | [diff] [blame] | 3029 | { |
Hal Rosenstock | 4139032 | 2015-06-29 09:57:00 -0400 | [diff] [blame] | 3030 | return rdma_cap_ib_switch(device) ? 0 : device->phys_port_cnt; |
Ira Weiny | 0cf18d7 | 2015-05-13 20:02:55 -0400 | [diff] [blame] | 3031 | } |
| 3032 | |
Yuval Shaia | 24dc831 | 2017-01-25 18:41:37 +0200 | [diff] [blame] | 3033 | static inline int rdma_is_port_valid(const struct ib_device *device, |
| 3034 | unsigned int port) |
| 3035 | { |
| 3036 | return (port >= rdma_start_port(device) && |
| 3037 | port <= rdma_end_port(device)); |
| 3038 | } |
| 3039 | |
Artemy Kovalyov | b02289b | 2018-07-04 15:57:50 +0300 | [diff] [blame] | 3040 | static inline bool rdma_is_grh_required(const struct ib_device *device, |
Mark Bloch | 1fb7f89 | 2021-03-01 09:04:20 +0200 | [diff] [blame] | 3041 | u32 port_num) |
Artemy Kovalyov | b02289b | 2018-07-04 15:57:50 +0300 | [diff] [blame] | 3042 | { |
Jason Gunthorpe | 8ceb135 | 2019-02-12 21:12:48 -0700 | [diff] [blame] | 3043 | return device->port_data[port_num].immutable.core_cap_flags & |
| 3044 | RDMA_CORE_PORT_IB_GRH_REQUIRED; |
Artemy Kovalyov | b02289b | 2018-07-04 15:57:50 +0300 | [diff] [blame] | 3045 | } |
| 3046 | |
Mark Bloch | 1fb7f89 | 2021-03-01 09:04:20 +0200 | [diff] [blame] | 3047 | static inline bool rdma_protocol_ib(const struct ib_device *device, |
| 3048 | u32 port_num) |
Michael Wang | de66be9 | 2015-05-05 14:50:19 +0200 | [diff] [blame] | 3049 | { |
Jason Gunthorpe | 8ceb135 | 2019-02-12 21:12:48 -0700 | [diff] [blame] | 3050 | return device->port_data[port_num].immutable.core_cap_flags & |
| 3051 | RDMA_CORE_CAP_PROT_IB; |
Michael Wang | de66be9 | 2015-05-05 14:50:19 +0200 | [diff] [blame] | 3052 | } |
| 3053 | |
Mark Bloch | 1fb7f89 | 2021-03-01 09:04:20 +0200 | [diff] [blame] | 3054 | static inline bool rdma_protocol_roce(const struct ib_device *device, |
| 3055 | u32 port_num) |
Michael Wang | de66be9 | 2015-05-05 14:50:19 +0200 | [diff] [blame] | 3056 | { |
Jason Gunthorpe | 8ceb135 | 2019-02-12 21:12:48 -0700 | [diff] [blame] | 3057 | return device->port_data[port_num].immutable.core_cap_flags & |
| 3058 | (RDMA_CORE_CAP_PROT_ROCE | RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP); |
Matan Barak | 7766a99 | 2015-12-23 14:56:50 +0200 | [diff] [blame] | 3059 | } |
| 3060 | |
Mark Bloch | 1fb7f89 | 2021-03-01 09:04:20 +0200 | [diff] [blame] | 3061 | static inline bool rdma_protocol_roce_udp_encap(const struct ib_device *device, |
| 3062 | u32 port_num) |
Matan Barak | 7766a99 | 2015-12-23 14:56:50 +0200 | [diff] [blame] | 3063 | { |
Jason Gunthorpe | 8ceb135 | 2019-02-12 21:12:48 -0700 | [diff] [blame] | 3064 | return device->port_data[port_num].immutable.core_cap_flags & |
| 3065 | RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP; |
Matan Barak | 7766a99 | 2015-12-23 14:56:50 +0200 | [diff] [blame] | 3066 | } |
| 3067 | |
Mark Bloch | 1fb7f89 | 2021-03-01 09:04:20 +0200 | [diff] [blame] | 3068 | static inline bool rdma_protocol_roce_eth_encap(const struct ib_device *device, |
| 3069 | u32 port_num) |
Matan Barak | 7766a99 | 2015-12-23 14:56:50 +0200 | [diff] [blame] | 3070 | { |
Jason Gunthorpe | 8ceb135 | 2019-02-12 21:12:48 -0700 | [diff] [blame] | 3071 | return device->port_data[port_num].immutable.core_cap_flags & |
| 3072 | RDMA_CORE_CAP_PROT_ROCE; |
Michael Wang | de66be9 | 2015-05-05 14:50:19 +0200 | [diff] [blame] | 3073 | } |
| 3074 | |
Mark Bloch | 1fb7f89 | 2021-03-01 09:04:20 +0200 | [diff] [blame] | 3075 | static inline bool rdma_protocol_iwarp(const struct ib_device *device, |
| 3076 | u32 port_num) |
Michael Wang | de66be9 | 2015-05-05 14:50:19 +0200 | [diff] [blame] | 3077 | { |
Jason Gunthorpe | 8ceb135 | 2019-02-12 21:12:48 -0700 | [diff] [blame] | 3078 | return device->port_data[port_num].immutable.core_cap_flags & |
| 3079 | RDMA_CORE_CAP_PROT_IWARP; |
Michael Wang | de66be9 | 2015-05-05 14:50:19 +0200 | [diff] [blame] | 3080 | } |
| 3081 | |
Mark Bloch | 1fb7f89 | 2021-03-01 09:04:20 +0200 | [diff] [blame] | 3082 | static inline bool rdma_ib_or_roce(const struct ib_device *device, |
| 3083 | u32 port_num) |
Michael Wang | de66be9 | 2015-05-05 14:50:19 +0200 | [diff] [blame] | 3084 | { |
Matan Barak | 7766a99 | 2015-12-23 14:56:50 +0200 | [diff] [blame] | 3085 | return rdma_protocol_ib(device, port_num) || |
| 3086 | rdma_protocol_roce(device, port_num); |
Michael Wang | de66be9 | 2015-05-05 14:50:19 +0200 | [diff] [blame] | 3087 | } |
| 3088 | |
Mark Bloch | 1fb7f89 | 2021-03-01 09:04:20 +0200 | [diff] [blame] | 3089 | static inline bool rdma_protocol_raw_packet(const struct ib_device *device, |
| 3090 | u32 port_num) |
Or Gerlitz | aa773bd | 2017-01-24 13:02:35 +0200 | [diff] [blame] | 3091 | { |
Jason Gunthorpe | 8ceb135 | 2019-02-12 21:12:48 -0700 | [diff] [blame] | 3092 | return device->port_data[port_num].immutable.core_cap_flags & |
| 3093 | RDMA_CORE_CAP_PROT_RAW_PACKET; |
Or Gerlitz | aa773bd | 2017-01-24 13:02:35 +0200 | [diff] [blame] | 3094 | } |
| 3095 | |
Mark Bloch | 1fb7f89 | 2021-03-01 09:04:20 +0200 | [diff] [blame] | 3096 | static inline bool rdma_protocol_usnic(const struct ib_device *device, |
| 3097 | u32 port_num) |
Or Gerlitz | ce1e055 | 2017-01-24 13:02:38 +0200 | [diff] [blame] | 3098 | { |
Jason Gunthorpe | 8ceb135 | 2019-02-12 21:12:48 -0700 | [diff] [blame] | 3099 | return device->port_data[port_num].immutable.core_cap_flags & |
| 3100 | RDMA_CORE_CAP_PROT_USNIC; |
Or Gerlitz | ce1e055 | 2017-01-24 13:02:38 +0200 | [diff] [blame] | 3101 | } |
| 3102 | |
Michael Wang | c757dea | 2015-05-05 14:50:32 +0200 | [diff] [blame] | 3103 | /** |
Michael Wang | 296ec00 | 2015-05-18 10:41:45 +0200 | [diff] [blame] | 3104 | * rdma_cap_ib_mad - Check if the port of a device supports Infiniband |
Michael Wang | c757dea | 2015-05-05 14:50:32 +0200 | [diff] [blame] | 3105 | * Management Datagrams. |
Michael Wang | 296ec00 | 2015-05-18 10:41:45 +0200 | [diff] [blame] | 3106 | * @device: Device to check |
| 3107 | * @port_num: Port number to check |
Michael Wang | c757dea | 2015-05-05 14:50:32 +0200 | [diff] [blame] | 3108 | * |
Michael Wang | 296ec00 | 2015-05-18 10:41:45 +0200 | [diff] [blame] | 3109 | * Management Datagrams (MAD) are a required part of the InfiniBand |
| 3110 | * specification and are supported on all InfiniBand devices. A slightly |
| 3111 | * extended version are also supported on OPA interfaces. |
Michael Wang | c757dea | 2015-05-05 14:50:32 +0200 | [diff] [blame] | 3112 | * |
Michael Wang | 296ec00 | 2015-05-18 10:41:45 +0200 | [diff] [blame] | 3113 | * Return: true if the port supports sending/receiving of MAD packets. |
Michael Wang | c757dea | 2015-05-05 14:50:32 +0200 | [diff] [blame] | 3114 | */ |
Mark Bloch | 1fb7f89 | 2021-03-01 09:04:20 +0200 | [diff] [blame] | 3115 | static inline bool rdma_cap_ib_mad(const struct ib_device *device, u32 port_num) |
Michael Wang | c757dea | 2015-05-05 14:50:32 +0200 | [diff] [blame] | 3116 | { |
Jason Gunthorpe | 8ceb135 | 2019-02-12 21:12:48 -0700 | [diff] [blame] | 3117 | return device->port_data[port_num].immutable.core_cap_flags & |
| 3118 | RDMA_CORE_CAP_IB_MAD; |
Michael Wang | c757dea | 2015-05-05 14:50:32 +0200 | [diff] [blame] | 3119 | } |
| 3120 | |
Michael Wang | 29541e3 | 2015-05-05 14:50:33 +0200 | [diff] [blame] | 3121 | /** |
Ira Weiny | 65995fe | 2015-06-06 14:38:32 -0400 | [diff] [blame] | 3122 | * rdma_cap_opa_mad - Check if the port of device provides support for OPA |
| 3123 | * Management Datagrams. |
| 3124 | * @device: Device to check |
| 3125 | * @port_num: Port number to check |
| 3126 | * |
| 3127 | * Intel OmniPath devices extend and/or replace the InfiniBand Management |
| 3128 | * datagrams with their own versions. These OPA MADs share many but not all of |
| 3129 | * the characteristics of InfiniBand MADs. |
| 3130 | * |
| 3131 | * OPA MADs differ in the following ways: |
| 3132 | * |
| 3133 | * 1) MADs are variable size up to 2K |
| 3134 | * IBTA defined MADs remain fixed at 256 bytes |
| 3135 | * 2) OPA SMPs must carry valid PKeys |
| 3136 | * 3) OPA SMP packets are a different format |
| 3137 | * |
| 3138 | * Return: true if the port supports OPA MAD packet formats. |
| 3139 | */ |
Mark Bloch | 1fb7f89 | 2021-03-01 09:04:20 +0200 | [diff] [blame] | 3140 | static inline bool rdma_cap_opa_mad(struct ib_device *device, u32 port_num) |
Ira Weiny | 65995fe | 2015-06-06 14:38:32 -0400 | [diff] [blame] | 3141 | { |
Leon Romanovsky | d3243da | 2019-03-10 17:27:46 +0200 | [diff] [blame] | 3142 | return device->port_data[port_num].immutable.core_cap_flags & |
| 3143 | RDMA_CORE_CAP_OPA_MAD; |
Ira Weiny | 65995fe | 2015-06-06 14:38:32 -0400 | [diff] [blame] | 3144 | } |
| 3145 | |
| 3146 | /** |
Michael Wang | 296ec00 | 2015-05-18 10:41:45 +0200 | [diff] [blame] | 3147 | * rdma_cap_ib_smi - Check if the port of a device provides an Infiniband |
| 3148 | * Subnet Management Agent (SMA) on the Subnet Management Interface (SMI). |
| 3149 | * @device: Device to check |
| 3150 | * @port_num: Port number to check |
Michael Wang | 29541e3 | 2015-05-05 14:50:33 +0200 | [diff] [blame] | 3151 | * |
Michael Wang | 296ec00 | 2015-05-18 10:41:45 +0200 | [diff] [blame] | 3152 | * Each InfiniBand node is required to provide a Subnet Management Agent |
| 3153 | * that the subnet manager can access. Prior to the fabric being fully |
| 3154 | * configured by the subnet manager, the SMA is accessed via a well known |
| 3155 | * interface called the Subnet Management Interface (SMI). This interface |
| 3156 | * uses directed route packets to communicate with the SM to get around the |
| 3157 | * chicken and egg problem of the SM needing to know what's on the fabric |
| 3158 | * in order to configure the fabric, and needing to configure the fabric in |
| 3159 | * order to send packets to the devices on the fabric. These directed |
| 3160 | * route packets do not need the fabric fully configured in order to reach |
| 3161 | * their destination. The SMI is the only method allowed to send |
| 3162 | * directed route packets on an InfiniBand fabric. |
Michael Wang | 29541e3 | 2015-05-05 14:50:33 +0200 | [diff] [blame] | 3163 | * |
Michael Wang | 296ec00 | 2015-05-18 10:41:45 +0200 | [diff] [blame] | 3164 | * Return: true if the port provides an SMI. |
Michael Wang | 29541e3 | 2015-05-05 14:50:33 +0200 | [diff] [blame] | 3165 | */ |
Mark Bloch | 1fb7f89 | 2021-03-01 09:04:20 +0200 | [diff] [blame] | 3166 | static inline bool rdma_cap_ib_smi(const struct ib_device *device, u32 port_num) |
Michael Wang | 29541e3 | 2015-05-05 14:50:33 +0200 | [diff] [blame] | 3167 | { |
Jason Gunthorpe | 8ceb135 | 2019-02-12 21:12:48 -0700 | [diff] [blame] | 3168 | return device->port_data[port_num].immutable.core_cap_flags & |
| 3169 | RDMA_CORE_CAP_IB_SMI; |
Michael Wang | 29541e3 | 2015-05-05 14:50:33 +0200 | [diff] [blame] | 3170 | } |
| 3171 | |
Michael Wang | 72219cea | 2015-05-05 14:50:34 +0200 | [diff] [blame] | 3172 | /** |
| 3173 | * rdma_cap_ib_cm - Check if the port of device has the capability Infiniband |
| 3174 | * Communication Manager. |
Michael Wang | 296ec00 | 2015-05-18 10:41:45 +0200 | [diff] [blame] | 3175 | * @device: Device to check |
| 3176 | * @port_num: Port number to check |
Michael Wang | 72219cea | 2015-05-05 14:50:34 +0200 | [diff] [blame] | 3177 | * |
Michael Wang | 296ec00 | 2015-05-18 10:41:45 +0200 | [diff] [blame] | 3178 | * The InfiniBand Communication Manager is one of many pre-defined General |
| 3179 | * Service Agents (GSA) that are accessed via the General Service |
| 3180 | * Interface (GSI). It's role is to facilitate establishment of connections |
| 3181 | * between nodes as well as other management related tasks for established |
| 3182 | * connections. |
Michael Wang | 72219cea | 2015-05-05 14:50:34 +0200 | [diff] [blame] | 3183 | * |
Michael Wang | 296ec00 | 2015-05-18 10:41:45 +0200 | [diff] [blame] | 3184 | * Return: true if the port supports an IB CM (this does not guarantee that |
| 3185 | * a CM is actually running however). |
Michael Wang | 72219cea | 2015-05-05 14:50:34 +0200 | [diff] [blame] | 3186 | */ |
Mark Bloch | 1fb7f89 | 2021-03-01 09:04:20 +0200 | [diff] [blame] | 3187 | static inline bool rdma_cap_ib_cm(const struct ib_device *device, u32 port_num) |
Michael Wang | 72219cea | 2015-05-05 14:50:34 +0200 | [diff] [blame] | 3188 | { |
Jason Gunthorpe | 8ceb135 | 2019-02-12 21:12:48 -0700 | [diff] [blame] | 3189 | return device->port_data[port_num].immutable.core_cap_flags & |
| 3190 | RDMA_CORE_CAP_IB_CM; |
Michael Wang | 72219cea | 2015-05-05 14:50:34 +0200 | [diff] [blame] | 3191 | } |
| 3192 | |
Michael Wang | 0421533 | 2015-05-05 14:50:35 +0200 | [diff] [blame] | 3193 | /** |
| 3194 | * rdma_cap_iw_cm - Check if the port of device has the capability IWARP |
| 3195 | * Communication Manager. |
Michael Wang | 296ec00 | 2015-05-18 10:41:45 +0200 | [diff] [blame] | 3196 | * @device: Device to check |
| 3197 | * @port_num: Port number to check |
Michael Wang | 0421533 | 2015-05-05 14:50:35 +0200 | [diff] [blame] | 3198 | * |
Michael Wang | 296ec00 | 2015-05-18 10:41:45 +0200 | [diff] [blame] | 3199 | * Similar to above, but specific to iWARP connections which have a different |
| 3200 | * managment protocol than InfiniBand. |
Michael Wang | 0421533 | 2015-05-05 14:50:35 +0200 | [diff] [blame] | 3201 | * |
Michael Wang | 296ec00 | 2015-05-18 10:41:45 +0200 | [diff] [blame] | 3202 | * Return: true if the port supports an iWARP CM (this does not guarantee that |
| 3203 | * a CM is actually running however). |
Michael Wang | 0421533 | 2015-05-05 14:50:35 +0200 | [diff] [blame] | 3204 | */ |
Mark Bloch | 1fb7f89 | 2021-03-01 09:04:20 +0200 | [diff] [blame] | 3205 | static inline bool rdma_cap_iw_cm(const struct ib_device *device, u32 port_num) |
Michael Wang | 0421533 | 2015-05-05 14:50:35 +0200 | [diff] [blame] | 3206 | { |
Jason Gunthorpe | 8ceb135 | 2019-02-12 21:12:48 -0700 | [diff] [blame] | 3207 | return device->port_data[port_num].immutable.core_cap_flags & |
| 3208 | RDMA_CORE_CAP_IW_CM; |
Michael Wang | 0421533 | 2015-05-05 14:50:35 +0200 | [diff] [blame] | 3209 | } |
| 3210 | |
Michael Wang | fe53ba2 | 2015-05-05 14:50:36 +0200 | [diff] [blame] | 3211 | /** |
| 3212 | * rdma_cap_ib_sa - Check if the port of device has the capability Infiniband |
| 3213 | * Subnet Administration. |
Michael Wang | 296ec00 | 2015-05-18 10:41:45 +0200 | [diff] [blame] | 3214 | * @device: Device to check |
| 3215 | * @port_num: Port number to check |
Michael Wang | fe53ba2 | 2015-05-05 14:50:36 +0200 | [diff] [blame] | 3216 | * |
Michael Wang | 296ec00 | 2015-05-18 10:41:45 +0200 | [diff] [blame] | 3217 | * An InfiniBand Subnet Administration (SA) service is a pre-defined General |
| 3218 | * Service Agent (GSA) provided by the Subnet Manager (SM). On InfiniBand |
| 3219 | * fabrics, devices should resolve routes to other hosts by contacting the |
| 3220 | * SA to query the proper route. |
Michael Wang | fe53ba2 | 2015-05-05 14:50:36 +0200 | [diff] [blame] | 3221 | * |
Michael Wang | 296ec00 | 2015-05-18 10:41:45 +0200 | [diff] [blame] | 3222 | * Return: true if the port should act as a client to the fabric Subnet |
| 3223 | * Administration interface. This does not imply that the SA service is |
| 3224 | * running locally. |
Michael Wang | fe53ba2 | 2015-05-05 14:50:36 +0200 | [diff] [blame] | 3225 | */ |
Mark Bloch | 1fb7f89 | 2021-03-01 09:04:20 +0200 | [diff] [blame] | 3226 | static inline bool rdma_cap_ib_sa(const struct ib_device *device, u32 port_num) |
Michael Wang | fe53ba2 | 2015-05-05 14:50:36 +0200 | [diff] [blame] | 3227 | { |
Jason Gunthorpe | 8ceb135 | 2019-02-12 21:12:48 -0700 | [diff] [blame] | 3228 | return device->port_data[port_num].immutable.core_cap_flags & |
| 3229 | RDMA_CORE_CAP_IB_SA; |
Michael Wang | fe53ba2 | 2015-05-05 14:50:36 +0200 | [diff] [blame] | 3230 | } |
| 3231 | |
Michael Wang | a31ad3b | 2015-05-05 14:50:37 +0200 | [diff] [blame] | 3232 | /** |
| 3233 | * rdma_cap_ib_mcast - Check if the port of device has the capability Infiniband |
| 3234 | * Multicast. |
Michael Wang | 296ec00 | 2015-05-18 10:41:45 +0200 | [diff] [blame] | 3235 | * @device: Device to check |
| 3236 | * @port_num: Port number to check |
Michael Wang | a31ad3b | 2015-05-05 14:50:37 +0200 | [diff] [blame] | 3237 | * |
Michael Wang | 296ec00 | 2015-05-18 10:41:45 +0200 | [diff] [blame] | 3238 | * InfiniBand multicast registration is more complex than normal IPv4 or |
| 3239 | * IPv6 multicast registration. Each Host Channel Adapter must register |
| 3240 | * with the Subnet Manager when it wishes to join a multicast group. It |
| 3241 | * should do so only once regardless of how many queue pairs it subscribes |
| 3242 | * to this group. And it should leave the group only after all queue pairs |
| 3243 | * attached to the group have been detached. |
Michael Wang | a31ad3b | 2015-05-05 14:50:37 +0200 | [diff] [blame] | 3244 | * |
Michael Wang | 296ec00 | 2015-05-18 10:41:45 +0200 | [diff] [blame] | 3245 | * Return: true if the port must undertake the additional adminstrative |
| 3246 | * overhead of registering/unregistering with the SM and tracking of the |
| 3247 | * total number of queue pairs attached to the multicast group. |
Michael Wang | a31ad3b | 2015-05-05 14:50:37 +0200 | [diff] [blame] | 3248 | */ |
Mark Bloch | 1fb7f89 | 2021-03-01 09:04:20 +0200 | [diff] [blame] | 3249 | static inline bool rdma_cap_ib_mcast(const struct ib_device *device, |
| 3250 | u32 port_num) |
Michael Wang | a31ad3b | 2015-05-05 14:50:37 +0200 | [diff] [blame] | 3251 | { |
| 3252 | return rdma_cap_ib_sa(device, port_num); |
| 3253 | } |
| 3254 | |
Michael Wang | bc0f1d7 | 2015-05-05 14:50:38 +0200 | [diff] [blame] | 3255 | /** |
Michael Wang | 30a74ef | 2015-05-05 14:50:39 +0200 | [diff] [blame] | 3256 | * rdma_cap_af_ib - Check if the port of device has the capability |
| 3257 | * Native Infiniband Address. |
Michael Wang | 296ec00 | 2015-05-18 10:41:45 +0200 | [diff] [blame] | 3258 | * @device: Device to check |
| 3259 | * @port_num: Port number to check |
Michael Wang | 30a74ef | 2015-05-05 14:50:39 +0200 | [diff] [blame] | 3260 | * |
Michael Wang | 296ec00 | 2015-05-18 10:41:45 +0200 | [diff] [blame] | 3261 | * InfiniBand addressing uses a port's GUID + Subnet Prefix to make a default |
| 3262 | * GID. RoCE uses a different mechanism, but still generates a GID via |
| 3263 | * a prescribed mechanism and port specific data. |
Michael Wang | 30a74ef | 2015-05-05 14:50:39 +0200 | [diff] [blame] | 3264 | * |
Michael Wang | 296ec00 | 2015-05-18 10:41:45 +0200 | [diff] [blame] | 3265 | * Return: true if the port uses a GID address to identify devices on the |
| 3266 | * network. |
Michael Wang | 30a74ef | 2015-05-05 14:50:39 +0200 | [diff] [blame] | 3267 | */ |
Mark Bloch | 1fb7f89 | 2021-03-01 09:04:20 +0200 | [diff] [blame] | 3268 | static inline bool rdma_cap_af_ib(const struct ib_device *device, u32 port_num) |
Michael Wang | 30a74ef | 2015-05-05 14:50:39 +0200 | [diff] [blame] | 3269 | { |
Jason Gunthorpe | 8ceb135 | 2019-02-12 21:12:48 -0700 | [diff] [blame] | 3270 | return device->port_data[port_num].immutable.core_cap_flags & |
| 3271 | RDMA_CORE_CAP_AF_IB; |
Michael Wang | 30a74ef | 2015-05-05 14:50:39 +0200 | [diff] [blame] | 3272 | } |
| 3273 | |
| 3274 | /** |
Michael Wang | 227128f | 2015-05-05 14:50:40 +0200 | [diff] [blame] | 3275 | * rdma_cap_eth_ah - Check if the port of device has the capability |
Michael Wang | 296ec00 | 2015-05-18 10:41:45 +0200 | [diff] [blame] | 3276 | * Ethernet Address Handle. |
| 3277 | * @device: Device to check |
| 3278 | * @port_num: Port number to check |
Michael Wang | 227128f | 2015-05-05 14:50:40 +0200 | [diff] [blame] | 3279 | * |
Michael Wang | 296ec00 | 2015-05-18 10:41:45 +0200 | [diff] [blame] | 3280 | * RoCE is InfiniBand over Ethernet, and it uses a well defined technique |
| 3281 | * to fabricate GIDs over Ethernet/IP specific addresses native to the |
| 3282 | * port. Normally, packet headers are generated by the sending host |
| 3283 | * adapter, but when sending connectionless datagrams, we must manually |
| 3284 | * inject the proper headers for the fabric we are communicating over. |
Michael Wang | 227128f | 2015-05-05 14:50:40 +0200 | [diff] [blame] | 3285 | * |
Michael Wang | 296ec00 | 2015-05-18 10:41:45 +0200 | [diff] [blame] | 3286 | * Return: true if we are running as a RoCE port and must force the |
| 3287 | * addition of a Global Route Header built from our Ethernet Address |
| 3288 | * Handle into our header list for connectionless packets. |
Michael Wang | 227128f | 2015-05-05 14:50:40 +0200 | [diff] [blame] | 3289 | */ |
Mark Bloch | 1fb7f89 | 2021-03-01 09:04:20 +0200 | [diff] [blame] | 3290 | static inline bool rdma_cap_eth_ah(const struct ib_device *device, u32 port_num) |
Michael Wang | 227128f | 2015-05-05 14:50:40 +0200 | [diff] [blame] | 3291 | { |
Jason Gunthorpe | 8ceb135 | 2019-02-12 21:12:48 -0700 | [diff] [blame] | 3292 | return device->port_data[port_num].immutable.core_cap_flags & |
| 3293 | RDMA_CORE_CAP_ETH_AH; |
Michael Wang | 227128f | 2015-05-05 14:50:40 +0200 | [diff] [blame] | 3294 | } |
| 3295 | |
| 3296 | /** |
Dasaratharaman Chandramouli | 94d595c | 2017-03-20 19:38:09 -0400 | [diff] [blame] | 3297 | * rdma_cap_opa_ah - Check if the port of device supports |
| 3298 | * OPA Address handles |
| 3299 | * @device: Device to check |
| 3300 | * @port_num: Port number to check |
| 3301 | * |
| 3302 | * Return: true if we are running on an OPA device which supports |
| 3303 | * the extended OPA addressing. |
| 3304 | */ |
Mark Bloch | 1fb7f89 | 2021-03-01 09:04:20 +0200 | [diff] [blame] | 3305 | static inline bool rdma_cap_opa_ah(struct ib_device *device, u32 port_num) |
Dasaratharaman Chandramouli | 94d595c | 2017-03-20 19:38:09 -0400 | [diff] [blame] | 3306 | { |
Jason Gunthorpe | 8ceb135 | 2019-02-12 21:12:48 -0700 | [diff] [blame] | 3307 | return (device->port_data[port_num].immutable.core_cap_flags & |
Dasaratharaman Chandramouli | 94d595c | 2017-03-20 19:38:09 -0400 | [diff] [blame] | 3308 | RDMA_CORE_CAP_OPA_AH) == RDMA_CORE_CAP_OPA_AH; |
| 3309 | } |
| 3310 | |
| 3311 | /** |
Ira Weiny | 337877a | 2015-06-06 14:38:29 -0400 | [diff] [blame] | 3312 | * rdma_max_mad_size - Return the max MAD size required by this RDMA Port. |
| 3313 | * |
| 3314 | * @device: Device |
| 3315 | * @port_num: Port number |
| 3316 | * |
| 3317 | * This MAD size includes the MAD headers and MAD payload. No other headers |
| 3318 | * are included. |
| 3319 | * |
| 3320 | * Return the max MAD size required by the Port. Will return 0 if the port |
| 3321 | * does not support MADs |
| 3322 | */ |
Mark Bloch | 1fb7f89 | 2021-03-01 09:04:20 +0200 | [diff] [blame] | 3323 | static inline size_t rdma_max_mad_size(const struct ib_device *device, |
| 3324 | u32 port_num) |
Ira Weiny | 337877a | 2015-06-06 14:38:29 -0400 | [diff] [blame] | 3325 | { |
Jason Gunthorpe | 8ceb135 | 2019-02-12 21:12:48 -0700 | [diff] [blame] | 3326 | return device->port_data[port_num].immutable.max_mad_size; |
Ira Weiny | 337877a | 2015-06-06 14:38:29 -0400 | [diff] [blame] | 3327 | } |
| 3328 | |
Matan Barak | 03db3a2 | 2015-07-30 18:33:26 +0300 | [diff] [blame] | 3329 | /** |
| 3330 | * rdma_cap_roce_gid_table - Check if the port of device uses roce_gid_table |
| 3331 | * @device: Device to check |
| 3332 | * @port_num: Port number to check |
| 3333 | * |
| 3334 | * RoCE GID table mechanism manages the various GIDs for a device. |
| 3335 | * |
| 3336 | * NOTE: if allocating the port's GID table has failed, this call will still |
| 3337 | * return true, but any RoCE GID table API will fail. |
| 3338 | * |
| 3339 | * Return: true if the port uses RoCE GID table mechanism in order to manage |
| 3340 | * its GIDs. |
| 3341 | */ |
| 3342 | static inline bool rdma_cap_roce_gid_table(const struct ib_device *device, |
Mark Bloch | 1fb7f89 | 2021-03-01 09:04:20 +0200 | [diff] [blame] | 3343 | u32 port_num) |
Matan Barak | 03db3a2 | 2015-07-30 18:33:26 +0300 | [diff] [blame] | 3344 | { |
| 3345 | return rdma_protocol_roce(device, port_num) && |
Kamal Heib | 3023a1e | 2018-12-10 21:09:48 +0200 | [diff] [blame] | 3346 | device->ops.add_gid && device->ops.del_gid; |
Matan Barak | 03db3a2 | 2015-07-30 18:33:26 +0300 | [diff] [blame] | 3347 | } |
| 3348 | |
Christoph Hellwig | 002516e | 2016-05-03 18:01:05 +0200 | [diff] [blame] | 3349 | /* |
| 3350 | * Check if the device supports READ W/ INVALIDATE. |
| 3351 | */ |
| 3352 | static inline bool rdma_cap_read_inv(struct ib_device *dev, u32 port_num) |
| 3353 | { |
| 3354 | /* |
| 3355 | * iWarp drivers must support READ W/ INVALIDATE. No other protocol |
| 3356 | * has support for it yet. |
| 3357 | */ |
| 3358 | return rdma_protocol_iwarp(dev, port_num); |
| 3359 | } |
| 3360 | |
Shiraz Saleem | 4a35339 | 2019-05-06 08:53:32 -0500 | [diff] [blame] | 3361 | /** |
Kaike Wan | 6d72344 | 2020-05-11 12:06:18 -0400 | [diff] [blame] | 3362 | * rdma_core_cap_opa_port - Return whether the RDMA Port is OPA or not. |
| 3363 | * @device: Device |
| 3364 | * @port_num: 1 based Port number |
| 3365 | * |
| 3366 | * Return true if port is an Intel OPA port , false if not |
| 3367 | */ |
| 3368 | static inline bool rdma_core_cap_opa_port(struct ib_device *device, |
| 3369 | u32 port_num) |
| 3370 | { |
| 3371 | return (device->port_data[port_num].immutable.core_cap_flags & |
| 3372 | RDMA_CORE_PORT_INTEL_OPA) == RDMA_CORE_PORT_INTEL_OPA; |
| 3373 | } |
| 3374 | |
| 3375 | /** |
| 3376 | * rdma_mtu_enum_to_int - Return the mtu of the port as an integer value. |
| 3377 | * @device: Device |
| 3378 | * @port_num: Port number |
| 3379 | * @mtu: enum value of MTU |
| 3380 | * |
| 3381 | * Return the MTU size supported by the port as an integer value. Will return |
| 3382 | * -1 if enum value of mtu is not supported. |
| 3383 | */ |
Mark Bloch | 1fb7f89 | 2021-03-01 09:04:20 +0200 | [diff] [blame] | 3384 | static inline int rdma_mtu_enum_to_int(struct ib_device *device, u32 port, |
Kaike Wan | 6d72344 | 2020-05-11 12:06:18 -0400 | [diff] [blame] | 3385 | int mtu) |
| 3386 | { |
| 3387 | if (rdma_core_cap_opa_port(device, port)) |
| 3388 | return opa_mtu_enum_to_int((enum opa_mtu)mtu); |
| 3389 | else |
| 3390 | return ib_mtu_enum_to_int((enum ib_mtu)mtu); |
| 3391 | } |
| 3392 | |
| 3393 | /** |
| 3394 | * rdma_mtu_from_attr - Return the mtu of the port from the port attribute. |
| 3395 | * @device: Device |
| 3396 | * @port_num: Port number |
| 3397 | * @attr: port attribute |
| 3398 | * |
| 3399 | * Return the MTU size supported by the port as an integer value. |
| 3400 | */ |
Mark Bloch | 1fb7f89 | 2021-03-01 09:04:20 +0200 | [diff] [blame] | 3401 | static inline int rdma_mtu_from_attr(struct ib_device *device, u32 port, |
Kaike Wan | 6d72344 | 2020-05-11 12:06:18 -0400 | [diff] [blame] | 3402 | struct ib_port_attr *attr) |
| 3403 | { |
| 3404 | if (rdma_core_cap_opa_port(device, port)) |
| 3405 | return attr->phys_mtu; |
| 3406 | else |
| 3407 | return ib_mtu_enum_to_int(attr->max_mtu); |
| 3408 | } |
| 3409 | |
Mark Bloch | 1fb7f89 | 2021-03-01 09:04:20 +0200 | [diff] [blame] | 3410 | int ib_set_vf_link_state(struct ib_device *device, int vf, u32 port, |
Eli Cohen | 50174a7 | 2016-03-11 22:58:38 +0200 | [diff] [blame] | 3411 | int state); |
Mark Bloch | 1fb7f89 | 2021-03-01 09:04:20 +0200 | [diff] [blame] | 3412 | int ib_get_vf_config(struct ib_device *device, int vf, u32 port, |
Eli Cohen | 50174a7 | 2016-03-11 22:58:38 +0200 | [diff] [blame] | 3413 | struct ifla_vf_info *info); |
Mark Bloch | 1fb7f89 | 2021-03-01 09:04:20 +0200 | [diff] [blame] | 3414 | int ib_get_vf_stats(struct ib_device *device, int vf, u32 port, |
Eli Cohen | 50174a7 | 2016-03-11 22:58:38 +0200 | [diff] [blame] | 3415 | struct ifla_vf_stats *stats); |
Mark Bloch | 1fb7f89 | 2021-03-01 09:04:20 +0200 | [diff] [blame] | 3416 | int ib_get_vf_guid(struct ib_device *device, int vf, u32 port, |
Danit Goldberg | bfcb3c5d | 2019-11-06 15:08:32 +0200 | [diff] [blame] | 3417 | struct ifla_vf_guid *node_guid, |
| 3418 | struct ifla_vf_guid *port_guid); |
Mark Bloch | 1fb7f89 | 2021-03-01 09:04:20 +0200 | [diff] [blame] | 3419 | int ib_set_vf_guid(struct ib_device *device, int vf, u32 port, u64 guid, |
Eli Cohen | 50174a7 | 2016-03-11 22:58:38 +0200 | [diff] [blame] | 3420 | int type); |
| 3421 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3422 | int ib_query_pkey(struct ib_device *device, |
Mark Bloch | 1fb7f89 | 2021-03-01 09:04:20 +0200 | [diff] [blame] | 3423 | u32 port_num, u16 index, u16 *pkey); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3424 | |
| 3425 | int ib_modify_device(struct ib_device *device, |
| 3426 | int device_modify_mask, |
| 3427 | struct ib_device_modify *device_modify); |
| 3428 | |
| 3429 | int ib_modify_port(struct ib_device *device, |
Mark Bloch | 1fb7f89 | 2021-03-01 09:04:20 +0200 | [diff] [blame] | 3430 | u32 port_num, int port_modify_mask, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3431 | struct ib_port_modify *port_modify); |
| 3432 | |
Yosef Etigin | 5eb620c | 2007-05-14 07:26:51 +0300 | [diff] [blame] | 3433 | int ib_find_gid(struct ib_device *device, union ib_gid *gid, |
Mark Bloch | 1fb7f89 | 2021-03-01 09:04:20 +0200 | [diff] [blame] | 3434 | u32 *port_num, u16 *index); |
Yosef Etigin | 5eb620c | 2007-05-14 07:26:51 +0300 | [diff] [blame] | 3435 | |
| 3436 | int ib_find_pkey(struct ib_device *device, |
Mark Bloch | 1fb7f89 | 2021-03-01 09:04:20 +0200 | [diff] [blame] | 3437 | u32 port_num, u16 pkey, u16 *index); |
Yosef Etigin | 5eb620c | 2007-05-14 07:26:51 +0300 | [diff] [blame] | 3438 | |
Christoph Hellwig | ed082d3 | 2016-09-05 12:56:17 +0200 | [diff] [blame] | 3439 | enum ib_pd_flags { |
| 3440 | /* |
| 3441 | * Create a memory registration for all memory in the system and place |
| 3442 | * the rkey for it into pd->unsafe_global_rkey. This can be used by |
| 3443 | * ULPs to avoid the overhead of dynamic MRs. |
| 3444 | * |
| 3445 | * This flag is generally considered unsafe and must only be used in |
| 3446 | * extremly trusted environments. Every use of it will log a warning |
| 3447 | * in the kernel log. |
| 3448 | */ |
| 3449 | IB_PD_UNSAFE_GLOBAL_RKEY = 0x01, |
| 3450 | }; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3451 | |
Christoph Hellwig | ed082d3 | 2016-09-05 12:56:17 +0200 | [diff] [blame] | 3452 | struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags, |
| 3453 | const char *caller); |
Shamir Rabinovitch | c4367a2 | 2019-03-31 19:10:05 +0300 | [diff] [blame] | 3454 | |
Mauro Carvalho Chehab | 2988ca0 | 2020-12-01 13:08:55 +0100 | [diff] [blame] | 3455 | /** |
| 3456 | * ib_alloc_pd - Allocates an unused protection domain. |
| 3457 | * @device: The device on which to allocate the protection domain. |
| 3458 | * @flags: protection domain flags |
| 3459 | * |
| 3460 | * A protection domain object provides an association between QPs, shared |
| 3461 | * receive queues, address handles, memory regions, and memory windows. |
| 3462 | * |
| 3463 | * Every PD has a local_dma_lkey which can be used as the lkey value for local |
| 3464 | * memory operations. |
| 3465 | */ |
Christoph Hellwig | ed082d3 | 2016-09-05 12:56:17 +0200 | [diff] [blame] | 3466 | #define ib_alloc_pd(device, flags) \ |
Leon Romanovsky | e449644 | 2018-01-28 11:17:18 +0200 | [diff] [blame] | 3467 | __ib_alloc_pd((device), (flags), KBUILD_MODNAME) |
Shamir Rabinovitch | c4367a2 | 2019-03-31 19:10:05 +0300 | [diff] [blame] | 3468 | |
Leon Romanovsky | 91a7c58 | 2020-09-07 15:09:13 +0300 | [diff] [blame] | 3469 | int ib_dealloc_pd_user(struct ib_pd *pd, struct ib_udata *udata); |
Shamir Rabinovitch | c4367a2 | 2019-03-31 19:10:05 +0300 | [diff] [blame] | 3470 | |
| 3471 | /** |
| 3472 | * ib_dealloc_pd - Deallocate kernel PD |
| 3473 | * @pd: The protection domain |
| 3474 | * |
| 3475 | * NOTE: for user PD use ib_dealloc_pd_user with valid udata! |
| 3476 | */ |
| 3477 | static inline void ib_dealloc_pd(struct ib_pd *pd) |
| 3478 | { |
Leon Romanovsky | 91a7c58 | 2020-09-07 15:09:13 +0300 | [diff] [blame] | 3479 | int ret = ib_dealloc_pd_user(pd, NULL); |
| 3480 | |
| 3481 | WARN_ONCE(ret, "Destroy of kernel PD shouldn't fail"); |
Shamir Rabinovitch | c4367a2 | 2019-03-31 19:10:05 +0300 | [diff] [blame] | 3482 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3483 | |
Gal Pressman | b090c4e | 2018-12-12 11:09:05 +0200 | [diff] [blame] | 3484 | enum rdma_create_ah_flags { |
| 3485 | /* In a sleepable context */ |
| 3486 | RDMA_CREATE_AH_SLEEPABLE = BIT(0), |
| 3487 | }; |
| 3488 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3489 | /** |
Dasaratharaman Chandramouli | 0a18cfe | 2017-04-29 14:41:19 -0400 | [diff] [blame] | 3490 | * rdma_create_ah - Creates an address handle for the given address vector. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3491 | * @pd: The protection domain associated with the address handle. |
| 3492 | * @ah_attr: The attributes of the address vector. |
Gal Pressman | b090c4e | 2018-12-12 11:09:05 +0200 | [diff] [blame] | 3493 | * @flags: Create address handle flags (see enum rdma_create_ah_flags). |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3494 | * |
| 3495 | * The address handle is used to reference a local or global destination |
| 3496 | * in all UD QP post sends. |
| 3497 | */ |
Gal Pressman | b090c4e | 2018-12-12 11:09:05 +0200 | [diff] [blame] | 3498 | struct ib_ah *rdma_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr, |
| 3499 | u32 flags); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3500 | |
| 3501 | /** |
Parav Pandit | 5cda658 | 2017-10-16 08:45:12 +0300 | [diff] [blame] | 3502 | * rdma_create_user_ah - Creates an address handle for the given address vector. |
| 3503 | * It resolves destination mac address for ah attribute of RoCE type. |
| 3504 | * @pd: The protection domain associated with the address handle. |
| 3505 | * @ah_attr: The attributes of the address vector. |
| 3506 | * @udata: pointer to user's input output buffer information need by |
| 3507 | * provider driver. |
| 3508 | * |
| 3509 | * It returns 0 on success and returns appropriate error code on error. |
| 3510 | * The address handle is used to reference a local or global destination |
| 3511 | * in all UD QP post sends. |
| 3512 | */ |
| 3513 | struct ib_ah *rdma_create_user_ah(struct ib_pd *pd, |
| 3514 | struct rdma_ah_attr *ah_attr, |
| 3515 | struct ib_udata *udata); |
| 3516 | /** |
Moni Shoua | 850d8fd | 2016-11-10 11:30:56 +0200 | [diff] [blame] | 3517 | * ib_get_gids_from_rdma_hdr - Get sgid and dgid from GRH or IPv4 header |
| 3518 | * work completion. |
| 3519 | * @hdr: the L3 header to parse |
| 3520 | * @net_type: type of header to parse |
| 3521 | * @sgid: place to store source gid |
| 3522 | * @dgid: place to store destination gid |
| 3523 | */ |
| 3524 | int ib_get_gids_from_rdma_hdr(const union rdma_network_hdr *hdr, |
| 3525 | enum rdma_network_type net_type, |
| 3526 | union ib_gid *sgid, union ib_gid *dgid); |
| 3527 | |
| 3528 | /** |
| 3529 | * ib_get_rdma_header_version - Get the header version |
| 3530 | * @hdr: the L3 header to parse |
| 3531 | */ |
| 3532 | int ib_get_rdma_header_version(const union rdma_network_hdr *hdr); |
| 3533 | |
| 3534 | /** |
Parav Pandit | f6bdb14 | 2017-11-14 14:52:17 +0200 | [diff] [blame] | 3535 | * ib_init_ah_attr_from_wc - Initializes address handle attributes from a |
Sean Hefty | 4e00d69 | 2006-06-17 20:37:39 -0700 | [diff] [blame] | 3536 | * work completion. |
| 3537 | * @device: Device on which the received message arrived. |
| 3538 | * @port_num: Port on which the received message arrived. |
| 3539 | * @wc: Work completion associated with the received message. |
| 3540 | * @grh: References the received global route header. This parameter is |
| 3541 | * ignored unless the work completion indicates that the GRH is valid. |
| 3542 | * @ah_attr: Returned attributes that can be used when creating an address |
| 3543 | * handle for replying to the message. |
Parav Pandit | b740321 | 2018-06-19 10:59:14 +0300 | [diff] [blame] | 3544 | * When ib_init_ah_attr_from_wc() returns success, |
| 3545 | * (a) for IB link layer it optionally contains a reference to SGID attribute |
| 3546 | * when GRH is present for IB link layer. |
| 3547 | * (b) for RoCE link layer it contains a reference to SGID attribute. |
| 3548 | * User must invoke rdma_cleanup_ah_attr_gid_attr() to release reference to SGID |
| 3549 | * attributes which are initialized using ib_init_ah_attr_from_wc(). |
| 3550 | * |
Sean Hefty | 4e00d69 | 2006-06-17 20:37:39 -0700 | [diff] [blame] | 3551 | */ |
Mark Bloch | 1fb7f89 | 2021-03-01 09:04:20 +0200 | [diff] [blame] | 3552 | int ib_init_ah_attr_from_wc(struct ib_device *device, u32 port_num, |
Parav Pandit | f6bdb14 | 2017-11-14 14:52:17 +0200 | [diff] [blame] | 3553 | const struct ib_wc *wc, const struct ib_grh *grh, |
| 3554 | struct rdma_ah_attr *ah_attr); |
Sean Hefty | 4e00d69 | 2006-06-17 20:37:39 -0700 | [diff] [blame] | 3555 | |
| 3556 | /** |
Hal Rosenstock | 513789e | 2005-07-27 11:45:34 -0700 | [diff] [blame] | 3557 | * ib_create_ah_from_wc - Creates an address handle associated with the |
| 3558 | * sender of the specified work completion. |
| 3559 | * @pd: The protection domain associated with the address handle. |
| 3560 | * @wc: Work completion information associated with a received message. |
| 3561 | * @grh: References the received global route header. This parameter is |
| 3562 | * ignored unless the work completion indicates that the GRH is valid. |
| 3563 | * @port_num: The outbound port number to associate with the address. |
| 3564 | * |
| 3565 | * The address handle is used to reference a local or global destination |
| 3566 | * in all UD QP post sends. |
| 3567 | */ |
Ira Weiny | 73cdaae | 2015-05-31 17:15:31 -0400 | [diff] [blame] | 3568 | struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, const struct ib_wc *wc, |
Mark Bloch | 1fb7f89 | 2021-03-01 09:04:20 +0200 | [diff] [blame] | 3569 | const struct ib_grh *grh, u32 port_num); |
Hal Rosenstock | 513789e | 2005-07-27 11:45:34 -0700 | [diff] [blame] | 3570 | |
| 3571 | /** |
Dasaratharaman Chandramouli | 67b985b | 2017-04-29 14:41:20 -0400 | [diff] [blame] | 3572 | * rdma_modify_ah - Modifies the address vector associated with an address |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3573 | * handle. |
| 3574 | * @ah: The address handle to modify. |
| 3575 | * @ah_attr: The new address vector attributes to associate with the |
| 3576 | * address handle. |
| 3577 | */ |
Dasaratharaman Chandramouli | 67b985b | 2017-04-29 14:41:20 -0400 | [diff] [blame] | 3578 | int rdma_modify_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3579 | |
| 3580 | /** |
Dasaratharaman Chandramouli | bfbfd66 | 2017-04-29 14:41:21 -0400 | [diff] [blame] | 3581 | * rdma_query_ah - Queries the address vector associated with an address |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3582 | * handle. |
| 3583 | * @ah: The address handle to query. |
| 3584 | * @ah_attr: The address vector attributes associated with the address |
| 3585 | * handle. |
| 3586 | */ |
Dasaratharaman Chandramouli | bfbfd66 | 2017-04-29 14:41:21 -0400 | [diff] [blame] | 3587 | int rdma_query_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3588 | |
Gal Pressman | 2553ba2 | 2018-12-12 11:09:06 +0200 | [diff] [blame] | 3589 | enum rdma_destroy_ah_flags { |
| 3590 | /* In a sleepable context */ |
| 3591 | RDMA_DESTROY_AH_SLEEPABLE = BIT(0), |
| 3592 | }; |
| 3593 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3594 | /** |
Shamir Rabinovitch | c4367a2 | 2019-03-31 19:10:05 +0300 | [diff] [blame] | 3595 | * rdma_destroy_ah_user - Destroys an address handle. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3596 | * @ah: The address handle to destroy. |
Gal Pressman | 2553ba2 | 2018-12-12 11:09:06 +0200 | [diff] [blame] | 3597 | * @flags: Destroy address handle flags (see enum rdma_destroy_ah_flags). |
Shamir Rabinovitch | c4367a2 | 2019-03-31 19:10:05 +0300 | [diff] [blame] | 3598 | * @udata: Valid user data or NULL for kernel objects |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3599 | */ |
Shamir Rabinovitch | c4367a2 | 2019-03-31 19:10:05 +0300 | [diff] [blame] | 3600 | int rdma_destroy_ah_user(struct ib_ah *ah, u32 flags, struct ib_udata *udata); |
| 3601 | |
| 3602 | /** |
| 3603 | * rdma_destroy_ah - Destroys an kernel address handle. |
| 3604 | * @ah: The address handle to destroy. |
| 3605 | * @flags: Destroy address handle flags (see enum rdma_destroy_ah_flags). |
| 3606 | * |
| 3607 | * NOTE: for user ah use rdma_destroy_ah_user with valid udata! |
| 3608 | */ |
Leon Romanovsky | 9a9ebf8 | 2020-09-07 15:09:14 +0300 | [diff] [blame] | 3609 | static inline void rdma_destroy_ah(struct ib_ah *ah, u32 flags) |
Shamir Rabinovitch | c4367a2 | 2019-03-31 19:10:05 +0300 | [diff] [blame] | 3610 | { |
Leon Romanovsky | 9a9ebf8 | 2020-09-07 15:09:14 +0300 | [diff] [blame] | 3611 | int ret = rdma_destroy_ah_user(ah, flags, NULL); |
| 3612 | |
| 3613 | WARN_ONCE(ret, "Destroy of kernel AH shouldn't fail"); |
Shamir Rabinovitch | c4367a2 | 2019-03-31 19:10:05 +0300 | [diff] [blame] | 3614 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3615 | |
Jason Gunthorpe | b0810b0 | 2020-05-06 11:24:39 +0300 | [diff] [blame] | 3616 | struct ib_srq *ib_create_srq_user(struct ib_pd *pd, |
| 3617 | struct ib_srq_init_attr *srq_init_attr, |
| 3618 | struct ib_usrq_object *uobject, |
| 3619 | struct ib_udata *udata); |
| 3620 | static inline struct ib_srq * |
| 3621 | ib_create_srq(struct ib_pd *pd, struct ib_srq_init_attr *srq_init_attr) |
| 3622 | { |
| 3623 | if (!pd->device->ops.create_srq) |
| 3624 | return ERR_PTR(-EOPNOTSUPP); |
| 3625 | |
| 3626 | return ib_create_srq_user(pd, srq_init_attr, NULL, NULL); |
| 3627 | } |
Roland Dreier | d41fcc6 | 2005-08-18 12:23:08 -0700 | [diff] [blame] | 3628 | |
| 3629 | /** |
| 3630 | * ib_modify_srq - Modifies the attributes for the specified SRQ. |
| 3631 | * @srq: The SRQ to modify. |
| 3632 | * @srq_attr: On input, specifies the SRQ attributes to modify. On output, |
| 3633 | * the current values of selected SRQ attributes are returned. |
| 3634 | * @srq_attr_mask: A bit-mask used to specify which attributes of the SRQ |
| 3635 | * are being modified. |
| 3636 | * |
| 3637 | * The mask may contain IB_SRQ_MAX_WR to resize the SRQ and/or |
| 3638 | * IB_SRQ_LIMIT to set the SRQ's limit and request notification when |
| 3639 | * the number of receives queued drops below the limit. |
| 3640 | */ |
| 3641 | int ib_modify_srq(struct ib_srq *srq, |
| 3642 | struct ib_srq_attr *srq_attr, |
| 3643 | enum ib_srq_attr_mask srq_attr_mask); |
| 3644 | |
| 3645 | /** |
| 3646 | * ib_query_srq - Returns the attribute list and current values for the |
| 3647 | * specified SRQ. |
| 3648 | * @srq: The SRQ to query. |
| 3649 | * @srq_attr: The attributes of the specified SRQ. |
| 3650 | */ |
| 3651 | int ib_query_srq(struct ib_srq *srq, |
| 3652 | struct ib_srq_attr *srq_attr); |
| 3653 | |
| 3654 | /** |
Shamir Rabinovitch | c4367a2 | 2019-03-31 19:10:05 +0300 | [diff] [blame] | 3655 | * ib_destroy_srq_user - Destroys the specified SRQ. |
Roland Dreier | d41fcc6 | 2005-08-18 12:23:08 -0700 | [diff] [blame] | 3656 | * @srq: The SRQ to destroy. |
Shamir Rabinovitch | c4367a2 | 2019-03-31 19:10:05 +0300 | [diff] [blame] | 3657 | * @udata: Valid user data or NULL for kernel objects |
Roland Dreier | d41fcc6 | 2005-08-18 12:23:08 -0700 | [diff] [blame] | 3658 | */ |
Shamir Rabinovitch | c4367a2 | 2019-03-31 19:10:05 +0300 | [diff] [blame] | 3659 | int ib_destroy_srq_user(struct ib_srq *srq, struct ib_udata *udata); |
| 3660 | |
| 3661 | /** |
| 3662 | * ib_destroy_srq - Destroys the specified kernel SRQ. |
| 3663 | * @srq: The SRQ to destroy. |
| 3664 | * |
| 3665 | * NOTE: for user srq use ib_destroy_srq_user with valid udata! |
| 3666 | */ |
Leon Romanovsky | 119181d | 2020-09-07 15:09:16 +0300 | [diff] [blame] | 3667 | static inline void ib_destroy_srq(struct ib_srq *srq) |
Shamir Rabinovitch | c4367a2 | 2019-03-31 19:10:05 +0300 | [diff] [blame] | 3668 | { |
Leon Romanovsky | 119181d | 2020-09-07 15:09:16 +0300 | [diff] [blame] | 3669 | int ret = ib_destroy_srq_user(srq, NULL); |
| 3670 | |
| 3671 | WARN_ONCE(ret, "Destroy of kernel SRQ shouldn't fail"); |
Shamir Rabinovitch | c4367a2 | 2019-03-31 19:10:05 +0300 | [diff] [blame] | 3672 | } |
Roland Dreier | d41fcc6 | 2005-08-18 12:23:08 -0700 | [diff] [blame] | 3673 | |
| 3674 | /** |
| 3675 | * ib_post_srq_recv - Posts a list of work requests to the specified SRQ. |
| 3676 | * @srq: The SRQ to post the work request on. |
| 3677 | * @recv_wr: A list of work requests to post on the receive queue. |
| 3678 | * @bad_recv_wr: On an immediate failure, this parameter will reference |
| 3679 | * the work request that failed to be posted on the QP. |
| 3680 | */ |
| 3681 | static inline int ib_post_srq_recv(struct ib_srq *srq, |
Bart Van Assche | d34ac5c | 2018-07-18 09:25:32 -0700 | [diff] [blame] | 3682 | const struct ib_recv_wr *recv_wr, |
| 3683 | const struct ib_recv_wr **bad_recv_wr) |
Roland Dreier | d41fcc6 | 2005-08-18 12:23:08 -0700 | [diff] [blame] | 3684 | { |
Bart Van Assche | d34ac5c | 2018-07-18 09:25:32 -0700 | [diff] [blame] | 3685 | const struct ib_recv_wr *dummy; |
Bart Van Assche | bb039a8 | 2018-07-18 09:25:16 -0700 | [diff] [blame] | 3686 | |
Kamal Heib | 3023a1e | 2018-12-10 21:09:48 +0200 | [diff] [blame] | 3687 | return srq->device->ops.post_srq_recv(srq, recv_wr, |
| 3688 | bad_recv_wr ? : &dummy); |
Roland Dreier | d41fcc6 | 2005-08-18 12:23:08 -0700 | [diff] [blame] | 3689 | } |
| 3690 | |
Leon Romanovsky | 8da9fe4 | 2021-08-03 21:20:35 +0300 | [diff] [blame] | 3691 | struct ib_qp *ib_create_qp_kernel(struct ib_pd *pd, |
| 3692 | struct ib_qp_init_attr *qp_init_attr, |
| 3693 | const char *caller); |
| 3694 | /** |
| 3695 | * ib_create_qp - Creates a kernel QP associated with the specific protection |
| 3696 | * domain. |
| 3697 | * @pd: The protection domain associated with the QP. |
| 3698 | * @init_attr: A list of initial attributes required to create the |
| 3699 | * QP. If QP creation succeeds, then the attributes are updated to |
| 3700 | * the actual capabilities of the created QP. |
| 3701 | */ |
Leon Romanovsky | 66f57b8 | 2020-11-17 09:01:48 +0200 | [diff] [blame] | 3702 | static inline struct ib_qp *ib_create_qp(struct ib_pd *pd, |
| 3703 | struct ib_qp_init_attr *init_attr) |
| 3704 | { |
Leon Romanovsky | 8da9fe4 | 2021-08-03 21:20:35 +0300 | [diff] [blame] | 3705 | return ib_create_qp_kernel(pd, init_attr, KBUILD_MODNAME); |
Leon Romanovsky | 66f57b8 | 2020-11-17 09:01:48 +0200 | [diff] [blame] | 3706 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3707 | |
| 3708 | /** |
Parav Pandit | a512c2f | 2017-05-23 11:26:08 +0300 | [diff] [blame] | 3709 | * ib_modify_qp_with_udata - Modifies the attributes for the specified QP. |
| 3710 | * @qp: The QP to modify. |
| 3711 | * @attr: On input, specifies the QP attributes to modify. On output, |
| 3712 | * the current values of selected QP attributes are returned. |
| 3713 | * @attr_mask: A bit-mask used to specify which attributes of the QP |
| 3714 | * are being modified. |
| 3715 | * @udata: pointer to user's input output buffer information |
| 3716 | * are being modified. |
| 3717 | * It returns 0 on success and returns appropriate error code on error. |
| 3718 | */ |
| 3719 | int ib_modify_qp_with_udata(struct ib_qp *qp, |
| 3720 | struct ib_qp_attr *attr, |
| 3721 | int attr_mask, |
| 3722 | struct ib_udata *udata); |
| 3723 | |
| 3724 | /** |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3725 | * ib_modify_qp - Modifies the attributes for the specified QP and then |
| 3726 | * transitions the QP to the given state. |
| 3727 | * @qp: The QP to modify. |
| 3728 | * @qp_attr: On input, specifies the QP attributes to modify. On output, |
| 3729 | * the current values of selected QP attributes are returned. |
| 3730 | * @qp_attr_mask: A bit-mask used to specify which attributes of the QP |
| 3731 | * are being modified. |
| 3732 | */ |
| 3733 | int ib_modify_qp(struct ib_qp *qp, |
| 3734 | struct ib_qp_attr *qp_attr, |
| 3735 | int qp_attr_mask); |
| 3736 | |
| 3737 | /** |
| 3738 | * ib_query_qp - Returns the attribute list and current values for the |
| 3739 | * specified QP. |
| 3740 | * @qp: The QP to query. |
| 3741 | * @qp_attr: The attributes of the specified QP. |
| 3742 | * @qp_attr_mask: A bit-mask used to select specific attributes to query. |
| 3743 | * @qp_init_attr: Additional attributes of the selected QP. |
| 3744 | * |
| 3745 | * The qp_attr_mask may be used to limit the query to gathering only the |
| 3746 | * selected attributes. |
| 3747 | */ |
| 3748 | int ib_query_qp(struct ib_qp *qp, |
| 3749 | struct ib_qp_attr *qp_attr, |
| 3750 | int qp_attr_mask, |
| 3751 | struct ib_qp_init_attr *qp_init_attr); |
| 3752 | |
| 3753 | /** |
| 3754 | * ib_destroy_qp - Destroys the specified QP. |
| 3755 | * @qp: The QP to destroy. |
Shamir Rabinovitch | c4367a2 | 2019-03-31 19:10:05 +0300 | [diff] [blame] | 3756 | * @udata: Valid udata or NULL for kernel objects |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3757 | */ |
Shamir Rabinovitch | c4367a2 | 2019-03-31 19:10:05 +0300 | [diff] [blame] | 3758 | int ib_destroy_qp_user(struct ib_qp *qp, struct ib_udata *udata); |
| 3759 | |
| 3760 | /** |
| 3761 | * ib_destroy_qp - Destroys the specified kernel QP. |
| 3762 | * @qp: The QP to destroy. |
| 3763 | * |
| 3764 | * NOTE: for user qp use ib_destroy_qp_user with valid udata! |
| 3765 | */ |
| 3766 | static inline int ib_destroy_qp(struct ib_qp *qp) |
| 3767 | { |
| 3768 | return ib_destroy_qp_user(qp, NULL); |
| 3769 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3770 | |
| 3771 | /** |
Sean Hefty | 0e0ec7e | 2011-08-08 15:31:51 -0700 | [diff] [blame] | 3772 | * ib_open_qp - Obtain a reference to an existing sharable QP. |
| 3773 | * @xrcd - XRC domain |
| 3774 | * @qp_open_attr: Attributes identifying the QP to open. |
| 3775 | * |
| 3776 | * Returns a reference to a sharable QP. |
| 3777 | */ |
| 3778 | struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd, |
| 3779 | struct ib_qp_open_attr *qp_open_attr); |
| 3780 | |
| 3781 | /** |
| 3782 | * ib_close_qp - Release an external reference to a QP. |
Sean Hefty | d3d72d9 | 2011-05-26 23:06:44 -0700 | [diff] [blame] | 3783 | * @qp: The QP handle to release |
| 3784 | * |
Sean Hefty | 0e0ec7e | 2011-08-08 15:31:51 -0700 | [diff] [blame] | 3785 | * The opened QP handle is released by the caller. The underlying |
| 3786 | * shared QP is not destroyed until all internal references are released. |
Sean Hefty | d3d72d9 | 2011-05-26 23:06:44 -0700 | [diff] [blame] | 3787 | */ |
Sean Hefty | 0e0ec7e | 2011-08-08 15:31:51 -0700 | [diff] [blame] | 3788 | int ib_close_qp(struct ib_qp *qp); |
Sean Hefty | d3d72d9 | 2011-05-26 23:06:44 -0700 | [diff] [blame] | 3789 | |
| 3790 | /** |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3791 | * ib_post_send - Posts a list of work requests to the send queue of |
| 3792 | * the specified QP. |
| 3793 | * @qp: The QP to post the work request on. |
| 3794 | * @send_wr: A list of work requests to post on the send queue. |
| 3795 | * @bad_send_wr: On an immediate failure, this parameter will reference |
| 3796 | * the work request that failed to be posted on the QP. |
Bart Van Assche | 55464d4 | 2009-12-09 14:20:04 -0800 | [diff] [blame] | 3797 | * |
| 3798 | * While IBA Vol. 1 section 11.4.1.1 specifies that if an immediate |
| 3799 | * error is returned, the QP state shall not be affected, |
| 3800 | * ib_post_send() will return an immediate error after queueing any |
| 3801 | * earlier work requests in the list. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3802 | */ |
| 3803 | static inline int ib_post_send(struct ib_qp *qp, |
Bart Van Assche | d34ac5c | 2018-07-18 09:25:32 -0700 | [diff] [blame] | 3804 | const struct ib_send_wr *send_wr, |
| 3805 | const struct ib_send_wr **bad_send_wr) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3806 | { |
Bart Van Assche | d34ac5c | 2018-07-18 09:25:32 -0700 | [diff] [blame] | 3807 | const struct ib_send_wr *dummy; |
Bart Van Assche | bb039a8 | 2018-07-18 09:25:16 -0700 | [diff] [blame] | 3808 | |
Kamal Heib | 3023a1e | 2018-12-10 21:09:48 +0200 | [diff] [blame] | 3809 | return qp->device->ops.post_send(qp, send_wr, bad_send_wr ? : &dummy); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3810 | } |
| 3811 | |
| 3812 | /** |
| 3813 | * ib_post_recv - Posts a list of work requests to the receive queue of |
| 3814 | * the specified QP. |
| 3815 | * @qp: The QP to post the work request on. |
| 3816 | * @recv_wr: A list of work requests to post on the receive queue. |
| 3817 | * @bad_recv_wr: On an immediate failure, this parameter will reference |
| 3818 | * the work request that failed to be posted on the QP. |
| 3819 | */ |
| 3820 | static inline int ib_post_recv(struct ib_qp *qp, |
Bart Van Assche | d34ac5c | 2018-07-18 09:25:32 -0700 | [diff] [blame] | 3821 | const struct ib_recv_wr *recv_wr, |
| 3822 | const struct ib_recv_wr **bad_recv_wr) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3823 | { |
Bart Van Assche | d34ac5c | 2018-07-18 09:25:32 -0700 | [diff] [blame] | 3824 | const struct ib_recv_wr *dummy; |
Bart Van Assche | bb039a8 | 2018-07-18 09:25:16 -0700 | [diff] [blame] | 3825 | |
Kamal Heib | 3023a1e | 2018-12-10 21:09:48 +0200 | [diff] [blame] | 3826 | return qp->device->ops.post_recv(qp, recv_wr, bad_recv_wr ? : &dummy); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3827 | } |
| 3828 | |
Leon Romanovsky | 7e3c66c | 2020-09-07 15:09:17 +0300 | [diff] [blame] | 3829 | struct ib_cq *__ib_alloc_cq(struct ib_device *dev, void *private, int nr_cqe, |
| 3830 | int comp_vector, enum ib_poll_context poll_ctx, |
| 3831 | const char *caller); |
Shamir Rabinovitch | c4367a2 | 2019-03-31 19:10:05 +0300 | [diff] [blame] | 3832 | static inline struct ib_cq *ib_alloc_cq(struct ib_device *dev, void *private, |
| 3833 | int nr_cqe, int comp_vector, |
| 3834 | enum ib_poll_context poll_ctx) |
| 3835 | { |
Leon Romanovsky | 7e3c66c | 2020-09-07 15:09:17 +0300 | [diff] [blame] | 3836 | return __ib_alloc_cq(dev, private, nr_cqe, comp_vector, poll_ctx, |
| 3837 | KBUILD_MODNAME); |
Shamir Rabinovitch | c4367a2 | 2019-03-31 19:10:05 +0300 | [diff] [blame] | 3838 | } |
| 3839 | |
Chuck Lever | 20cf4e0 | 2019-07-29 13:22:09 -0400 | [diff] [blame] | 3840 | struct ib_cq *__ib_alloc_cq_any(struct ib_device *dev, void *private, |
| 3841 | int nr_cqe, enum ib_poll_context poll_ctx, |
| 3842 | const char *caller); |
| 3843 | |
| 3844 | /** |
| 3845 | * ib_alloc_cq_any: Allocate kernel CQ |
| 3846 | * @dev: The IB device |
| 3847 | * @private: Private data attached to the CQE |
| 3848 | * @nr_cqe: Number of CQEs in the CQ |
| 3849 | * @poll_ctx: Context used for polling the CQ |
| 3850 | */ |
| 3851 | static inline struct ib_cq *ib_alloc_cq_any(struct ib_device *dev, |
| 3852 | void *private, int nr_cqe, |
| 3853 | enum ib_poll_context poll_ctx) |
| 3854 | { |
| 3855 | return __ib_alloc_cq_any(dev, private, nr_cqe, poll_ctx, |
| 3856 | KBUILD_MODNAME); |
| 3857 | } |
| 3858 | |
Leon Romanovsky | 7e3c66c | 2020-09-07 15:09:17 +0300 | [diff] [blame] | 3859 | void ib_free_cq(struct ib_cq *cq); |
Christoph Hellwig | 14d3a3b | 2015-12-11 11:53:03 -0800 | [diff] [blame] | 3860 | int ib_process_cq_direct(struct ib_cq *cq, int budget); |
| 3861 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3862 | /** |
| 3863 | * ib_create_cq - Creates a CQ on the specified device. |
| 3864 | * @device: The device on which to create the CQ. |
| 3865 | * @comp_handler: A user-specified callback that is invoked when a |
| 3866 | * completion event occurs on the CQ. |
| 3867 | * @event_handler: A user-specified callback that is invoked when an |
| 3868 | * asynchronous event not associated with a completion occurs on the CQ. |
| 3869 | * @cq_context: Context associated with the CQ returned to the user via |
| 3870 | * the associated completion and event handlers. |
Matan Barak | 8e37210 | 2015-06-11 16:35:21 +0300 | [diff] [blame] | 3871 | * @cq_attr: The attributes the CQ should be created upon. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3872 | * |
| 3873 | * Users can examine the cq structure to determine the actual CQ size. |
| 3874 | */ |
Bharat Potnuri | 7350cdd | 2018-06-15 20:52:33 +0530 | [diff] [blame] | 3875 | struct ib_cq *__ib_create_cq(struct ib_device *device, |
| 3876 | ib_comp_handler comp_handler, |
| 3877 | void (*event_handler)(struct ib_event *, void *), |
| 3878 | void *cq_context, |
| 3879 | const struct ib_cq_init_attr *cq_attr, |
| 3880 | const char *caller); |
| 3881 | #define ib_create_cq(device, cmp_hndlr, evt_hndlr, cq_ctxt, cq_attr) \ |
| 3882 | __ib_create_cq((device), (cmp_hndlr), (evt_hndlr), (cq_ctxt), (cq_attr), KBUILD_MODNAME) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3883 | |
| 3884 | /** |
| 3885 | * ib_resize_cq - Modifies the capacity of the CQ. |
| 3886 | * @cq: The CQ to resize. |
| 3887 | * @cqe: The minimum size of the CQ. |
| 3888 | * |
| 3889 | * Users can examine the cq structure to determine the actual CQ size. |
| 3890 | */ |
| 3891 | int ib_resize_cq(struct ib_cq *cq, int cqe); |
| 3892 | |
| 3893 | /** |
Leon Romanovsky | 4190b4e | 2017-11-13 10:51:19 +0200 | [diff] [blame] | 3894 | * rdma_set_cq_moderation - Modifies moderation params of the CQ |
Eli Cohen | 2dd5716 | 2008-04-16 21:09:33 -0700 | [diff] [blame] | 3895 | * @cq: The CQ to modify. |
| 3896 | * @cq_count: number of CQEs that will trigger an event |
| 3897 | * @cq_period: max period of time in usec before triggering an event |
| 3898 | * |
| 3899 | */ |
Leon Romanovsky | 4190b4e | 2017-11-13 10:51:19 +0200 | [diff] [blame] | 3900 | int rdma_set_cq_moderation(struct ib_cq *cq, u16 cq_count, u16 cq_period); |
Eli Cohen | 2dd5716 | 2008-04-16 21:09:33 -0700 | [diff] [blame] | 3901 | |
| 3902 | /** |
Shamir Rabinovitch | c4367a2 | 2019-03-31 19:10:05 +0300 | [diff] [blame] | 3903 | * ib_destroy_cq_user - Destroys the specified CQ. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3904 | * @cq: The CQ to destroy. |
Shamir Rabinovitch | c4367a2 | 2019-03-31 19:10:05 +0300 | [diff] [blame] | 3905 | * @udata: Valid user data or NULL for kernel objects |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3906 | */ |
Shamir Rabinovitch | c4367a2 | 2019-03-31 19:10:05 +0300 | [diff] [blame] | 3907 | int ib_destroy_cq_user(struct ib_cq *cq, struct ib_udata *udata); |
| 3908 | |
| 3909 | /** |
| 3910 | * ib_destroy_cq - Destroys the specified kernel CQ. |
| 3911 | * @cq: The CQ to destroy. |
| 3912 | * |
| 3913 | * NOTE: for user cq use ib_destroy_cq_user with valid udata! |
| 3914 | */ |
Leon Romanovsky | 890ac8d | 2019-05-20 09:54:21 +0300 | [diff] [blame] | 3915 | static inline void ib_destroy_cq(struct ib_cq *cq) |
Shamir Rabinovitch | c4367a2 | 2019-03-31 19:10:05 +0300 | [diff] [blame] | 3916 | { |
Leon Romanovsky | 43d781b | 2020-09-07 15:09:18 +0300 | [diff] [blame] | 3917 | int ret = ib_destroy_cq_user(cq, NULL); |
| 3918 | |
| 3919 | WARN_ONCE(ret, "Destroy of kernel CQ shouldn't fail"); |
Shamir Rabinovitch | c4367a2 | 2019-03-31 19:10:05 +0300 | [diff] [blame] | 3920 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3921 | |
| 3922 | /** |
| 3923 | * ib_poll_cq - poll a CQ for completion(s) |
| 3924 | * @cq:the CQ being polled |
| 3925 | * @num_entries:maximum number of completions to return |
| 3926 | * @wc:array of at least @num_entries &struct ib_wc where completions |
| 3927 | * will be returned |
| 3928 | * |
| 3929 | * Poll a CQ for (possibly multiple) completions. If the return value |
| 3930 | * is < 0, an error occurred. If the return value is >= 0, it is the |
| 3931 | * number of completions returned. If the return value is |
| 3932 | * non-negative and < num_entries, then the CQ was emptied. |
| 3933 | */ |
| 3934 | static inline int ib_poll_cq(struct ib_cq *cq, int num_entries, |
| 3935 | struct ib_wc *wc) |
| 3936 | { |
Kamal Heib | 3023a1e | 2018-12-10 21:09:48 +0200 | [diff] [blame] | 3937 | return cq->device->ops.poll_cq(cq, num_entries, wc); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3938 | } |
| 3939 | |
| 3940 | /** |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3941 | * ib_req_notify_cq - Request completion notification on a CQ. |
| 3942 | * @cq: The CQ to generate an event for. |
Roland Dreier | ed23a72 | 2007-05-06 21:02:48 -0700 | [diff] [blame] | 3943 | * @flags: |
| 3944 | * Must contain exactly one of %IB_CQ_SOLICITED or %IB_CQ_NEXT_COMP |
| 3945 | * to request an event on the next solicited event or next work |
| 3946 | * completion at any type, respectively. %IB_CQ_REPORT_MISSED_EVENTS |
| 3947 | * may also be |ed in to request a hint about missed events, as |
| 3948 | * described below. |
| 3949 | * |
| 3950 | * Return Value: |
| 3951 | * < 0 means an error occurred while requesting notification |
| 3952 | * == 0 means notification was requested successfully, and if |
| 3953 | * IB_CQ_REPORT_MISSED_EVENTS was passed in, then no events |
| 3954 | * were missed and it is safe to wait for another event. In |
| 3955 | * this case is it guaranteed that any work completions added |
| 3956 | * to the CQ since the last CQ poll will trigger a completion |
| 3957 | * notification event. |
| 3958 | * > 0 is only returned if IB_CQ_REPORT_MISSED_EVENTS was passed |
| 3959 | * in. It means that the consumer must poll the CQ again to |
| 3960 | * make sure it is empty to avoid missing an event because of a |
| 3961 | * race between requesting notification and an entry being |
| 3962 | * added to the CQ. This return value means it is possible |
| 3963 | * (but not guaranteed) that a work completion has been added |
| 3964 | * to the CQ since the last poll without triggering a |
| 3965 | * completion notification event. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3966 | */ |
| 3967 | static inline int ib_req_notify_cq(struct ib_cq *cq, |
Roland Dreier | ed23a72 | 2007-05-06 21:02:48 -0700 | [diff] [blame] | 3968 | enum ib_cq_notify_flags flags) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3969 | { |
Kamal Heib | 3023a1e | 2018-12-10 21:09:48 +0200 | [diff] [blame] | 3970 | return cq->device->ops.req_notify_cq(cq, flags); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3971 | } |
| 3972 | |
Yamin Friedman | c7ff819 | 2020-05-27 11:34:53 +0300 | [diff] [blame] | 3973 | struct ib_cq *ib_cq_pool_get(struct ib_device *dev, unsigned int nr_cqe, |
| 3974 | int comp_vector_hint, |
| 3975 | enum ib_poll_context poll_ctx); |
| 3976 | |
| 3977 | void ib_cq_pool_put(struct ib_cq *cq, unsigned int nr_cqe); |
| 3978 | |
Christoph Hellwig | 5a7a9e0 | 2020-11-06 19:19:38 +0100 | [diff] [blame] | 3979 | /* |
| 3980 | * Drivers that don't need a DMA mapping at the RDMA layer, set dma_device to |
| 3981 | * NULL. This causes the ib_dma* helpers to just stash the kernel virtual |
| 3982 | * address into the dma address. |
| 3983 | */ |
| 3984 | static inline bool ib_uses_virt_dma(struct ib_device *dev) |
| 3985 | { |
| 3986 | return IS_ENABLED(CONFIG_INFINIBAND_VIRT_DMA) && !dev->dma_device; |
| 3987 | } |
| 3988 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3989 | /** |
Ralph Campbell | 9b51309 | 2006-12-12 14:27:41 -0800 | [diff] [blame] | 3990 | * ib_dma_mapping_error - check a DMA addr for error |
| 3991 | * @dev: The device for which the dma_addr was created |
| 3992 | * @dma_addr: The DMA address to check |
| 3993 | */ |
| 3994 | static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr) |
| 3995 | { |
Christoph Hellwig | 5a7a9e0 | 2020-11-06 19:19:38 +0100 | [diff] [blame] | 3996 | if (ib_uses_virt_dma(dev)) |
| 3997 | return 0; |
Bart Van Assche | 0957c29 | 2017-03-07 22:56:53 +0000 | [diff] [blame] | 3998 | return dma_mapping_error(dev->dma_device, dma_addr); |
Ralph Campbell | 9b51309 | 2006-12-12 14:27:41 -0800 | [diff] [blame] | 3999 | } |
| 4000 | |
| 4001 | /** |
| 4002 | * ib_dma_map_single - Map a kernel virtual address to DMA address |
| 4003 | * @dev: The device for which the dma_addr is to be created |
| 4004 | * @cpu_addr: The kernel virtual address |
| 4005 | * @size: The size of the region in bytes |
| 4006 | * @direction: The direction of the DMA |
| 4007 | */ |
| 4008 | static inline u64 ib_dma_map_single(struct ib_device *dev, |
| 4009 | void *cpu_addr, size_t size, |
| 4010 | enum dma_data_direction direction) |
| 4011 | { |
Christoph Hellwig | 5a7a9e0 | 2020-11-06 19:19:38 +0100 | [diff] [blame] | 4012 | if (ib_uses_virt_dma(dev)) |
| 4013 | return (uintptr_t)cpu_addr; |
Bart Van Assche | 0957c29 | 2017-03-07 22:56:53 +0000 | [diff] [blame] | 4014 | return dma_map_single(dev->dma_device, cpu_addr, size, direction); |
Ralph Campbell | 9b51309 | 2006-12-12 14:27:41 -0800 | [diff] [blame] | 4015 | } |
| 4016 | |
| 4017 | /** |
| 4018 | * ib_dma_unmap_single - Destroy a mapping created by ib_dma_map_single() |
| 4019 | * @dev: The device for which the DMA address was created |
| 4020 | * @addr: The DMA address |
| 4021 | * @size: The size of the region in bytes |
| 4022 | * @direction: The direction of the DMA |
| 4023 | */ |
| 4024 | static inline void ib_dma_unmap_single(struct ib_device *dev, |
| 4025 | u64 addr, size_t size, |
| 4026 | enum dma_data_direction direction) |
| 4027 | { |
Christoph Hellwig | 5a7a9e0 | 2020-11-06 19:19:38 +0100 | [diff] [blame] | 4028 | if (!ib_uses_virt_dma(dev)) |
| 4029 | dma_unmap_single(dev->dma_device, addr, size, direction); |
Arthur Kepner | cb9fbc5 | 2008-04-29 01:00:34 -0700 | [diff] [blame] | 4030 | } |
| 4031 | |
Ralph Campbell | 9b51309 | 2006-12-12 14:27:41 -0800 | [diff] [blame] | 4032 | /** |
| 4033 | * ib_dma_map_page - Map a physical page to DMA address |
| 4034 | * @dev: The device for which the dma_addr is to be created |
| 4035 | * @page: The page to be mapped |
| 4036 | * @offset: The offset within the page |
| 4037 | * @size: The size of the region in bytes |
| 4038 | * @direction: The direction of the DMA |
| 4039 | */ |
| 4040 | static inline u64 ib_dma_map_page(struct ib_device *dev, |
| 4041 | struct page *page, |
| 4042 | unsigned long offset, |
| 4043 | size_t size, |
| 4044 | enum dma_data_direction direction) |
| 4045 | { |
Christoph Hellwig | 5a7a9e0 | 2020-11-06 19:19:38 +0100 | [diff] [blame] | 4046 | if (ib_uses_virt_dma(dev)) |
| 4047 | return (uintptr_t)(page_address(page) + offset); |
Bart Van Assche | 0957c29 | 2017-03-07 22:56:53 +0000 | [diff] [blame] | 4048 | return dma_map_page(dev->dma_device, page, offset, size, direction); |
Ralph Campbell | 9b51309 | 2006-12-12 14:27:41 -0800 | [diff] [blame] | 4049 | } |
| 4050 | |
| 4051 | /** |
| 4052 | * ib_dma_unmap_page - Destroy a mapping created by ib_dma_map_page() |
| 4053 | * @dev: The device for which the DMA address was created |
| 4054 | * @addr: The DMA address |
| 4055 | * @size: The size of the region in bytes |
| 4056 | * @direction: The direction of the DMA |
| 4057 | */ |
| 4058 | static inline void ib_dma_unmap_page(struct ib_device *dev, |
| 4059 | u64 addr, size_t size, |
| 4060 | enum dma_data_direction direction) |
| 4061 | { |
Christoph Hellwig | 5a7a9e0 | 2020-11-06 19:19:38 +0100 | [diff] [blame] | 4062 | if (!ib_uses_virt_dma(dev)) |
| 4063 | dma_unmap_page(dev->dma_device, addr, size, direction); |
| 4064 | } |
| 4065 | |
| 4066 | int ib_dma_virt_map_sg(struct ib_device *dev, struct scatterlist *sg, int nents); |
| 4067 | static inline int ib_dma_map_sg_attrs(struct ib_device *dev, |
| 4068 | struct scatterlist *sg, int nents, |
| 4069 | enum dma_data_direction direction, |
| 4070 | unsigned long dma_attrs) |
| 4071 | { |
| 4072 | if (ib_uses_virt_dma(dev)) |
| 4073 | return ib_dma_virt_map_sg(dev, sg, nents); |
| 4074 | return dma_map_sg_attrs(dev->dma_device, sg, nents, direction, |
| 4075 | dma_attrs); |
| 4076 | } |
| 4077 | |
| 4078 | static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev, |
| 4079 | struct scatterlist *sg, int nents, |
| 4080 | enum dma_data_direction direction, |
| 4081 | unsigned long dma_attrs) |
| 4082 | { |
| 4083 | if (!ib_uses_virt_dma(dev)) |
| 4084 | dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction, |
| 4085 | dma_attrs); |
Ralph Campbell | 9b51309 | 2006-12-12 14:27:41 -0800 | [diff] [blame] | 4086 | } |
| 4087 | |
| 4088 | /** |
Maor Gottlieb | 79fbd3e | 2021-08-24 17:25:31 +0300 | [diff] [blame] | 4089 | * ib_dma_map_sgtable_attrs - Map a scatter/gather table to DMA addresses |
| 4090 | * @dev: The device for which the DMA addresses are to be created |
| 4091 | * @sg: The sg_table object describing the buffer |
| 4092 | * @direction: The direction of the DMA |
| 4093 | * @attrs: Optional DMA attributes for the map operation |
| 4094 | */ |
| 4095 | static inline int ib_dma_map_sgtable_attrs(struct ib_device *dev, |
| 4096 | struct sg_table *sgt, |
| 4097 | enum dma_data_direction direction, |
| 4098 | unsigned long dma_attrs) |
| 4099 | { |
| 4100 | if (ib_uses_virt_dma(dev)) { |
| 4101 | ib_dma_virt_map_sg(dev, sgt->sgl, sgt->orig_nents); |
| 4102 | return 0; |
| 4103 | } |
| 4104 | return dma_map_sgtable(dev->dma_device, sgt, direction, dma_attrs); |
| 4105 | } |
| 4106 | |
| 4107 | static inline void ib_dma_unmap_sgtable_attrs(struct ib_device *dev, |
| 4108 | struct sg_table *sgt, |
| 4109 | enum dma_data_direction direction, |
| 4110 | unsigned long dma_attrs) |
| 4111 | { |
| 4112 | if (!ib_uses_virt_dma(dev)) |
| 4113 | dma_unmap_sgtable(dev->dma_device, sgt, direction, dma_attrs); |
| 4114 | } |
| 4115 | |
| 4116 | /** |
Ralph Campbell | 9b51309 | 2006-12-12 14:27:41 -0800 | [diff] [blame] | 4117 | * ib_dma_map_sg - Map a scatter/gather list to DMA addresses |
| 4118 | * @dev: The device for which the DMA addresses are to be created |
| 4119 | * @sg: The array of scatter/gather entries |
| 4120 | * @nents: The number of scatter/gather entries |
| 4121 | * @direction: The direction of the DMA |
| 4122 | */ |
| 4123 | static inline int ib_dma_map_sg(struct ib_device *dev, |
| 4124 | struct scatterlist *sg, int nents, |
| 4125 | enum dma_data_direction direction) |
| 4126 | { |
Christoph Hellwig | 5a7a9e0 | 2020-11-06 19:19:38 +0100 | [diff] [blame] | 4127 | return ib_dma_map_sg_attrs(dev, sg, nents, direction, 0); |
Ralph Campbell | 9b51309 | 2006-12-12 14:27:41 -0800 | [diff] [blame] | 4128 | } |
| 4129 | |
| 4130 | /** |
| 4131 | * ib_dma_unmap_sg - Unmap a scatter/gather list of DMA addresses |
| 4132 | * @dev: The device for which the DMA addresses were created |
| 4133 | * @sg: The array of scatter/gather entries |
| 4134 | * @nents: The number of scatter/gather entries |
| 4135 | * @direction: The direction of the DMA |
| 4136 | */ |
| 4137 | static inline void ib_dma_unmap_sg(struct ib_device *dev, |
| 4138 | struct scatterlist *sg, int nents, |
| 4139 | enum dma_data_direction direction) |
| 4140 | { |
Christoph Hellwig | 5a7a9e0 | 2020-11-06 19:19:38 +0100 | [diff] [blame] | 4141 | ib_dma_unmap_sg_attrs(dev, sg, nents, direction, 0); |
Arthur Kepner | cb9fbc5 | 2008-04-29 01:00:34 -0700 | [diff] [blame] | 4142 | } |
Ralph Campbell | 9b51309 | 2006-12-12 14:27:41 -0800 | [diff] [blame] | 4143 | |
| 4144 | /** |
Bart Van Assche | 0b5cb33 | 2019-01-22 10:25:20 -0800 | [diff] [blame] | 4145 | * ib_dma_max_seg_size - Return the size limit of a single DMA transfer |
| 4146 | * @dev: The device to query |
| 4147 | * |
| 4148 | * The returned value represents a size in bytes. |
| 4149 | */ |
| 4150 | static inline unsigned int ib_dma_max_seg_size(struct ib_device *dev) |
| 4151 | { |
Christoph Hellwig | 5a7a9e0 | 2020-11-06 19:19:38 +0100 | [diff] [blame] | 4152 | if (ib_uses_virt_dma(dev)) |
| 4153 | return UINT_MAX; |
Bart Van Assche | ecdfdfd | 2019-10-25 15:58:27 -0700 | [diff] [blame] | 4154 | return dma_get_max_seg_size(dev->dma_device); |
Bart Van Assche | 0b5cb33 | 2019-01-22 10:25:20 -0800 | [diff] [blame] | 4155 | } |
| 4156 | |
| 4157 | /** |
Ralph Campbell | 9b51309 | 2006-12-12 14:27:41 -0800 | [diff] [blame] | 4158 | * ib_dma_sync_single_for_cpu - Prepare DMA region to be accessed by CPU |
| 4159 | * @dev: The device for which the DMA address was created |
| 4160 | * @addr: The DMA address |
| 4161 | * @size: The size of the region in bytes |
| 4162 | * @dir: The direction of the DMA |
| 4163 | */ |
| 4164 | static inline void ib_dma_sync_single_for_cpu(struct ib_device *dev, |
| 4165 | u64 addr, |
| 4166 | size_t size, |
| 4167 | enum dma_data_direction dir) |
| 4168 | { |
Christoph Hellwig | 5a7a9e0 | 2020-11-06 19:19:38 +0100 | [diff] [blame] | 4169 | if (!ib_uses_virt_dma(dev)) |
| 4170 | dma_sync_single_for_cpu(dev->dma_device, addr, size, dir); |
Ralph Campbell | 9b51309 | 2006-12-12 14:27:41 -0800 | [diff] [blame] | 4171 | } |
| 4172 | |
| 4173 | /** |
| 4174 | * ib_dma_sync_single_for_device - Prepare DMA region to be accessed by device |
| 4175 | * @dev: The device for which the DMA address was created |
| 4176 | * @addr: The DMA address |
| 4177 | * @size: The size of the region in bytes |
| 4178 | * @dir: The direction of the DMA |
| 4179 | */ |
| 4180 | static inline void ib_dma_sync_single_for_device(struct ib_device *dev, |
| 4181 | u64 addr, |
| 4182 | size_t size, |
| 4183 | enum dma_data_direction dir) |
| 4184 | { |
Christoph Hellwig | 5a7a9e0 | 2020-11-06 19:19:38 +0100 | [diff] [blame] | 4185 | if (!ib_uses_virt_dma(dev)) |
| 4186 | dma_sync_single_for_device(dev->dma_device, addr, size, dir); |
Ralph Campbell | 9b51309 | 2006-12-12 14:27:41 -0800 | [diff] [blame] | 4187 | } |
| 4188 | |
Moni Shoua | 33006bd | 2020-01-15 14:43:32 +0200 | [diff] [blame] | 4189 | /* ib_reg_user_mr - register a memory region for virtual addresses from kernel |
| 4190 | * space. This function should be called when 'current' is the owning MM. |
| 4191 | */ |
| 4192 | struct ib_mr *ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, |
| 4193 | u64 virt_addr, int mr_access_flags); |
| 4194 | |
Moni Shoua | 87d8069f | 2020-01-15 14:43:33 +0200 | [diff] [blame] | 4195 | /* ib_advise_mr - give an advice about an address range in a memory region */ |
| 4196 | int ib_advise_mr(struct ib_pd *pd, enum ib_uverbs_advise_mr_advice advice, |
| 4197 | u32 flags, struct ib_sge *sg_list, u32 num_sge); |
Ralph Campbell | 9b51309 | 2006-12-12 14:27:41 -0800 | [diff] [blame] | 4198 | /** |
Shamir Rabinovitch | c4367a2 | 2019-03-31 19:10:05 +0300 | [diff] [blame] | 4199 | * ib_dereg_mr_user - Deregisters a memory region and removes it from the |
| 4200 | * HCA translation table. |
| 4201 | * @mr: The memory region to deregister. |
| 4202 | * @udata: Valid user data or NULL for kernel object |
| 4203 | * |
| 4204 | * This function can fail, if the memory region has memory windows bound to it. |
| 4205 | */ |
| 4206 | int ib_dereg_mr_user(struct ib_mr *mr, struct ib_udata *udata); |
| 4207 | |
| 4208 | /** |
| 4209 | * ib_dereg_mr - Deregisters a kernel memory region and removes it from the |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4210 | * HCA translation table. |
| 4211 | * @mr: The memory region to deregister. |
Shani Michaeli | 7083e42 | 2013-02-06 16:19:12 +0000 | [diff] [blame] | 4212 | * |
| 4213 | * This function can fail, if the memory region has memory windows bound to it. |
Shamir Rabinovitch | c4367a2 | 2019-03-31 19:10:05 +0300 | [diff] [blame] | 4214 | * |
| 4215 | * NOTE: for user mr use ib_dereg_mr_user with valid udata! |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4216 | */ |
Shamir Rabinovitch | c4367a2 | 2019-03-31 19:10:05 +0300 | [diff] [blame] | 4217 | static inline int ib_dereg_mr(struct ib_mr *mr) |
| 4218 | { |
| 4219 | return ib_dereg_mr_user(mr, NULL); |
| 4220 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4221 | |
Gal Pressman | b64b74b | 2020-07-06 15:03:42 +0300 | [diff] [blame] | 4222 | struct ib_mr *ib_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type, |
| 4223 | u32 max_num_sg); |
Steve Wise | 00f7ec3 | 2008-07-14 23:48:45 -0700 | [diff] [blame] | 4224 | |
Israel Rukshin | 26bc7ea | 2019-06-11 18:52:39 +0300 | [diff] [blame] | 4225 | struct ib_mr *ib_alloc_mr_integrity(struct ib_pd *pd, |
| 4226 | u32 max_num_data_sg, |
| 4227 | u32 max_num_meta_sg); |
| 4228 | |
Steve Wise | 00f7ec3 | 2008-07-14 23:48:45 -0700 | [diff] [blame] | 4229 | /** |
Steve Wise | 00f7ec3 | 2008-07-14 23:48:45 -0700 | [diff] [blame] | 4230 | * ib_update_fast_reg_key - updates the key portion of the fast_reg MR |
| 4231 | * R_Key and L_Key. |
| 4232 | * @mr - struct ib_mr pointer to be updated. |
| 4233 | * @newkey - new key to be used. |
| 4234 | */ |
| 4235 | static inline void ib_update_fast_reg_key(struct ib_mr *mr, u8 newkey) |
| 4236 | { |
| 4237 | mr->lkey = (mr->lkey & 0xffffff00) | newkey; |
| 4238 | mr->rkey = (mr->rkey & 0xffffff00) | newkey; |
| 4239 | } |
| 4240 | |
| 4241 | /** |
Shani Michaeli | 7083e42 | 2013-02-06 16:19:12 +0000 | [diff] [blame] | 4242 | * ib_inc_rkey - increments the key portion of the given rkey. Can be used |
| 4243 | * for calculating a new rkey for type 2 memory windows. |
| 4244 | * @rkey - the rkey to increment. |
| 4245 | */ |
| 4246 | static inline u32 ib_inc_rkey(u32 rkey) |
| 4247 | { |
| 4248 | const u32 mask = 0x000000ff; |
| 4249 | return ((rkey + 1) & mask) | (rkey & ~mask); |
| 4250 | } |
| 4251 | |
| 4252 | /** |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4253 | * ib_attach_mcast - Attaches the specified QP to a multicast group. |
| 4254 | * @qp: QP to attach to the multicast group. The QP must be type |
| 4255 | * IB_QPT_UD. |
| 4256 | * @gid: Multicast group GID. |
| 4257 | * @lid: Multicast group LID in host byte order. |
| 4258 | * |
| 4259 | * In order to send and receive multicast packets, subnet |
| 4260 | * administration must have created the multicast group and configured |
| 4261 | * the fabric appropriately. The port associated with the specified |
| 4262 | * QP must also be a member of the multicast group. |
| 4263 | */ |
| 4264 | int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid); |
| 4265 | |
| 4266 | /** |
| 4267 | * ib_detach_mcast - Detaches the specified QP from a multicast group. |
| 4268 | * @qp: QP to detach from the multicast group. |
| 4269 | * @gid: Multicast group GID. |
| 4270 | * @lid: Multicast group LID in host byte order. |
| 4271 | */ |
| 4272 | int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid); |
| 4273 | |
Maor Gottlieb | b73efcb | 2020-07-06 15:27:15 +0300 | [diff] [blame] | 4274 | struct ib_xrcd *ib_alloc_xrcd_user(struct ib_device *device, |
| 4275 | struct inode *inode, struct ib_udata *udata); |
| 4276 | int ib_dealloc_xrcd_user(struct ib_xrcd *xrcd, struct ib_udata *udata); |
Sean Hefty | 59991f9 | 2011-05-23 17:52:46 -0700 | [diff] [blame] | 4277 | |
Jason Gunthorpe | adac4cb | 2020-11-30 09:58:36 +0200 | [diff] [blame] | 4278 | static inline int ib_check_mr_access(struct ib_device *ib_dev, |
| 4279 | unsigned int flags) |
Eli Cohen | 1c636f8 | 2013-10-31 15:26:32 +0200 | [diff] [blame] | 4280 | { |
| 4281 | /* |
| 4282 | * Local write permission is required if remote write or |
| 4283 | * remote atomic permission is also requested. |
| 4284 | */ |
| 4285 | if (flags & (IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_REMOTE_WRITE) && |
| 4286 | !(flags & IB_ACCESS_LOCAL_WRITE)) |
| 4287 | return -EINVAL; |
| 4288 | |
Michael Guralnik | ca95c14 | 2020-01-08 20:05:35 +0200 | [diff] [blame] | 4289 | if (flags & ~IB_ACCESS_SUPPORTED) |
| 4290 | return -EINVAL; |
| 4291 | |
Jason Gunthorpe | adac4cb | 2020-11-30 09:58:36 +0200 | [diff] [blame] | 4292 | if (flags & IB_ACCESS_ON_DEMAND && |
| 4293 | !(ib_dev->attrs.device_cap_flags & IB_DEVICE_ON_DEMAND_PAGING)) |
| 4294 | return -EINVAL; |
Eli Cohen | 1c636f8 | 2013-10-31 15:26:32 +0200 | [diff] [blame] | 4295 | return 0; |
| 4296 | } |
| 4297 | |
Jack Morgenstein | 08bb558 | 2018-05-23 15:30:30 +0300 | [diff] [blame] | 4298 | static inline bool ib_access_writable(int access_flags) |
| 4299 | { |
| 4300 | /* |
| 4301 | * We have writable memory backing the MR if any of the following |
| 4302 | * access flags are set. "Local write" and "remote write" obviously |
| 4303 | * require write access. "Remote atomic" can do things like fetch and |
| 4304 | * add, which will modify memory, and "MW bind" can change permissions |
| 4305 | * by binding a window. |
| 4306 | */ |
| 4307 | return access_flags & |
| 4308 | (IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE | |
| 4309 | IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_MW_BIND); |
| 4310 | } |
| 4311 | |
Sagi Grimberg | 1b01d33 | 2014-02-23 14:19:05 +0200 | [diff] [blame] | 4312 | /** |
| 4313 | * ib_check_mr_status: lightweight check of MR status. |
| 4314 | * This routine may provide status checks on a selected |
| 4315 | * ib_mr. first use is for signature status check. |
| 4316 | * |
| 4317 | * @mr: A memory region. |
| 4318 | * @check_mask: Bitmask of which checks to perform from |
| 4319 | * ib_mr_status_check enumeration. |
| 4320 | * @mr_status: The container of relevant status checks. |
| 4321 | * failed checks will be indicated in the status bitmask |
| 4322 | * and the relevant info shall be in the error item. |
| 4323 | */ |
| 4324 | int ib_check_mr_status(struct ib_mr *mr, u32 check_mask, |
| 4325 | struct ib_mr_status *mr_status); |
| 4326 | |
Jason Gunthorpe | d79af72 | 2019-01-10 14:02:24 -0700 | [diff] [blame] | 4327 | /** |
| 4328 | * ib_device_try_get: Hold a registration lock |
| 4329 | * device: The device to lock |
| 4330 | * |
| 4331 | * A device under an active registration lock cannot become unregistered. It |
| 4332 | * is only possible to obtain a registration lock on a device that is fully |
| 4333 | * registered, otherwise this function returns false. |
| 4334 | * |
| 4335 | * The registration lock is only necessary for actions which require the |
| 4336 | * device to still be registered. Uses that only require the device pointer to |
| 4337 | * be valid should use get_device(&ibdev->dev) to hold the memory. |
| 4338 | * |
| 4339 | */ |
| 4340 | static inline bool ib_device_try_get(struct ib_device *dev) |
| 4341 | { |
| 4342 | return refcount_inc_not_zero(&dev->refcount); |
| 4343 | } |
| 4344 | |
| 4345 | void ib_device_put(struct ib_device *device); |
Jason Gunthorpe | 324e227 | 2019-02-12 21:12:51 -0700 | [diff] [blame] | 4346 | struct ib_device *ib_device_get_by_netdev(struct net_device *ndev, |
| 4347 | enum rdma_driver_id driver_id); |
| 4348 | struct ib_device *ib_device_get_by_name(const char *name, |
| 4349 | enum rdma_driver_id driver_id); |
Mark Bloch | 1fb7f89 | 2021-03-01 09:04:20 +0200 | [diff] [blame] | 4350 | struct net_device *ib_get_net_dev_by_params(struct ib_device *dev, u32 port, |
Yotam Kenneth | 9268f72 | 2015-07-30 17:50:15 +0300 | [diff] [blame] | 4351 | u16 pkey, const union ib_gid *gid, |
| 4352 | const struct sockaddr *addr); |
Jason Gunthorpe | c2261dd | 2019-02-12 21:12:50 -0700 | [diff] [blame] | 4353 | int ib_device_set_netdev(struct ib_device *ib_dev, struct net_device *ndev, |
| 4354 | unsigned int port); |
Mark Bloch | 1fb7f89 | 2021-03-01 09:04:20 +0200 | [diff] [blame] | 4355 | struct net_device *ib_device_netdev(struct ib_device *dev, u32 port); |
Jason Gunthorpe | c2261dd | 2019-02-12 21:12:50 -0700 | [diff] [blame] | 4356 | |
Yishai Hadas | 5fd251c | 2016-05-23 15:20:48 +0300 | [diff] [blame] | 4357 | struct ib_wq *ib_create_wq(struct ib_pd *pd, |
| 4358 | struct ib_wq_init_attr *init_attr); |
Leon Romanovsky | add5353 | 2020-09-07 15:09:20 +0300 | [diff] [blame] | 4359 | int ib_destroy_wq_user(struct ib_wq *wq, struct ib_udata *udata); |
Yotam Kenneth | 9268f72 | 2015-07-30 17:50:15 +0300 | [diff] [blame] | 4360 | |
Christoph Hellwig | ff2ba99 | 2016-05-03 18:01:04 +0200 | [diff] [blame] | 4361 | int ib_map_mr_sg(struct ib_mr *mr, struct scatterlist *sg, int sg_nents, |
Bart Van Assche | 9aa8b32 | 2016-05-12 10:49:15 -0700 | [diff] [blame] | 4362 | unsigned int *sg_offset, unsigned int page_size); |
Max Gurtovoy | 2cdfcdd | 2019-06-11 18:52:40 +0300 | [diff] [blame] | 4363 | int ib_map_mr_sg_pi(struct ib_mr *mr, struct scatterlist *data_sg, |
| 4364 | int data_sg_nents, unsigned int *data_sg_offset, |
| 4365 | struct scatterlist *meta_sg, int meta_sg_nents, |
| 4366 | unsigned int *meta_sg_offset, unsigned int page_size); |
Sagi Grimberg | 4c67e2b | 2015-10-13 19:11:24 +0300 | [diff] [blame] | 4367 | |
| 4368 | static inline int |
Christoph Hellwig | ff2ba99 | 2016-05-03 18:01:04 +0200 | [diff] [blame] | 4369 | ib_map_mr_sg_zbva(struct ib_mr *mr, struct scatterlist *sg, int sg_nents, |
Bart Van Assche | 9aa8b32 | 2016-05-12 10:49:15 -0700 | [diff] [blame] | 4370 | unsigned int *sg_offset, unsigned int page_size) |
Sagi Grimberg | 4c67e2b | 2015-10-13 19:11:24 +0300 | [diff] [blame] | 4371 | { |
| 4372 | int n; |
| 4373 | |
Christoph Hellwig | ff2ba99 | 2016-05-03 18:01:04 +0200 | [diff] [blame] | 4374 | n = ib_map_mr_sg(mr, sg, sg_nents, sg_offset, page_size); |
Sagi Grimberg | 4c67e2b | 2015-10-13 19:11:24 +0300 | [diff] [blame] | 4375 | mr->iova = 0; |
| 4376 | |
| 4377 | return n; |
| 4378 | } |
| 4379 | |
Christoph Hellwig | ff2ba99 | 2016-05-03 18:01:04 +0200 | [diff] [blame] | 4380 | int ib_sg_to_pages(struct ib_mr *mr, struct scatterlist *sgl, int sg_nents, |
Bart Van Assche | 9aa8b32 | 2016-05-12 10:49:15 -0700 | [diff] [blame] | 4381 | unsigned int *sg_offset, int (*set_page)(struct ib_mr *, u64)); |
Sagi Grimberg | 4c67e2b | 2015-10-13 19:11:24 +0300 | [diff] [blame] | 4382 | |
Steve Wise | 765d677 | 2016-02-17 08:15:41 -0800 | [diff] [blame] | 4383 | void ib_drain_rq(struct ib_qp *qp); |
| 4384 | void ib_drain_sq(struct ib_qp *qp); |
| 4385 | void ib_drain_qp(struct ib_qp *qp); |
Moni Shoua | 850d8fd | 2016-11-10 11:30:56 +0200 | [diff] [blame] | 4386 | |
Mark Bloch | 1fb7f89 | 2021-03-01 09:04:20 +0200 | [diff] [blame] | 4387 | int ib_get_eth_speed(struct ib_device *dev, u32 port_num, u16 *speed, |
| 4388 | u8 *width); |
Dasaratharaman Chandramouli | 2224c47 | 2017-04-29 14:41:27 -0400 | [diff] [blame] | 4389 | |
| 4390 | static inline u8 *rdma_ah_retrieve_dmac(struct rdma_ah_attr *attr) |
| 4391 | { |
Dasaratharaman Chandramouli | 44c5848 | 2017-04-29 14:41:29 -0400 | [diff] [blame] | 4392 | if (attr->type == RDMA_AH_ATTR_TYPE_ROCE) |
| 4393 | return attr->roce.dmac; |
| 4394 | return NULL; |
Dasaratharaman Chandramouli | 2224c47 | 2017-04-29 14:41:27 -0400 | [diff] [blame] | 4395 | } |
| 4396 | |
Dasaratharaman Chandramouli | 64b4646 | 2017-04-29 14:41:30 -0400 | [diff] [blame] | 4397 | static inline void rdma_ah_set_dlid(struct rdma_ah_attr *attr, u32 dlid) |
Dasaratharaman Chandramouli | 2224c47 | 2017-04-29 14:41:27 -0400 | [diff] [blame] | 4398 | { |
Dasaratharaman Chandramouli | 44c5848 | 2017-04-29 14:41:29 -0400 | [diff] [blame] | 4399 | if (attr->type == RDMA_AH_ATTR_TYPE_IB) |
Dasaratharaman Chandramouli | 64b4646 | 2017-04-29 14:41:30 -0400 | [diff] [blame] | 4400 | attr->ib.dlid = (u16)dlid; |
| 4401 | else if (attr->type == RDMA_AH_ATTR_TYPE_OPA) |
| 4402 | attr->opa.dlid = dlid; |
Dasaratharaman Chandramouli | 2224c47 | 2017-04-29 14:41:27 -0400 | [diff] [blame] | 4403 | } |
| 4404 | |
Dasaratharaman Chandramouli | 64b4646 | 2017-04-29 14:41:30 -0400 | [diff] [blame] | 4405 | static inline u32 rdma_ah_get_dlid(const struct rdma_ah_attr *attr) |
Dasaratharaman Chandramouli | 2224c47 | 2017-04-29 14:41:27 -0400 | [diff] [blame] | 4406 | { |
Dasaratharaman Chandramouli | 44c5848 | 2017-04-29 14:41:29 -0400 | [diff] [blame] | 4407 | if (attr->type == RDMA_AH_ATTR_TYPE_IB) |
| 4408 | return attr->ib.dlid; |
Dasaratharaman Chandramouli | 64b4646 | 2017-04-29 14:41:30 -0400 | [diff] [blame] | 4409 | else if (attr->type == RDMA_AH_ATTR_TYPE_OPA) |
| 4410 | return attr->opa.dlid; |
Dasaratharaman Chandramouli | 44c5848 | 2017-04-29 14:41:29 -0400 | [diff] [blame] | 4411 | return 0; |
Dasaratharaman Chandramouli | 2224c47 | 2017-04-29 14:41:27 -0400 | [diff] [blame] | 4412 | } |
| 4413 | |
| 4414 | static inline void rdma_ah_set_sl(struct rdma_ah_attr *attr, u8 sl) |
| 4415 | { |
| 4416 | attr->sl = sl; |
| 4417 | } |
| 4418 | |
| 4419 | static inline u8 rdma_ah_get_sl(const struct rdma_ah_attr *attr) |
| 4420 | { |
| 4421 | return attr->sl; |
| 4422 | } |
| 4423 | |
| 4424 | static inline void rdma_ah_set_path_bits(struct rdma_ah_attr *attr, |
| 4425 | u8 src_path_bits) |
| 4426 | { |
Dasaratharaman Chandramouli | 44c5848 | 2017-04-29 14:41:29 -0400 | [diff] [blame] | 4427 | if (attr->type == RDMA_AH_ATTR_TYPE_IB) |
| 4428 | attr->ib.src_path_bits = src_path_bits; |
Dasaratharaman Chandramouli | 64b4646 | 2017-04-29 14:41:30 -0400 | [diff] [blame] | 4429 | else if (attr->type == RDMA_AH_ATTR_TYPE_OPA) |
| 4430 | attr->opa.src_path_bits = src_path_bits; |
Dasaratharaman Chandramouli | 2224c47 | 2017-04-29 14:41:27 -0400 | [diff] [blame] | 4431 | } |
| 4432 | |
| 4433 | static inline u8 rdma_ah_get_path_bits(const struct rdma_ah_attr *attr) |
| 4434 | { |
Dasaratharaman Chandramouli | 44c5848 | 2017-04-29 14:41:29 -0400 | [diff] [blame] | 4435 | if (attr->type == RDMA_AH_ATTR_TYPE_IB) |
| 4436 | return attr->ib.src_path_bits; |
Dasaratharaman Chandramouli | 64b4646 | 2017-04-29 14:41:30 -0400 | [diff] [blame] | 4437 | else if (attr->type == RDMA_AH_ATTR_TYPE_OPA) |
| 4438 | return attr->opa.src_path_bits; |
Dasaratharaman Chandramouli | 44c5848 | 2017-04-29 14:41:29 -0400 | [diff] [blame] | 4439 | return 0; |
Dasaratharaman Chandramouli | 2224c47 | 2017-04-29 14:41:27 -0400 | [diff] [blame] | 4440 | } |
| 4441 | |
Don Hiatt | d98bb7f | 2017-08-04 13:54:16 -0700 | [diff] [blame] | 4442 | static inline void rdma_ah_set_make_grd(struct rdma_ah_attr *attr, |
| 4443 | bool make_grd) |
| 4444 | { |
| 4445 | if (attr->type == RDMA_AH_ATTR_TYPE_OPA) |
| 4446 | attr->opa.make_grd = make_grd; |
| 4447 | } |
| 4448 | |
| 4449 | static inline bool rdma_ah_get_make_grd(const struct rdma_ah_attr *attr) |
| 4450 | { |
| 4451 | if (attr->type == RDMA_AH_ATTR_TYPE_OPA) |
| 4452 | return attr->opa.make_grd; |
| 4453 | return false; |
| 4454 | } |
| 4455 | |
Mark Bloch | 1fb7f89 | 2021-03-01 09:04:20 +0200 | [diff] [blame] | 4456 | static inline void rdma_ah_set_port_num(struct rdma_ah_attr *attr, u32 port_num) |
Dasaratharaman Chandramouli | 2224c47 | 2017-04-29 14:41:27 -0400 | [diff] [blame] | 4457 | { |
| 4458 | attr->port_num = port_num; |
| 4459 | } |
| 4460 | |
Mark Bloch | 1fb7f89 | 2021-03-01 09:04:20 +0200 | [diff] [blame] | 4461 | static inline u32 rdma_ah_get_port_num(const struct rdma_ah_attr *attr) |
Dasaratharaman Chandramouli | 2224c47 | 2017-04-29 14:41:27 -0400 | [diff] [blame] | 4462 | { |
| 4463 | return attr->port_num; |
| 4464 | } |
| 4465 | |
| 4466 | static inline void rdma_ah_set_static_rate(struct rdma_ah_attr *attr, |
| 4467 | u8 static_rate) |
| 4468 | { |
| 4469 | attr->static_rate = static_rate; |
| 4470 | } |
| 4471 | |
| 4472 | static inline u8 rdma_ah_get_static_rate(const struct rdma_ah_attr *attr) |
| 4473 | { |
| 4474 | return attr->static_rate; |
| 4475 | } |
| 4476 | |
| 4477 | static inline void rdma_ah_set_ah_flags(struct rdma_ah_attr *attr, |
| 4478 | enum ib_ah_flags flag) |
| 4479 | { |
| 4480 | attr->ah_flags = flag; |
| 4481 | } |
| 4482 | |
| 4483 | static inline enum ib_ah_flags |
| 4484 | rdma_ah_get_ah_flags(const struct rdma_ah_attr *attr) |
| 4485 | { |
| 4486 | return attr->ah_flags; |
| 4487 | } |
| 4488 | |
| 4489 | static inline const struct ib_global_route |
| 4490 | *rdma_ah_read_grh(const struct rdma_ah_attr *attr) |
| 4491 | { |
| 4492 | return &attr->grh; |
| 4493 | } |
| 4494 | |
| 4495 | /*To retrieve and modify the grh */ |
| 4496 | static inline struct ib_global_route |
| 4497 | *rdma_ah_retrieve_grh(struct rdma_ah_attr *attr) |
| 4498 | { |
| 4499 | return &attr->grh; |
| 4500 | } |
| 4501 | |
| 4502 | static inline void rdma_ah_set_dgid_raw(struct rdma_ah_attr *attr, void *dgid) |
| 4503 | { |
| 4504 | struct ib_global_route *grh = rdma_ah_retrieve_grh(attr); |
| 4505 | |
| 4506 | memcpy(grh->dgid.raw, dgid, sizeof(grh->dgid)); |
| 4507 | } |
| 4508 | |
| 4509 | static inline void rdma_ah_set_subnet_prefix(struct rdma_ah_attr *attr, |
| 4510 | __be64 prefix) |
| 4511 | { |
| 4512 | struct ib_global_route *grh = rdma_ah_retrieve_grh(attr); |
| 4513 | |
| 4514 | grh->dgid.global.subnet_prefix = prefix; |
| 4515 | } |
| 4516 | |
| 4517 | static inline void rdma_ah_set_interface_id(struct rdma_ah_attr *attr, |
| 4518 | __be64 if_id) |
| 4519 | { |
| 4520 | struct ib_global_route *grh = rdma_ah_retrieve_grh(attr); |
| 4521 | |
| 4522 | grh->dgid.global.interface_id = if_id; |
| 4523 | } |
| 4524 | |
| 4525 | static inline void rdma_ah_set_grh(struct rdma_ah_attr *attr, |
| 4526 | union ib_gid *dgid, u32 flow_label, |
| 4527 | u8 sgid_index, u8 hop_limit, |
| 4528 | u8 traffic_class) |
| 4529 | { |
| 4530 | struct ib_global_route *grh = rdma_ah_retrieve_grh(attr); |
| 4531 | |
| 4532 | attr->ah_flags = IB_AH_GRH; |
| 4533 | if (dgid) |
| 4534 | grh->dgid = *dgid; |
| 4535 | grh->flow_label = flow_label; |
| 4536 | grh->sgid_index = sgid_index; |
| 4537 | grh->hop_limit = hop_limit; |
| 4538 | grh->traffic_class = traffic_class; |
Jason Gunthorpe | 8d9ec9a | 2018-06-13 10:22:03 +0300 | [diff] [blame] | 4539 | grh->sgid_attr = NULL; |
Dasaratharaman Chandramouli | 2224c47 | 2017-04-29 14:41:27 -0400 | [diff] [blame] | 4540 | } |
Dasaratharaman Chandramouli | 44c5848 | 2017-04-29 14:41:29 -0400 | [diff] [blame] | 4541 | |
Jason Gunthorpe | 8d9ec9a | 2018-06-13 10:22:03 +0300 | [diff] [blame] | 4542 | void rdma_destroy_ah_attr(struct rdma_ah_attr *ah_attr); |
| 4543 | void rdma_move_grh_sgid_attr(struct rdma_ah_attr *attr, union ib_gid *dgid, |
| 4544 | u32 flow_label, u8 hop_limit, u8 traffic_class, |
| 4545 | const struct ib_gid_attr *sgid_attr); |
Jason Gunthorpe | d97099f | 2018-06-13 10:22:05 +0300 | [diff] [blame] | 4546 | void rdma_copy_ah_attr(struct rdma_ah_attr *dest, |
| 4547 | const struct rdma_ah_attr *src); |
| 4548 | void rdma_replace_ah_attr(struct rdma_ah_attr *old, |
| 4549 | const struct rdma_ah_attr *new); |
| 4550 | void rdma_move_ah_attr(struct rdma_ah_attr *dest, struct rdma_ah_attr *src); |
Jason Gunthorpe | 8d9ec9a | 2018-06-13 10:22:03 +0300 | [diff] [blame] | 4551 | |
Don Hiatt | 87daac6 | 2018-02-01 10:57:03 -0800 | [diff] [blame] | 4552 | /** |
| 4553 | * rdma_ah_find_type - Return address handle type. |
| 4554 | * |
| 4555 | * @dev: Device to be checked |
| 4556 | * @port_num: Port number |
| 4557 | */ |
Dasaratharaman Chandramouli | 44c5848 | 2017-04-29 14:41:29 -0400 | [diff] [blame] | 4558 | static inline enum rdma_ah_attr_type rdma_ah_find_type(struct ib_device *dev, |
Mark Bloch | 1fb7f89 | 2021-03-01 09:04:20 +0200 | [diff] [blame] | 4559 | u32 port_num) |
Dasaratharaman Chandramouli | 44c5848 | 2017-04-29 14:41:29 -0400 | [diff] [blame] | 4560 | { |
Parav Pandit | a6532e7 | 2018-01-12 07:58:42 +0200 | [diff] [blame] | 4561 | if (rdma_protocol_roce(dev, port_num)) |
Dasaratharaman Chandramouli | 44c5848 | 2017-04-29 14:41:29 -0400 | [diff] [blame] | 4562 | return RDMA_AH_ATTR_TYPE_ROCE; |
Don Hiatt | 87daac6 | 2018-02-01 10:57:03 -0800 | [diff] [blame] | 4563 | if (rdma_protocol_ib(dev, port_num)) { |
| 4564 | if (rdma_cap_opa_ah(dev, port_num)) |
| 4565 | return RDMA_AH_ATTR_TYPE_OPA; |
Dasaratharaman Chandramouli | 44c5848 | 2017-04-29 14:41:29 -0400 | [diff] [blame] | 4566 | return RDMA_AH_ATTR_TYPE_IB; |
Don Hiatt | 87daac6 | 2018-02-01 10:57:03 -0800 | [diff] [blame] | 4567 | } |
| 4568 | |
| 4569 | return RDMA_AH_ATTR_TYPE_UNDEFINED; |
Dasaratharaman Chandramouli | 44c5848 | 2017-04-29 14:41:29 -0400 | [diff] [blame] | 4570 | } |
Hiatt, Don | 7db20ec | 2017-06-08 13:37:49 -0400 | [diff] [blame] | 4571 | |
Hiatt, Don | 62ede77 | 2017-08-14 14:17:43 -0400 | [diff] [blame] | 4572 | /** |
| 4573 | * ib_lid_cpu16 - Return lid in 16bit CPU encoding. |
| 4574 | * In the current implementation the only way to get |
| 4575 | * get the 32bit lid is from other sources for OPA. |
| 4576 | * For IB, lids will always be 16bits so cast the |
| 4577 | * value accordingly. |
| 4578 | * |
| 4579 | * @lid: A 32bit LID |
| 4580 | */ |
| 4581 | static inline u16 ib_lid_cpu16(u32 lid) |
Hiatt, Don | 7db20ec | 2017-06-08 13:37:49 -0400 | [diff] [blame] | 4582 | { |
Hiatt, Don | 62ede77 | 2017-08-14 14:17:43 -0400 | [diff] [blame] | 4583 | WARN_ON_ONCE(lid & 0xFFFF0000); |
| 4584 | return (u16)lid; |
Hiatt, Don | 7db20ec | 2017-06-08 13:37:49 -0400 | [diff] [blame] | 4585 | } |
| 4586 | |
Hiatt, Don | 62ede77 | 2017-08-14 14:17:43 -0400 | [diff] [blame] | 4587 | /** |
| 4588 | * ib_lid_be16 - Return lid in 16bit BE encoding. |
| 4589 | * |
| 4590 | * @lid: A 32bit LID |
| 4591 | */ |
| 4592 | static inline __be16 ib_lid_be16(u32 lid) |
Hiatt, Don | 7db20ec | 2017-06-08 13:37:49 -0400 | [diff] [blame] | 4593 | { |
Hiatt, Don | 62ede77 | 2017-08-14 14:17:43 -0400 | [diff] [blame] | 4594 | WARN_ON_ONCE(lid & 0xFFFF0000); |
| 4595 | return cpu_to_be16((u16)lid); |
Hiatt, Don | 7db20ec | 2017-06-08 13:37:49 -0400 | [diff] [blame] | 4596 | } |
Doug Ledford | 3204383 | 2017-08-10 14:31:29 -0400 | [diff] [blame] | 4597 | |
Sagi Grimberg | c66cd35 | 2017-07-13 11:09:41 +0300 | [diff] [blame] | 4598 | /** |
| 4599 | * ib_get_vector_affinity - Get the affinity mappings of a given completion |
| 4600 | * vector |
| 4601 | * @device: the rdma device |
| 4602 | * @comp_vector: index of completion vector |
| 4603 | * |
| 4604 | * Returns NULL on failure, otherwise a corresponding cpu map of the |
| 4605 | * completion vector (returns all-cpus map if the device driver doesn't |
| 4606 | * implement get_vector_affinity). |
| 4607 | */ |
| 4608 | static inline const struct cpumask * |
| 4609 | ib_get_vector_affinity(struct ib_device *device, int comp_vector) |
| 4610 | { |
| 4611 | if (comp_vector < 0 || comp_vector >= device->num_comp_vectors || |
Kamal Heib | 3023a1e | 2018-12-10 21:09:48 +0200 | [diff] [blame] | 4612 | !device->ops.get_vector_affinity) |
Sagi Grimberg | c66cd35 | 2017-07-13 11:09:41 +0300 | [diff] [blame] | 4613 | return NULL; |
| 4614 | |
Kamal Heib | 3023a1e | 2018-12-10 21:09:48 +0200 | [diff] [blame] | 4615 | return device->ops.get_vector_affinity(device, comp_vector); |
Sagi Grimberg | c66cd35 | 2017-07-13 11:09:41 +0300 | [diff] [blame] | 4616 | |
| 4617 | } |
| 4618 | |
Daniel Jurgens | 32f69e4 | 2018-01-04 17:25:36 +0200 | [diff] [blame] | 4619 | /** |
| 4620 | * rdma_roce_rescan_device - Rescan all of the network devices in the system |
| 4621 | * and add their gids, as needed, to the relevant RoCE devices. |
| 4622 | * |
| 4623 | * @device: the rdma device |
| 4624 | */ |
| 4625 | void rdma_roce_rescan_device(struct ib_device *ibdev); |
| 4626 | |
Jason Gunthorpe | 8313c10 | 2018-11-25 20:51:13 +0200 | [diff] [blame] | 4627 | struct ib_ucontext *ib_uverbs_get_ucontext_file(struct ib_uverbs_file *ufile); |
Yishai Hadas | 7dc08dc | 2018-06-17 12:59:59 +0300 | [diff] [blame] | 4628 | |
Jason Gunthorpe | 15a1b4b | 2018-11-25 20:51:15 +0200 | [diff] [blame] | 4629 | int uverbs_destroy_def_handler(struct uverbs_attr_bundle *attrs); |
Denis Drozdov | f6a8a19 | 2018-08-14 14:08:51 +0300 | [diff] [blame] | 4630 | |
Mark Bloch | 1fb7f89 | 2021-03-01 09:04:20 +0200 | [diff] [blame] | 4631 | struct net_device *rdma_alloc_netdev(struct ib_device *device, u32 port_num, |
Denis Drozdov | f6a8a19 | 2018-08-14 14:08:51 +0300 | [diff] [blame] | 4632 | enum rdma_netdev_t type, const char *name, |
| 4633 | unsigned char name_assign_type, |
| 4634 | void (*setup)(struct net_device *)); |
Denis Drozdov | 5d6b0cb | 2018-08-14 14:22:35 +0300 | [diff] [blame] | 4635 | |
Mark Bloch | 1fb7f89 | 2021-03-01 09:04:20 +0200 | [diff] [blame] | 4636 | int rdma_init_netdev(struct ib_device *device, u32 port_num, |
Denis Drozdov | 5d6b0cb | 2018-08-14 14:22:35 +0300 | [diff] [blame] | 4637 | enum rdma_netdev_t type, const char *name, |
| 4638 | unsigned char name_assign_type, |
| 4639 | void (*setup)(struct net_device *), |
| 4640 | struct net_device *netdev); |
| 4641 | |
Parav Pandit | d4122f5 | 2018-10-11 22:31:53 +0300 | [diff] [blame] | 4642 | /** |
Parav Pandit | 5474723 | 2018-12-18 14:15:56 +0200 | [diff] [blame] | 4643 | * rdma_device_to_ibdev - Get ib_device pointer from device pointer |
| 4644 | * |
| 4645 | * @device: device pointer for which ib_device pointer to retrieve |
| 4646 | * |
| 4647 | * rdma_device_to_ibdev() retrieves ib_device pointer from device. |
| 4648 | * |
| 4649 | */ |
| 4650 | static inline struct ib_device *rdma_device_to_ibdev(struct device *device) |
| 4651 | { |
Parav Pandit | cebe556 | 2019-02-26 13:56:11 +0200 | [diff] [blame] | 4652 | struct ib_core_device *coredev = |
| 4653 | container_of(device, struct ib_core_device, dev); |
| 4654 | |
| 4655 | return coredev->owner; |
Parav Pandit | 5474723 | 2018-12-18 14:15:56 +0200 | [diff] [blame] | 4656 | } |
| 4657 | |
| 4658 | /** |
Christoph Hellwig | 8ecfca6 | 2020-11-06 19:19:34 +0100 | [diff] [blame] | 4659 | * ibdev_to_node - return the NUMA node for a given ib_device |
| 4660 | * @dev: device to get the NUMA node for. |
| 4661 | */ |
| 4662 | static inline int ibdev_to_node(struct ib_device *ibdev) |
| 4663 | { |
| 4664 | struct device *parent = ibdev->dev.parent; |
| 4665 | |
| 4666 | if (!parent) |
| 4667 | return NUMA_NO_NODE; |
| 4668 | return dev_to_node(parent); |
| 4669 | } |
| 4670 | |
| 4671 | /** |
Parav Pandit | 5474723 | 2018-12-18 14:15:56 +0200 | [diff] [blame] | 4672 | * rdma_device_to_drv_device - Helper macro to reach back to driver's |
| 4673 | * ib_device holder structure from device pointer. |
| 4674 | * |
| 4675 | * NOTE: New drivers should not make use of this API; This API is only for |
| 4676 | * existing drivers who have exposed sysfs entries using |
Jason Gunthorpe | 915e4af | 2021-06-11 19:00:34 +0300 | [diff] [blame] | 4677 | * ops->device_group. |
Parav Pandit | 5474723 | 2018-12-18 14:15:56 +0200 | [diff] [blame] | 4678 | */ |
| 4679 | #define rdma_device_to_drv_device(dev, drv_dev_struct, ibdev_member) \ |
| 4680 | container_of(rdma_device_to_ibdev(dev), drv_dev_struct, ibdev_member) |
Parav Pandit | 41c6140 | 2019-02-26 14:01:46 +0200 | [diff] [blame] | 4681 | |
| 4682 | bool rdma_dev_access_netns(const struct ib_device *device, |
| 4683 | const struct net *net); |
Mark Zhang | d5665a2 | 2020-05-04 08:19:31 +0300 | [diff] [blame] | 4684 | |
| 4685 | #define IB_ROCE_UDP_ENCAP_VALID_PORT_MIN (0xC000) |
Weihang Li | 074bf2c | 2020-08-21 17:31:29 +0800 | [diff] [blame] | 4686 | #define IB_ROCE_UDP_ENCAP_VALID_PORT_MAX (0xFFFF) |
Mark Zhang | d5665a2 | 2020-05-04 08:19:31 +0300 | [diff] [blame] | 4687 | #define IB_GRH_FLOWLABEL_MASK (0x000FFFFF) |
| 4688 | |
| 4689 | /** |
| 4690 | * rdma_flow_label_to_udp_sport - generate a RoCE v2 UDP src port value based |
| 4691 | * on the flow_label |
| 4692 | * |
| 4693 | * This function will convert the 20 bit flow_label input to a valid RoCE v2 |
| 4694 | * UDP src port 14 bit value. All RoCE V2 drivers should use this same |
| 4695 | * convention. |
| 4696 | */ |
| 4697 | static inline u16 rdma_flow_label_to_udp_sport(u32 fl) |
| 4698 | { |
| 4699 | u32 fl_low = fl & 0x03fff, fl_high = fl & 0xFC000; |
| 4700 | |
| 4701 | fl_low ^= fl_high >> 14; |
| 4702 | return (u16)(fl_low | IB_ROCE_UDP_ENCAP_VALID_PORT_MIN); |
| 4703 | } |
| 4704 | |
| 4705 | /** |
| 4706 | * rdma_calc_flow_label - generate a RDMA symmetric flow label value based on |
| 4707 | * local and remote qpn values |
| 4708 | * |
| 4709 | * This function folded the multiplication results of two qpns, 24 bit each, |
| 4710 | * fields, and converts it to a 20 bit results. |
| 4711 | * |
| 4712 | * This function will create symmetric flow_label value based on the local |
| 4713 | * and remote qpn values. this will allow both the requester and responder |
| 4714 | * to calculate the same flow_label for a given connection. |
| 4715 | * |
| 4716 | * This helper function should be used by driver in case the upper layer |
| 4717 | * provide a zero flow_label value. This is to improve entropy of RDMA |
| 4718 | * traffic in the network. |
| 4719 | */ |
| 4720 | static inline u32 rdma_calc_flow_label(u32 lqpn, u32 rqpn) |
| 4721 | { |
| 4722 | u64 v = (u64)lqpn * rqpn; |
| 4723 | |
| 4724 | v ^= v >> 20; |
| 4725 | v ^= v >> 40; |
| 4726 | |
| 4727 | return (u32)(v & IB_GRH_FLOWLABEL_MASK); |
| 4728 | } |
Parav Pandit | 7416790 | 2021-02-03 15:01:32 +0200 | [diff] [blame] | 4729 | |
| 4730 | const struct ib_port_immutable* |
| 4731 | ib_port_immutable_read(struct ib_device *dev, unsigned int port); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4732 | #endif /* IB_VERBS_H */ |