/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _RDS_IB_H
#define _RDS_IB_H

#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include "rds.h"
#include "rdma_transport.h"

#define RDS_IB_MAX_SGE 8
#define RDS_IB_RECV_SGE 2

#define RDS_IB_DEFAULT_RECV_WR 1024
#define RDS_IB_DEFAULT_SEND_WR 256
#define RDS_IB_DEFAULT_FR_WR 512

#define RDS_IB_DEFAULT_RETRY_COUNT 1

#define RDS_IB_SUPPORTED_PROTOCOLS 0x00000003 /* minor versions supported */

#define RDS_IB_RECYCLE_BATCH_COUNT 32

#define RDS_IB_WC_MAX 32

extern struct rw_semaphore rds_ib_devices_lock;
extern struct list_head rds_ib_devices;

/*
 * IB posts RDS_FRAG_SIZE fragments of pages to the receive queues to
 * try to minimize the amount of memory tied up in both the device and
 * socket receive queues.
 */
struct rds_page_frag {
        struct list_head f_item;
        struct list_head f_cache_entry;
        struct scatterlist f_sg;
};

struct rds_ib_incoming {
        struct list_head ii_frags;
        struct list_head ii_cache_entry;
        struct rds_incoming ii_inc;
};
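
/*
 * Illustrative sketch, not part of the original header:
 * rds_ib_example_inc_len() is a hypothetical helper showing how the
 * two structs above relate - an incoming message is a chain of page
 * fragments, so its payload size is the sum of the fragments' f_sg
 * lengths.
 */
static inline u32 rds_ib_example_inc_len(struct rds_ib_incoming *ibinc)
{
        struct rds_page_frag *frag;
        u32 len = 0;

        list_for_each_entry(frag, &ibinc->ii_frags, f_item)
                len += frag->f_sg.length;
        return len;
}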

struct rds_ib_cache_head {
        struct list_head *first;
        unsigned long count;
};

struct rds_ib_refill_cache {
        struct rds_ib_cache_head __percpu *percpu;
        struct list_head *xfer;
        struct list_head *ready;
};
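
/*
 * Sketch of the producer side of the recycle cache (an assumption
 * drawn from the struct layout; the real, lockless version lives in
 * ib_recv.c - this simplified variant uses get_cpu() for clarity and
 * is not the code RDS actually runs). Entries are chained onto the
 * local CPU's head; a refill pass later splices the per-CPU chains to
 * ->xfer and then ->ready for reuse.
 */
static inline void rds_ib_example_cache_put(struct rds_ib_refill_cache *cache,
                                            struct list_head *new_item)
{
        struct rds_ib_cache_head *head = per_cpu_ptr(cache->percpu, get_cpu());

        if (!head->first)
                INIT_LIST_HEAD(new_item);       /* first entry starts the chain */
        else
                list_add_tail(new_item, head->first);
        head->first = new_item;
        head->count++;
        put_cpu();
}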

/* This is the common structure for the IB private data exchange in setting up
 * an RDS connection. The exchange is different for IPv4 and IPv6 connections
 * because the address size is different and the addresses exchanged are at
 * the beginning of the structure. Hence the same structure cannot be used
 * for both without breaking interoperability.
 */
struct rds_ib_conn_priv_cmn {
        u8 ricpc_protocol_major;
        u8 ricpc_protocol_minor;
        __be16 ricpc_protocol_minor_mask;       /* bitmask */
        u8 ricpc_dp_toss;
        u8 ripc_reserved1;
        __be16 ripc_reserved2;
        __be64 ricpc_ack_seq;
        __be32 ricpc_credit;    /* non-zero enables flow ctl */
};

struct rds_ib_connect_private {
        /* Add new fields at the end, and don't permute existing fields. */
        __be32 dp_saddr;
        __be32 dp_daddr;
        struct rds_ib_conn_priv_cmn dp_cmn;
};

struct rds6_ib_connect_private {
        /* Add new fields at the end, and don't permute existing fields. */
        struct in6_addr dp_saddr;
        struct in6_addr dp_daddr;
        struct rds_ib_conn_priv_cmn dp_cmn;
};

#define dp_protocol_major dp_cmn.ricpc_protocol_major
#define dp_protocol_minor dp_cmn.ricpc_protocol_minor
#define dp_protocol_minor_mask dp_cmn.ricpc_protocol_minor_mask
#define dp_ack_seq dp_cmn.ricpc_ack_seq
#define dp_credit dp_cmn.ricpc_credit

union rds_ib_conn_priv {
        struct rds_ib_connect_private ricp_v4;
        struct rds6_ib_connect_private ricp_v6;
};
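
/*
 * Illustration only: rds_ib_example_set_proto() is a hypothetical
 * helper (the real negotiation lives in ib_cm.c) showing how a caller
 * picks the layout matching the address family; the dp_* aliases
 * above resolve to the shared trailer in either branch.
 */
static inline void rds_ib_example_set_proto(union rds_ib_conn_priv *dp,
                                            bool isv6, u8 major, u8 minor)
{
        if (isv6) {
                dp->ricp_v6.dp_protocol_major = major;
                dp->ricp_v6.dp_protocol_minor = minor;
        } else {
                dp->ricp_v4.dp_protocol_major = major;
                dp->ricp_v4.dp_protocol_minor = minor;
        }
}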

struct rds_ib_send_work {
        void *s_op;
        union {
                struct ib_send_wr s_wr;
                struct ib_rdma_wr s_rdma_wr;
                struct ib_atomic_wr s_atomic_wr;
        };
        struct ib_sge s_sge[RDS_IB_MAX_SGE];
        unsigned long s_queued;
};

struct rds_ib_recv_work {
        struct rds_ib_incoming *r_ibinc;
        struct rds_page_frag *r_frag;
        struct ib_recv_wr r_wr;
        struct ib_sge r_sge[2];
};

struct rds_ib_work_ring {
        u32 w_nr;
        u32 w_alloc_ptr;
        u32 w_alloc_ctr;
        u32 w_free_ptr;
        atomic_t w_free_ctr;
};

/* Rings are posted with all the allocations they'll need to queue the
 * incoming message to the receiving socket, so this can't fail.
 * All fragments start with a header, so we can make sure we're not
 * receiving garbage, and we can tell a small 8-byte fragment from an
 * ACK frame.
 */
struct rds_ib_ack_state {
        u64 ack_next;
        u64 ack_recv;
        unsigned int ack_required:1;
        unsigned int ack_next_valid:1;
        unsigned int ack_recv_valid:1;
};
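
/*
 * Sketch (an assumption about how the recv completion path uses the
 * struct; rds_ib_example_note_recv() is hypothetical): each completion
 * records what it saw in a per-batch rds_ib_ack_state, and the batch
 * is flushed once afterwards via the ack machinery declared below.
 */
static inline void rds_ib_example_note_recv(struct rds_ib_ack_state *state,
                                            u64 seq, bool ack_required)
{
        state->ack_recv = seq;
        state->ack_recv_valid = 1;
        if (ack_required)
                state->ack_required = 1;
}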


struct rds_ib_device;

struct rds_ib_connection {

        struct list_head ib_node;
        struct rds_ib_device *rds_ibdev;
        struct rds_connection *conn;

        /* alphabet soup, IBTA style */
        struct rdma_cm_id *i_cm_id;
        struct ib_pd *i_pd;
        struct ib_cq *i_send_cq;
        struct ib_cq *i_recv_cq;
        struct ib_wc i_send_wc[RDS_IB_WC_MAX];
        struct ib_wc i_recv_wc[RDS_IB_WC_MAX];

        /* To control the number of wrs from fastreg */
        atomic_t i_fastreg_wrs;
        atomic_t i_fastreg_inuse_count;

        /* interrupt handling */
        struct tasklet_struct i_send_tasklet;
        struct tasklet_struct i_recv_tasklet;

        /* tx */
        struct rds_ib_work_ring i_send_ring;
        struct rm_data_op *i_data_op;
        struct rds_header **i_send_hdrs;
        dma_addr_t *i_send_hdrs_dma;
        struct rds_ib_send_work *i_sends;
        atomic_t i_signaled_sends;

        /* rx */
        struct mutex i_recv_mutex;
        struct rds_ib_work_ring i_recv_ring;
        struct rds_ib_incoming *i_ibinc;
        u32 i_recv_data_rem;
        struct rds_header **i_recv_hdrs;
        dma_addr_t *i_recv_hdrs_dma;
        struct rds_ib_recv_work *i_recvs;
        u64 i_ack_recv; /* last ACK received */
        struct rds_ib_refill_cache i_cache_incs;
        struct rds_ib_refill_cache i_cache_frags;
        atomic_t i_cache_allocs;

        /* sending acks */
        unsigned long i_ack_flags;
#ifdef KERNEL_HAS_ATOMIC64
        atomic64_t i_ack_next;  /* next ACK to send */
#else
        spinlock_t i_ack_lock;  /* protect i_ack_next */
        u64 i_ack_next;         /* next ACK to send */
#endif
        struct rds_header *i_ack;
        struct ib_send_wr i_ack_wr;
        struct ib_sge i_ack_sge;
        dma_addr_t i_ack_dma;
        unsigned long i_ack_queued;

        /* Flow control related information
         *
         * Our algorithm uses a pair of variables that we need to access
         * atomically - one for the send credits, and one for the posted
         * recv credits we need to transfer to the remote.
         * Rather than protect them using a slow spinlock, we put both into
         * a single atomic_t and update it using cmpxchg.
         */
        atomic_t i_credits;

        /* Protocol version specific information */
        unsigned int i_flowctl:1;       /* enable/disable flow ctl */

        /* Batched completions */
        unsigned int i_unsignaled_wrs;

        /* Endpoint role in connection */
        bool i_active_side;
        atomic_t i_cq_quiesce;

        /* Send/Recv vectors */
        int i_scq_vector;
        int i_rcq_vector;
        u8 i_sl;
};

/* This assumes that atomic_t is at least 32 bits */
#define IB_GET_SEND_CREDITS(v)  ((v) & 0xffff)
#define IB_GET_POST_CREDITS(v)  ((v) >> 16)
#define IB_SET_SEND_CREDITS(v)  ((v) & 0xffff)
#define IB_SET_POST_CREDITS(v)  ((v) << 16)
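
/*
 * Illustration (hypothetical helper): one atomic_read() yields a
 * consistent snapshot of both counters - send credits in the low 16
 * bits, posted recv credits to advertise in the high 16 bits. The
 * encode direction combines IB_SET_SEND_CREDITS() | IB_SET_POST_CREDITS().
 */
static inline void rds_ib_example_read_credits(struct rds_ib_connection *ic,
                                               unsigned int *send_credits,
                                               unsigned int *posted)
{
        unsigned int v = atomic_read(&ic->i_credits);

        *send_credits = IB_GET_SEND_CREDITS(v);
        *posted = IB_GET_POST_CREDITS(v);
}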

struct rds_ib_ipaddr {
        struct list_head list;
        __be32 ipaddr;
        struct rcu_head rcu;
};

enum {
        RDS_IB_MR_8K_POOL,
        RDS_IB_MR_1M_POOL,
};

struct rds_ib_device {
        struct list_head list;
        struct list_head ipaddr_list;
        struct list_head conn_list;
        struct ib_device *dev;
        struct ib_pd *pd;
        u8 odp_capable:1;

        unsigned int max_mrs;
        struct rds_ib_mr_pool *mr_1m_pool;
        struct rds_ib_mr_pool *mr_8k_pool;
        unsigned int max_8k_mrs;
        unsigned int max_1m_mrs;
        int max_sge;
        unsigned int max_wrs;
        unsigned int max_initiator_depth;
        unsigned int max_responder_resources;
        spinlock_t spinlock;    /* protect the above */
        refcount_t refcount;
        struct work_struct free_work;
        int *vector_load;
};

#define rdsibdev_to_node(rdsibdev) ibdev_to_node(rdsibdev->dev)

/* bits for i_ack_flags */
#define IB_ACK_IN_FLIGHT 0
#define IB_ACK_REQUESTED 1

/* Magic WR_ID for ACKs */
#define RDS_IB_ACK_WR_ID (~(u64) 0)
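
/*
 * Illustration (hypothetical helper): because ring entries never use a
 * wr_id of ~0, a send completion for the ACK WR can be recognized from
 * the magic value alone.
 */
static inline bool rds_ib_example_wc_is_ack(const struct ib_wc *wc)
{
        return wc->wr_id == RDS_IB_ACK_WR_ID;
}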

struct rds_ib_statistics {
        uint64_t s_ib_connect_raced;
        uint64_t s_ib_listen_closed_stale;
        uint64_t s_ib_evt_handler_call;
        uint64_t s_ib_tasklet_call;
        uint64_t s_ib_tx_cq_event;
        uint64_t s_ib_tx_ring_full;
        uint64_t s_ib_tx_throttle;
        uint64_t s_ib_tx_sg_mapping_failure;
        uint64_t s_ib_tx_stalled;
        uint64_t s_ib_tx_credit_updates;
        uint64_t s_ib_rx_cq_event;
        uint64_t s_ib_rx_ring_empty;
        uint64_t s_ib_rx_refill_from_cq;
        uint64_t s_ib_rx_refill_from_thread;
        uint64_t s_ib_rx_alloc_limit;
        uint64_t s_ib_rx_total_frags;
        uint64_t s_ib_rx_total_incs;
        uint64_t s_ib_rx_credit_updates;
        uint64_t s_ib_ack_sent;
        uint64_t s_ib_ack_send_failure;
        uint64_t s_ib_ack_send_delayed;
        uint64_t s_ib_ack_send_piggybacked;
        uint64_t s_ib_ack_received;
        uint64_t s_ib_rdma_mr_8k_alloc;
        uint64_t s_ib_rdma_mr_8k_free;
        uint64_t s_ib_rdma_mr_8k_used;
        uint64_t s_ib_rdma_mr_8k_pool_flush;
        uint64_t s_ib_rdma_mr_8k_pool_wait;
        uint64_t s_ib_rdma_mr_8k_pool_depleted;
        uint64_t s_ib_rdma_mr_1m_alloc;
        uint64_t s_ib_rdma_mr_1m_free;
        uint64_t s_ib_rdma_mr_1m_used;
        uint64_t s_ib_rdma_mr_1m_pool_flush;
        uint64_t s_ib_rdma_mr_1m_pool_wait;
        uint64_t s_ib_rdma_mr_1m_pool_depleted;
        uint64_t s_ib_rdma_mr_8k_reused;
        uint64_t s_ib_rdma_mr_1m_reused;
        uint64_t s_ib_atomic_cswp;
        uint64_t s_ib_atomic_fadd;
        uint64_t s_ib_recv_added_to_cache;
        uint64_t s_ib_recv_removed_from_cache;
};

extern struct workqueue_struct *rds_ib_wq;

/*
 * Fake ib_dma_sync_sg_for_{cpu,device} as long as ib_verbs.h
 * doesn't define it.
 */
static inline void rds_ib_dma_sync_sg_for_cpu(struct ib_device *dev,
                                              struct scatterlist *sglist,
                                              unsigned int sg_dma_len,
                                              int direction)
{
        struct scatterlist *sg;
        unsigned int i;

        for_each_sg(sglist, sg, sg_dma_len, i) {
                ib_dma_sync_single_for_cpu(dev, sg_dma_address(sg),
                                           sg_dma_len(sg), direction);
        }
}
#define ib_dma_sync_sg_for_cpu rds_ib_dma_sync_sg_for_cpu

static inline void rds_ib_dma_sync_sg_for_device(struct ib_device *dev,
                                                 struct scatterlist *sglist,
                                                 unsigned int sg_dma_len,
                                                 int direction)
{
        struct scatterlist *sg;
        unsigned int i;

        for_each_sg(sglist, sg, sg_dma_len, i) {
                ib_dma_sync_single_for_device(dev, sg_dma_address(sg),
                                              sg_dma_len(sg), direction);
        }
}
#define ib_dma_sync_sg_for_device rds_ib_dma_sync_sg_for_device
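
/*
 * Usage sketch (illustration only; assumes the fragment was mapped
 * with ib_dma_map_sg() and that DMA_FROM_DEVICE is the appropriate
 * direction for a receive): sync one fragment before the CPU parses it.
 */
static inline void rds_ib_example_sync_frag(struct ib_device *dev,
                                            struct rds_page_frag *frag)
{
        ib_dma_sync_sg_for_cpu(dev, &frag->f_sg, 1, DMA_FROM_DEVICE);
}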


/* ib.c */
extern struct rds_transport rds_ib_transport;
struct rds_ib_device *rds_ib_get_client_data(struct ib_device *device);
void rds_ib_dev_put(struct rds_ib_device *rds_ibdev);
extern struct ib_client rds_ib_client;

extern unsigned int rds_ib_retry_count;

extern spinlock_t ib_nodev_conns_lock;
extern struct list_head ib_nodev_conns;

/* ib_cm.c */
int rds_ib_conn_alloc(struct rds_connection *conn, gfp_t gfp);
void rds_ib_conn_free(void *arg);
int rds_ib_conn_path_connect(struct rds_conn_path *cp);
void rds_ib_conn_path_shutdown(struct rds_conn_path *cp);
void rds_ib_state_change(struct sock *sk);
int rds_ib_listen_init(void);
void rds_ib_listen_stop(void);
__printf(2, 3)
void __rds_ib_conn_error(struct rds_connection *conn, const char *, ...);
int rds_ib_cm_handle_connect(struct rdma_cm_id *cm_id,
                             struct rdma_cm_event *event, bool isv6);
int rds_ib_cm_initiate_connect(struct rdma_cm_id *cm_id, bool isv6);
void rds_ib_cm_connect_complete(struct rds_connection *conn,
                                struct rdma_cm_event *event);

#define rds_ib_conn_error(conn, fmt...) \
        __rds_ib_conn_error(conn, KERN_WARNING "RDS/IB: " fmt)
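
/*
 * Example use of the macro above (hypothetical helper, shown only to
 * illustrate the printf-style contract checked by __printf(2, 3)).
 */
static inline void rds_ib_example_fail_conn(struct rds_connection *conn,
                                            int status)
{
        rds_ib_conn_error(conn, "connection failed, status %d\n", status);
}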

/* ib_rdma.c */
int rds_ib_update_ipaddr(struct rds_ib_device *rds_ibdev,
                         struct in6_addr *ipaddr);
void rds_ib_add_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn);
void rds_ib_remove_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn);
void rds_ib_destroy_nodev_conns(void);
void rds_ib_mr_cqe_handler(struct rds_ib_connection *ic, struct ib_wc *wc);

/* ib_recv.c */
int rds_ib_recv_init(void);
void rds_ib_recv_exit(void);
int rds_ib_recv_path(struct rds_conn_path *conn);
int rds_ib_recv_alloc_caches(struct rds_ib_connection *ic, gfp_t gfp);
void rds_ib_recv_free_caches(struct rds_ib_connection *ic);
void rds_ib_recv_refill(struct rds_connection *conn, int prefill, gfp_t gfp);
void rds_ib_inc_free(struct rds_incoming *inc);
int rds_ib_inc_copy_to_user(struct rds_incoming *inc, struct iov_iter *to);
void rds_ib_recv_cqe_handler(struct rds_ib_connection *ic, struct ib_wc *wc,
                             struct rds_ib_ack_state *state);
void rds_ib_recv_tasklet_fn(unsigned long data);
void rds_ib_recv_init_ring(struct rds_ib_connection *ic);
void rds_ib_recv_clear_ring(struct rds_ib_connection *ic);
void rds_ib_recv_init_ack(struct rds_ib_connection *ic);
void rds_ib_attempt_ack(struct rds_ib_connection *ic);
void rds_ib_ack_send_complete(struct rds_ib_connection *ic);
u64 rds_ib_piggyb_ack(struct rds_ib_connection *ic);
void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq, int ack_required);

/* ib_ring.c */
void rds_ib_ring_init(struct rds_ib_work_ring *ring, u32 nr);
void rds_ib_ring_resize(struct rds_ib_work_ring *ring, u32 nr);
u32 rds_ib_ring_alloc(struct rds_ib_work_ring *ring, u32 val, u32 *pos);
void rds_ib_ring_free(struct rds_ib_work_ring *ring, u32 val);
void rds_ib_ring_unalloc(struct rds_ib_work_ring *ring, u32 val);
int rds_ib_ring_empty(struct rds_ib_work_ring *ring);
int rds_ib_ring_low(struct rds_ib_work_ring *ring);
u32 rds_ib_ring_oldest(struct rds_ib_work_ring *ring);
u32 rds_ib_ring_completed(struct rds_ib_work_ring *ring, u32 wr_id, u32 oldest);
extern wait_queue_head_t rds_ib_ring_empty_wait;

/* ib_send.c */
void rds_ib_xmit_path_complete(struct rds_conn_path *cp);
int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
                unsigned int hdr_off, unsigned int sg, unsigned int off);
void rds_ib_send_cqe_handler(struct rds_ib_connection *ic, struct ib_wc *wc);
void rds_ib_send_init_ring(struct rds_ib_connection *ic);
void rds_ib_send_clear_ring(struct rds_ib_connection *ic);
int rds_ib_xmit_rdma(struct rds_connection *conn, struct rm_rdma_op *op);
void rds_ib_send_add_credits(struct rds_connection *conn, unsigned int credits);
void rds_ib_advertise_credits(struct rds_connection *conn, unsigned int posted);
int rds_ib_send_grab_credits(struct rds_ib_connection *ic, u32 wanted,
                             u32 *adv_credits, int need_posted, int max_posted);
int rds_ib_xmit_atomic(struct rds_connection *conn, struct rm_atomic_op *op);

/* ib_stats.c */
DECLARE_PER_CPU_SHARED_ALIGNED(struct rds_ib_statistics, rds_ib_stats);
#define rds_ib_stats_inc(member) rds_stats_inc_which(rds_ib_stats, member)
#define rds_ib_stats_add(member, count) \
        rds_stats_add_which(rds_ib_stats, member, count)
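
/*
 * Illustration (hypothetical helper): counters are bumped by member
 * name, so a send-side ACK path would do the following.
 */
static inline void rds_ib_example_count_ack_sent(void)
{
        rds_ib_stats_inc(s_ib_ack_sent);
}
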
unsigned int rds_ib_stats_info_copy(struct rds_info_iterator *iter,
                                    unsigned int avail);

/* ib_sysctl.c */
int rds_ib_sysctl_init(void);
void rds_ib_sysctl_exit(void);
extern unsigned long rds_ib_sysctl_max_send_wr;
extern unsigned long rds_ib_sysctl_max_recv_wr;
extern unsigned long rds_ib_sysctl_max_unsig_wrs;
extern unsigned long rds_ib_sysctl_max_unsig_bytes;
extern unsigned long rds_ib_sysctl_max_recv_allocation;
extern unsigned int rds_ib_sysctl_flow_control;

#endif