/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _RDS_IB_H
#define _RDS_IB_H

#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include "rds.h"
#include "rdma_transport.h"

#define RDS_IB_MAX_SGE			8
#define RDS_IB_RECV_SGE			2

#define RDS_IB_DEFAULT_RECV_WR		1024
#define RDS_IB_DEFAULT_SEND_WR		256
#define RDS_IB_DEFAULT_FR_WR		256
#define RDS_IB_DEFAULT_FR_INV_WR	256

#define RDS_IB_DEFAULT_RETRY_COUNT	1

#define RDS_IB_SUPPORTED_PROTOCOLS	0x00000003 /* minor versions supported */

#define RDS_IB_RECYCLE_BATCH_COUNT	32

#define RDS_IB_WC_MAX			32

extern struct rw_semaphore rds_ib_devices_lock;
extern struct list_head rds_ib_devices;
/*
 * IB posts RDS_FRAG_SIZE fragments of pages to the receive queues to
 * try and minimize the amount of memory tied up in both the device and
 * socket receive queues.
 */
struct rds_page_frag {
	struct list_head	f_item;
	struct list_head	f_cache_entry;
	struct scatterlist	f_sg;
};

struct rds_ib_incoming {
	struct list_head	ii_frags;
	struct list_head	ii_cache_entry;
	struct rds_incoming	ii_inc;
};

struct rds_ib_cache_head {
	struct list_head	*first;
	unsigned long		count;
};

struct rds_ib_refill_cache {
	struct rds_ib_cache_head __percpu *percpu;
	struct list_head	*xfer;
	struct list_head	*ready;
};
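
/* Illustrative sketch (not part of the original header): how a freed frag or
 * inc is meant to flow through the refill cache above. Frees accumulate on a
 * per-cpu list; once RDS_IB_RECYCLE_BATCH_COUNT entries pile up, the whole
 * batch is published to the shared ->xfer list with a lock-free cmpxchg, and
 * the refill path later consumes it via ->ready. This is a simplified,
 * hypothetical version of the logic in ib_recv.c (it ignores the case where
 * ->xfer is already occupied):
 *
 *	static void example_cache_put(struct rds_ib_refill_cache *cache,
 *				      struct list_head *new_item)
 *	{
 *		struct rds_ib_cache_head *chp = get_cpu_ptr(cache->percpu);
 *
 *		if (!chp->first)
 *			INIT_LIST_HEAD(new_item);
 *		else
 *			list_add_tail(new_item, chp->first);
 *		chp->first = new_item;
 *
 *		if (++chp->count >= RDS_IB_RECYCLE_BATCH_COUNT &&
 *		    cmpxchg(&cache->xfer, NULL, chp->first) == NULL) {
 *			chp->first = NULL;
 *			chp->count = 0;
 *		}
 *		put_cpu_ptr(cache->percpu);
 *	}
 */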

/* This is the common structure for the IB private data exchange in setting up
 * an RDS connection. The exchange differs for IPv4 and IPv6 connections
 * because the address sizes differ and the addresses exchanged sit at the
 * beginning of the structure, so a single layout shared by both families
 * would not interoperate. (See the sketch after the union below.)
 */
struct rds_ib_conn_priv_cmn {
	u8	ricpc_protocol_major;
	u8	ricpc_protocol_minor;
	__be16	ricpc_protocol_minor_mask;	/* bitmask */
	u8	ricpc_dp_toss;
	u8	ripc_reserved1;
	__be16	ripc_reserved2;
	__be64	ricpc_ack_seq;
	__be32	ricpc_credit;	/* non-zero enables flow ctl */
};

struct rds_ib_connect_private {
	/* Add new fields at the end, and don't permute existing fields. */
	__be32				dp_saddr;
	__be32				dp_daddr;
	struct rds_ib_conn_priv_cmn	dp_cmn;
};

struct rds6_ib_connect_private {
	/* Add new fields at the end, and don't permute existing fields. */
	struct in6_addr			dp_saddr;
	struct in6_addr			dp_daddr;
	struct rds_ib_conn_priv_cmn	dp_cmn;
};

#define dp_protocol_major	dp_cmn.ricpc_protocol_major
#define dp_protocol_minor	dp_cmn.ricpc_protocol_minor
#define dp_protocol_minor_mask	dp_cmn.ricpc_protocol_minor_mask
#define dp_ack_seq		dp_cmn.ricpc_ack_seq
#define dp_credit		dp_cmn.ricpc_credit

union rds_ib_conn_priv {
	struct rds_ib_connect_private	ricp_v4;
	struct rds6_ib_connect_private	ricp_v6;
};
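
/* Illustrative sketch (not part of the original header): the active side
 * picks the union member matching the connection's address family when
 * building the private data for the CM exchange. "isv6" and
 * "protocol_version" are hypothetical locals here; the dp_* macros above
 * apply to either member:
 *
 *	union rds_ib_conn_priv dp;
 *
 *	memset(&dp, 0, sizeof(dp));
 *	if (isv6) {
 *		dp.ricp_v6.dp_saddr = conn->c_laddr;
 *		dp.ricp_v6.dp_daddr = conn->c_faddr;
 *		dp.ricp_v6.dp_protocol_major = RDS_PROTOCOL_MAJOR(protocol_version);
 *	} else {
 *		dp.ricp_v4.dp_saddr = conn->c_laddr.s6_addr32[3];
 *		dp.ricp_v4.dp_daddr = conn->c_faddr.s6_addr32[3];
 *		dp.ricp_v4.dp_protocol_major = RDS_PROTOCOL_MAJOR(protocol_version);
 *	}
 */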

struct rds_ib_send_work {
	void			*s_op;
	union {
		struct ib_send_wr	s_wr;
		struct ib_rdma_wr	s_rdma_wr;
		struct ib_atomic_wr	s_atomic_wr;
	};
	struct ib_sge		s_sge[RDS_IB_MAX_SGE];
	unsigned long		s_queued;
};

struct rds_ib_recv_work {
	struct rds_ib_incoming	*r_ibinc;
	struct rds_page_frag	*r_frag;
	struct ib_recv_wr	r_wr;
	struct ib_sge		r_sge[2];
};

struct rds_ib_work_ring {
	u32		w_nr;
	u32		w_alloc_ptr;
	u32		w_alloc_ctr;
	u32		w_free_ptr;
	atomic_t	w_free_ctr;
};

/* Rings are posted with all the allocations they'll need to queue the
 * incoming message to the receiving socket, so this can't fail.
 * All fragments start with a header, so we can make sure we're not receiving
 * garbage, and we can tell a small 8-byte fragment from an ACK frame.
 */
struct rds_ib_ack_state {
	u64		ack_next;
	u64		ack_recv;
	unsigned int	ack_required:1;
	unsigned int	ack_next_valid:1;
	unsigned int	ack_recv_valid:1;
};
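
/* Illustrative sketch (not part of the original header): the recv tasklet
 * polls the CQ, lets rds_ib_recv_cqe_handler() accumulate ack state across
 * all completions, and only then turns the aggregate into at most one ACK.
 * A simplified version of the loop in ib_cm.c might look like:
 *
 *	struct ib_wc wcs[RDS_IB_WC_MAX];
 *	struct rds_ib_ack_state state = { 0 };
 *	int nr, i;
 *
 *	while ((nr = ib_poll_cq(ic->i_recv_cq, RDS_IB_WC_MAX, wcs)) > 0)
 *		for (i = 0; i < nr; i++)
 *			rds_ib_recv_cqe_handler(ic, &wcs[i], &state);
 *
 *	if (state.ack_next_valid)
 *		rds_ib_set_ack(ic, state.ack_next, state.ack_required);
 */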


struct rds_ib_device;

struct rds_ib_connection {

	struct list_head	ib_node;
	struct rds_ib_device	*rds_ibdev;
	struct rds_connection	*conn;

	/* alphabet soup, IBTA style */
	struct rdma_cm_id	*i_cm_id;
	struct ib_pd		*i_pd;
	struct ib_cq		*i_send_cq;
	struct ib_cq		*i_recv_cq;
	struct ib_wc		i_send_wc[RDS_IB_WC_MAX];
	struct ib_wc		i_recv_wc[RDS_IB_WC_MAX];

	/* To control the number of wrs from fastreg */
	atomic_t		i_fastreg_wrs;
	atomic_t		i_fastunreg_wrs;

	/* interrupt handling */
	struct tasklet_struct	i_send_tasklet;
	struct tasklet_struct	i_recv_tasklet;

	/* tx */
	struct rds_ib_work_ring	i_send_ring;
	struct rm_data_op	*i_data_op;
	struct rds_header	*i_send_hdrs;
	dma_addr_t		i_send_hdrs_dma;
	struct rds_ib_send_work	*i_sends;
	atomic_t		i_signaled_sends;

	/* rx */
	struct mutex		i_recv_mutex;
	struct rds_ib_work_ring	i_recv_ring;
	struct rds_ib_incoming	*i_ibinc;
	u32			i_recv_data_rem;
	struct rds_header	*i_recv_hdrs;
	dma_addr_t		i_recv_hdrs_dma;
	struct rds_ib_recv_work	*i_recvs;
	u64			i_ack_recv;	/* last ACK received */
	struct rds_ib_refill_cache i_cache_incs;
	struct rds_ib_refill_cache i_cache_frags;
	atomic_t		i_cache_allocs;

	/* sending acks */
	unsigned long		i_ack_flags;
#ifdef KERNEL_HAS_ATOMIC64
	atomic64_t		i_ack_next;	/* next ACK to send */
#else
	spinlock_t		i_ack_lock;	/* protect i_ack_next */
	u64			i_ack_next;	/* next ACK to send */
#endif
	struct rds_header	*i_ack;
	struct ib_send_wr	i_ack_wr;
	struct ib_sge		i_ack_sge;
	dma_addr_t		i_ack_dma;
	unsigned long		i_ack_queued;

	/* Flow control related information
	 *
	 * Our algorithm uses a pair of variables that we need to access
	 * atomically - one for the send credits, and one for the posted
	 * recv credits we need to transfer to the remote.
	 * Rather than protect them using a slow spinlock, we put both into
	 * a single atomic_t and update it using cmpxchg.
	 * (See the sketch after the credit macros below.)
	 */
	atomic_t		i_credits;

	/* Protocol version specific information */
	unsigned int		i_flowctl:1;	/* enable/disable flow ctl */

	/* Batched completions */
	unsigned int		i_unsignaled_wrs;

	/* Endpoint role in connection */
	bool			i_active_side;
	atomic_t		i_cq_quiesce;

	/* Send/Recv vectors */
	int			i_scq_vector;
	int			i_rcq_vector;
};

/* This assumes that atomic_t is at least 32 bits */
#define IB_GET_SEND_CREDITS(v)	((v) & 0xffff)
#define IB_GET_POST_CREDITS(v)	((v) >> 16)
#define IB_SET_SEND_CREDITS(v)	((v) & 0xffff)
#define IB_SET_POST_CREDITS(v)	((v) << 16)
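
/* Illustrative sketch (not part of the original header): because both
 * counters live in one atomic_t, the send path can claim credits with a
 * lock-free read/modify/cmpxchg loop. A simplified version of the pattern
 * used by rds_ib_send_grab_credits(), where "wanted" is the caller's
 * requested credit count:
 *
 *	unsigned int avail;
 *	int oldval, newval;
 *
 *	do {
 *		oldval = newval = atomic_read(&ic->i_credits);
 *		avail = IB_GET_SEND_CREDITS(oldval);
 *		newval -= IB_SET_SEND_CREDITS(min_t(u32, wanted, avail));
 *	} while (atomic_cmpxchg(&ic->i_credits, oldval, newval) != oldval);
 */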

struct rds_ib_ipaddr {
	struct list_head	list;
	__be32			ipaddr;
	struct rcu_head		rcu;
};

enum {
	RDS_IB_MR_8K_POOL,
	RDS_IB_MR_1M_POOL,
};

struct rds_ib_device {
	struct list_head	list;
	struct list_head	ipaddr_list;
	struct list_head	conn_list;
	struct ib_device	*dev;
	struct ib_pd		*pd;
	bool			use_fastreg;

	unsigned int		max_mrs;
	struct rds_ib_mr_pool	*mr_1m_pool;
	struct rds_ib_mr_pool	*mr_8k_pool;
	unsigned int		fmr_max_remaps;
	unsigned int		max_8k_mrs;
	unsigned int		max_1m_mrs;
	int			max_sge;
	unsigned int		max_wrs;
	unsigned int		max_initiator_depth;
	unsigned int		max_responder_resources;
	spinlock_t		spinlock;	/* protect the above */
	refcount_t		refcount;
	struct work_struct	free_work;
	int			*vector_load;
};

#define ibdev_to_node(ibdev) dev_to_node((ibdev)->dev.parent)
#define rdsibdev_to_node(rdsibdev) ibdev_to_node(rdsibdev->dev)

/* bits for i_ack_flags */
#define IB_ACK_IN_FLIGHT	0
#define IB_ACK_REQUESTED	1

/* Magic WR_ID for ACKs */
#define RDS_IB_ACK_WR_ID	(~(u64) 0)
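
/* Illustrative sketch (not part of the original header): the magic wr_id
 * lets the send completion handler tell ACK completions, which have no
 * rds_ib_send_work behind them, apart from ordinary ring entries, roughly:
 *
 *	if (wc->wr_id == RDS_IB_ACK_WR_ID) {
 *		rds_ib_ack_send_complete(ic);
 *		return;
 *	}
 */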

struct rds_ib_statistics {
	uint64_t	s_ib_connect_raced;
	uint64_t	s_ib_listen_closed_stale;
	uint64_t	s_ib_evt_handler_call;
	uint64_t	s_ib_tasklet_call;
	uint64_t	s_ib_tx_cq_event;
	uint64_t	s_ib_tx_ring_full;
	uint64_t	s_ib_tx_throttle;
	uint64_t	s_ib_tx_sg_mapping_failure;
	uint64_t	s_ib_tx_stalled;
	uint64_t	s_ib_tx_credit_updates;
	uint64_t	s_ib_rx_cq_event;
	uint64_t	s_ib_rx_ring_empty;
	uint64_t	s_ib_rx_refill_from_cq;
	uint64_t	s_ib_rx_refill_from_thread;
	uint64_t	s_ib_rx_alloc_limit;
	uint64_t	s_ib_rx_total_frags;
	uint64_t	s_ib_rx_total_incs;
	uint64_t	s_ib_rx_credit_updates;
	uint64_t	s_ib_ack_sent;
	uint64_t	s_ib_ack_send_failure;
	uint64_t	s_ib_ack_send_delayed;
	uint64_t	s_ib_ack_send_piggybacked;
	uint64_t	s_ib_ack_received;
	uint64_t	s_ib_rdma_mr_8k_alloc;
	uint64_t	s_ib_rdma_mr_8k_free;
	uint64_t	s_ib_rdma_mr_8k_used;
	uint64_t	s_ib_rdma_mr_8k_pool_flush;
	uint64_t	s_ib_rdma_mr_8k_pool_wait;
	uint64_t	s_ib_rdma_mr_8k_pool_depleted;
	uint64_t	s_ib_rdma_mr_1m_alloc;
	uint64_t	s_ib_rdma_mr_1m_free;
	uint64_t	s_ib_rdma_mr_1m_used;
	uint64_t	s_ib_rdma_mr_1m_pool_flush;
	uint64_t	s_ib_rdma_mr_1m_pool_wait;
	uint64_t	s_ib_rdma_mr_1m_pool_depleted;
	uint64_t	s_ib_rdma_mr_8k_reused;
	uint64_t	s_ib_rdma_mr_1m_reused;
	uint64_t	s_ib_atomic_cswp;
	uint64_t	s_ib_atomic_fadd;
	uint64_t	s_ib_recv_added_to_cache;
	uint64_t	s_ib_recv_removed_from_cache;
};

extern struct workqueue_struct *rds_ib_wq;

/*
 * Fake ib_dma_sync_sg_for_{cpu,device} as long as ib_verbs.h
 * doesn't define it.
 */
static inline void rds_ib_dma_sync_sg_for_cpu(struct ib_device *dev,
					      struct scatterlist *sglist,
					      unsigned int sg_dma_len,
					      int direction)
{
	struct scatterlist *sg;
	unsigned int i;

	for_each_sg(sglist, sg, sg_dma_len, i) {
		ib_dma_sync_single_for_cpu(dev, sg_dma_address(sg),
					   sg_dma_len(sg), direction);
	}
}
#define ib_dma_sync_sg_for_cpu	rds_ib_dma_sync_sg_for_cpu

static inline void rds_ib_dma_sync_sg_for_device(struct ib_device *dev,
						 struct scatterlist *sglist,
						 unsigned int sg_dma_len,
						 int direction)
{
	struct scatterlist *sg;
	unsigned int i;

	for_each_sg(sglist, sg, sg_dma_len, i) {
		ib_dma_sync_single_for_device(dev, sg_dma_address(sg),
					      sg_dma_len(sg), direction);
	}
}
#define ib_dma_sync_sg_for_device	rds_ib_dma_sync_sg_for_device
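
/* Illustrative usage (not part of the original header): the wrappers above
 * walk a mapped scatterlist and sync each element individually, e.g. before
 * the CPU reads DMA'd receive data ("sgl" and "nents" are hypothetical):
 *
 *	ib_dma_sync_sg_for_cpu(ic->i_cm_id->device, sgl, nents,
 *			       DMA_FROM_DEVICE);
 */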


/* ib.c */
extern struct rds_transport rds_ib_transport;
struct rds_ib_device *rds_ib_get_client_data(struct ib_device *device);
void rds_ib_dev_put(struct rds_ib_device *rds_ibdev);
extern struct ib_client rds_ib_client;

extern unsigned int rds_ib_retry_count;

extern spinlock_t ib_nodev_conns_lock;
extern struct list_head ib_nodev_conns;

/* ib_cm.c */
int rds_ib_conn_alloc(struct rds_connection *conn, gfp_t gfp);
void rds_ib_conn_free(void *arg);
int rds_ib_conn_path_connect(struct rds_conn_path *cp);
void rds_ib_conn_path_shutdown(struct rds_conn_path *cp);
void rds_ib_state_change(struct sock *sk);
int rds_ib_listen_init(void);
void rds_ib_listen_stop(void);
__printf(2, 3)
void __rds_ib_conn_error(struct rds_connection *conn, const char *, ...);
int rds_ib_cm_handle_connect(struct rdma_cm_id *cm_id,
			     struct rdma_cm_event *event, bool isv6);
int rds_ib_cm_initiate_connect(struct rdma_cm_id *cm_id, bool isv6);
void rds_ib_cm_connect_complete(struct rds_connection *conn,
				struct rdma_cm_event *event);


#define rds_ib_conn_error(conn, fmt...) \
	__rds_ib_conn_error(conn, KERN_WARNING "RDS/IB: " fmt)

/* ib_rdma.c */
int rds_ib_update_ipaddr(struct rds_ib_device *rds_ibdev,
			 struct in6_addr *ipaddr);
void rds_ib_add_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn);
void rds_ib_remove_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn);
void rds_ib_destroy_nodev_conns(void);
void rds_ib_mr_cqe_handler(struct rds_ib_connection *ic, struct ib_wc *wc);

/* ib_recv.c */
int rds_ib_recv_init(void);
void rds_ib_recv_exit(void);
int rds_ib_recv_path(struct rds_conn_path *conn);
int rds_ib_recv_alloc_caches(struct rds_ib_connection *ic, gfp_t gfp);
void rds_ib_recv_free_caches(struct rds_ib_connection *ic);
void rds_ib_recv_refill(struct rds_connection *conn, int prefill, gfp_t gfp);
void rds_ib_inc_free(struct rds_incoming *inc);
int rds_ib_inc_copy_to_user(struct rds_incoming *inc, struct iov_iter *to);
void rds_ib_recv_cqe_handler(struct rds_ib_connection *ic, struct ib_wc *wc,
			     struct rds_ib_ack_state *state);
void rds_ib_recv_tasklet_fn(unsigned long data);
void rds_ib_recv_init_ring(struct rds_ib_connection *ic);
void rds_ib_recv_clear_ring(struct rds_ib_connection *ic);
void rds_ib_recv_init_ack(struct rds_ib_connection *ic);
void rds_ib_attempt_ack(struct rds_ib_connection *ic);
void rds_ib_ack_send_complete(struct rds_ib_connection *ic);
u64 rds_ib_piggyb_ack(struct rds_ib_connection *ic);
void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq, int ack_required);

/* ib_ring.c */
void rds_ib_ring_init(struct rds_ib_work_ring *ring, u32 nr);
void rds_ib_ring_resize(struct rds_ib_work_ring *ring, u32 nr);
u32 rds_ib_ring_alloc(struct rds_ib_work_ring *ring, u32 val, u32 *pos);
void rds_ib_ring_free(struct rds_ib_work_ring *ring, u32 val);
void rds_ib_ring_unalloc(struct rds_ib_work_ring *ring, u32 val);
int rds_ib_ring_empty(struct rds_ib_work_ring *ring);
int rds_ib_ring_low(struct rds_ib_work_ring *ring);
u32 rds_ib_ring_oldest(struct rds_ib_work_ring *ring);
u32 rds_ib_ring_completed(struct rds_ib_work_ring *ring, u32 wr_id, u32 oldest);
extern wait_queue_head_t rds_ib_ring_empty_wait;
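
/* Illustrative sketch (not part of the original header): the send path
 * reserves ring entries up front and returns any it could not use
 * ("nr_wanted" and "nr_posted" are hypothetical names):
 *
 *	u32 pos, work_alloc;
 *
 *	work_alloc = rds_ib_ring_alloc(&ic->i_send_ring, nr_wanted, &pos);
 *	if (work_alloc == 0) {
 *		rds_ib_stats_inc(s_ib_tx_ring_full);
 *		return -ENOMEM;
 *	}
 *	... build and post work_alloc sends starting at ic->i_sends[pos] ...
 *	if (work_alloc > nr_posted)
 *		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc - nr_posted);
 */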

/* ib_send.c */
void rds_ib_xmit_path_complete(struct rds_conn_path *cp);
int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
		unsigned int hdr_off, unsigned int sg, unsigned int off);
void rds_ib_send_cqe_handler(struct rds_ib_connection *ic, struct ib_wc *wc);
void rds_ib_send_init_ring(struct rds_ib_connection *ic);
void rds_ib_send_clear_ring(struct rds_ib_connection *ic);
int rds_ib_xmit_rdma(struct rds_connection *conn, struct rm_rdma_op *op);
void rds_ib_send_add_credits(struct rds_connection *conn, unsigned int credits);
void rds_ib_advertise_credits(struct rds_connection *conn, unsigned int posted);
int rds_ib_send_grab_credits(struct rds_ib_connection *ic, u32 wanted,
			     u32 *adv_credits, int need_posted, int max_posted);
int rds_ib_xmit_atomic(struct rds_connection *conn, struct rm_atomic_op *op);

/* ib_stats.c */
DECLARE_PER_CPU_SHARED_ALIGNED(struct rds_ib_statistics, rds_ib_stats);
#define rds_ib_stats_inc(member) rds_stats_inc_which(rds_ib_stats, member)
#define rds_ib_stats_add(member, count) \
	rds_stats_add_which(rds_ib_stats, member, count)
unsigned int rds_ib_stats_info_copy(struct rds_info_iterator *iter,
				    unsigned int avail);
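
/* Illustrative usage (not part of the original header): the macros above
 * bump this CPU's counter for the named member ("nr_frags" is hypothetical):
 *
 *	rds_ib_stats_inc(s_ib_tx_ring_full);
 *	rds_ib_stats_add(s_ib_rx_total_frags, nr_frags);
 */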

/* ib_sysctl.c */
int rds_ib_sysctl_init(void);
void rds_ib_sysctl_exit(void);
extern unsigned long rds_ib_sysctl_max_send_wr;
extern unsigned long rds_ib_sysctl_max_recv_wr;
extern unsigned long rds_ib_sysctl_max_unsig_wrs;
extern unsigned long rds_ib_sysctl_max_unsig_bytes;
extern unsigned long rds_ib_sysctl_max_recv_allocation;
extern unsigned int rds_ib_sysctl_flow_control;

#endif