/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _RDS_RDS_H
#define _RDS_RDS_H

#include <net/sock.h>
#include <linux/scatterlist.h>
#include <linux/highmem.h>
#include <rdma/rdma_cm.h>
#include <linux/mutex.h>
#include <linux/rds.h>
#include <linux/rhashtable.h>
#include <linux/refcount.h>
#include <linux/in6.h>

#include "info.h"

/*
 * RDS Network protocol version
 */
#define RDS_PROTOCOL_3_0	0x0300
#define RDS_PROTOCOL_3_1	0x0301
#define RDS_PROTOCOL_VERSION	RDS_PROTOCOL_3_1
#define RDS_PROTOCOL_MAJOR(v)	((v) >> 8)
#define RDS_PROTOCOL_MINOR(v)	((v) & 255)
#define RDS_PROTOCOL(maj, min)	(((maj) << 8) | min)

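/*
 * Illustrative note (editor's addition, not from the original header):
 * RDS_PROTOCOL() packs the major version in the high byte and the minor
 * version in the low byte, so for example
 *   RDS_PROTOCOL(3, 1) == 0x0301 == RDS_PROTOCOL_3_1
 *   RDS_PROTOCOL_MAJOR(0x0301) == 3, RDS_PROTOCOL_MINOR(0x0301) == 1
 */
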
/* The following ports, 16385, 18634, 18635, are registered with IANA as
 * the ports to be used for RDS over TCP and UDP. Currently, only RDS over
 * TCP and RDS over IB/RDMA are implemented. 18634 is the historical value
 * used for the RDMA_CM listener port. RDS/TCP uses port 16385. After
 * IPv6 work, RDMA_CM also uses 16385 as the listener port. 18634 is kept
 * to ensure compatibility with older RDS modules. Those ports are defined
 * in each transport's header file.
 */
#define RDS_PORT	18634

#ifdef ATOMIC64_INIT
#define KERNEL_HAS_ATOMIC64
#endif

#ifdef RDS_DEBUG
#define rdsdebug(fmt, args...) pr_debug("%s(): " fmt, __func__ , ##args)
#else
/* sigh, pr_debug() causes unused variable warnings */
static inline __printf(1, 2)
void rdsdebug(char *fmt, ...)
{
}
#endif

/* XXX is there one of these somewhere? */
#define ceil(x, y) \
	({ unsigned long __x = (x), __y = (y); (__x + __y - 1) / __y; })

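/*
 * Illustrative note (editor's addition): ceil() is integer division rounded
 * up, e.g. ceil(10, 4) evaluates to 3.
 */
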
#define RDS_FRAG_SHIFT	12
#define RDS_FRAG_SIZE	((unsigned int)(1 << RDS_FRAG_SHIFT))

/* Used to limit both RDMA and non-RDMA RDS message to 1MB */
#define RDS_MAX_MSG_SIZE	((unsigned int)(1 << 20))

#define RDS_CONG_MAP_BYTES	(65536 / 8)
#define RDS_CONG_MAP_PAGES	(PAGE_ALIGN(RDS_CONG_MAP_BYTES) / PAGE_SIZE)
#define RDS_CONG_MAP_PAGE_BITS	(PAGE_SIZE * 8)

struct rds_cong_map {
	struct rb_node		m_rb_node;
	struct in6_addr		m_addr;
	wait_queue_head_t	m_waitq;
	struct list_head	m_conn_list;
	unsigned long		m_page_addrs[RDS_CONG_MAP_PAGES];
};


/*
 * This is how we will track the connection state:
 * A connection is always in one of the following
 * states. Updates to the state are atomic and imply
 * a memory barrier.
 */
enum {
	RDS_CONN_DOWN = 0,
	RDS_CONN_CONNECTING,
	RDS_CONN_DISCONNECTING,
	RDS_CONN_UP,
	RDS_CONN_RESETTING,
	RDS_CONN_ERROR,
};

/* Bits for c_flags */
#define RDS_LL_SEND_FULL	0
#define RDS_RECONNECT_PENDING	1
#define RDS_IN_XMIT		2
#define RDS_RECV_REFILL		3
#define	RDS_DESTROY_PENDING	4

/* Max number of multipaths per RDS connection. Must be a power of 2 */
#define	RDS_MPATH_WORKERS	8
#define	RDS_MPATH_HASH(rs, n) (jhash_1word((rs)->rs_bound_port, \
			       (rs)->rs_hash_initval) & ((n) - 1))
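/*
 * Illustrative note (editor's addition): RDS_MPATH_HASH() hashes the socket's
 * bound port and masks with (n - 1), so for a power-of-two n it yields a path
 * index in [0, n), e.g. RDS_MPATH_HASH(rs, RDS_MPATH_WORKERS) is in 0..7.
 */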

#define IS_CANONICAL(laddr, faddr) (htonl(laddr) < htonl(faddr))

/* Per mpath connection state */
struct rds_conn_path {
	struct rds_connection	*cp_conn;
	struct rds_message	*cp_xmit_rm;
	unsigned long		cp_xmit_sg;
	unsigned int		cp_xmit_hdr_off;
	unsigned int		cp_xmit_data_off;
	unsigned int		cp_xmit_atomic_sent;
	unsigned int		cp_xmit_rdma_sent;
	unsigned int		cp_xmit_data_sent;

	spinlock_t		cp_lock;		/* protect msg queues */
	u64			cp_next_tx_seq;
	struct list_head	cp_send_queue;
	struct list_head	cp_retrans;

	u64			cp_next_rx_seq;

	void			*cp_transport_data;

	atomic_t		cp_state;
	unsigned long		cp_send_gen;
	unsigned long		cp_flags;
	unsigned long		cp_reconnect_jiffies;
	struct delayed_work	cp_send_w;
	struct delayed_work	cp_recv_w;
	struct delayed_work	cp_conn_w;
	struct work_struct	cp_down_w;
	struct mutex		cp_cm_lock;	/* protect cp_state & cm */
	wait_queue_head_t	cp_waitq;

	unsigned int		cp_unacked_packets;
	unsigned int		cp_unacked_bytes;
	unsigned int		cp_index;
};

/* One rds_connection per RDS address pair */
struct rds_connection {
	struct hlist_node	c_hash_node;
	struct in6_addr		c_laddr;
	struct in6_addr		c_faddr;
	int			c_dev_if; /* ifindex used for this conn */
	int			c_bound_if; /* ifindex of c_laddr */
	unsigned int		c_loopback:1,
				c_isv6:1,
				c_ping_triggered:1,
				c_pad_to_32:29;
	int			c_npaths;
	struct rds_connection	*c_passive;
	struct rds_transport	*c_trans;

	struct rds_cong_map	*c_lcong;
	struct rds_cong_map	*c_fcong;

	/* Protocol version */
	unsigned int		c_version;
	possible_net_t		c_net;

	struct list_head	c_map_item;
	unsigned long		c_map_queued;

	struct rds_conn_path	*c_path;
	wait_queue_head_t	c_hs_waitq; /* handshake waitq */

	u32			c_my_gen_num;
	u32			c_peer_gen_num;
};

static inline
struct net *rds_conn_net(struct rds_connection *conn)
{
	return read_pnet(&conn->c_net);
}

static inline
void rds_conn_net_set(struct rds_connection *conn, struct net *net)
{
	write_pnet(&conn->c_net, net);
}

#define RDS_FLAG_CONG_BITMAP	0x01
#define RDS_FLAG_ACK_REQUIRED	0x02
#define RDS_FLAG_RETRANSMITTED	0x04
#define RDS_MAX_ADV_CREDIT	255

/* RDS_FLAG_PROBE_PORT is the reserved sport used for sending a ping
 * probe to exchange control information before establishing a connection.
 * Currently the only control information exchanged is the number of
 * supported paths. If the peer is a legacy (older kernel revision) peer,
 * it returns a pong message without the additional control information,
 * which tells the sender that the peer is running an older revision.
 */
#define RDS_FLAG_PROBE_PORT	1
#define	RDS_HS_PROBE(sport, dport) \
		((sport == RDS_FLAG_PROBE_PORT && dport == 0) || \
		 (sport == 0 && dport == RDS_FLAG_PROBE_PORT))
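/*
 * Illustrative note (editor's addition): with RDS_FLAG_PROBE_PORT == 1,
 * RDS_HS_PROBE(1, 0) (the ping probe) and RDS_HS_PROBE(0, 1) (the pong
 * reply) are both true; any other sport/dport pair is not a handshake probe.
 */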
/*
 * Maximum space available for extension headers.
 */
#define RDS_HEADER_EXT_SPACE	16

struct rds_header {
	__be64	h_sequence;
	__be64	h_ack;
	__be32	h_len;
	__be16	h_sport;
	__be16	h_dport;
	u8	h_flags;
	u8	h_credit;
	u8	h_padding[4];
	__sum16	h_csum;

	u8	h_exthdr[RDS_HEADER_EXT_SPACE];
};

/*
 * Reserved - indicates end of extensions
 */
#define RDS_EXTHDR_NONE		0

/*
 * This extension header is included in the very
 * first message that is sent on a new connection,
 * and identifies the protocol level. This will help
 * rolling updates if a future change requires breaking
 * the protocol.
 * NB: This is no longer true for IB, where we do a version
 * negotiation during the connection setup phase (protocol
 * version information is included in the RDMA CM private data).
 */
#define RDS_EXTHDR_VERSION	1
struct rds_ext_header_version {
	__be32			h_version;
};

/*
 * This extension header is included in the RDS message
 * chasing an RDMA operation.
 */
#define RDS_EXTHDR_RDMA		2
struct rds_ext_header_rdma {
	__be32			h_rdma_rkey;
};

/*
 * This extension header tells the peer about the
 * destination <R_Key,offset> of the requested RDMA
 * operation.
 */
#define RDS_EXTHDR_RDMA_DEST	3
struct rds_ext_header_rdma_dest {
	__be32			h_rdma_rkey;
	__be32			h_rdma_offset;
};

/* Extension header announcing number of paths.
 * Implicit length = 2 bytes.
 */
#define RDS_EXTHDR_NPATHS	5
#define RDS_EXTHDR_GEN_NUM	6

#define __RDS_EXTHDR_MAX	16 /* for now */
#define RDS_RX_MAX_TRACES	(RDS_MSG_RX_DGRAM_TRACE_MAX + 1)
#define	RDS_MSG_RX_HDR		0
#define	RDS_MSG_RX_START	1
#define	RDS_MSG_RX_END		2
#define	RDS_MSG_RX_CMSG		3

struct rds_incoming {
	refcount_t		i_refcount;
	struct list_head	i_item;
	struct rds_connection	*i_conn;
	struct rds_conn_path	*i_conn_path;
	struct rds_header	i_hdr;
	unsigned long		i_rx_jiffies;
	struct in6_addr		i_saddr;

	rds_rdma_cookie_t	i_rdma_cookie;
	struct timeval		i_rx_tstamp;
	u64			i_rx_lat_trace[RDS_RX_MAX_TRACES];
};

struct rds_mr {
	struct rb_node		r_rb_node;
	refcount_t		r_refcount;
	u32			r_key;

	/* A copy of the creation flags */
	unsigned int		r_use_once:1;
	unsigned int		r_invalidate:1;
	unsigned int		r_write:1;

	/* This is for RDS_MR_DEAD.
	 * It would be nice & consistent to make this part of the above
	 * bit field here, but we need to use test_and_set_bit.
	 */
	unsigned long		r_state;
	struct rds_sock		*r_sock; /* back pointer to the socket that owns us */
	struct rds_transport	*r_trans;
	void			*r_trans_private;
};

/* Flags for mr->r_state */
#define RDS_MR_DEAD		0

static inline rds_rdma_cookie_t rds_rdma_make_cookie(u32 r_key, u32 offset)
{
	return r_key | (((u64) offset) << 32);
}

static inline u32 rds_rdma_cookie_key(rds_rdma_cookie_t cookie)
{
	return cookie;
}

static inline u32 rds_rdma_cookie_offset(rds_rdma_cookie_t cookie)
{
	return cookie >> 32;
}

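/*
 * Illustrative note (editor's addition): an rds_rdma_cookie_t packs the
 * R_Key in the low 32 bits and the byte offset in the high 32 bits, e.g.
 *   rds_rdma_make_cookie(0x1234, 0x10) == 0x0000001000001234ULL
 * and rds_rdma_cookie_key()/rds_rdma_cookie_offset() recover 0x1234 and
 * 0x10 respectively.
 */
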
/* atomic operation types */
#define RDS_ATOMIC_TYPE_CSWP		0
#define RDS_ATOMIC_TYPE_FADD		1

/*
 * m_sock_item and m_conn_item are on lists that are serialized under
 * conn->c_lock. m_sock_item has additional meaning in that once it is empty
 * the message will not be put back on the retransmit list after being sent.
 * messages that are canceled while being sent rely on this.
 *
 * m_inc is used by loopback so that it can pass an incoming message straight
 * back up into the rx path. It embeds a wire header which is also used by
 * the send path, which is kind of awkward.
 *
 * m_sock_item indicates the message's presence on a socket's send or receive
 * queue. m_rs will point to that socket.
 *
 * m_daddr is used by cancellation to prune messages to a given destination.
 *
 * The RDS_MSG_ON_SOCK and RDS_MSG_ON_CONN flags are used to avoid lock
 * nesting. As paths iterate over messages on a sock, or conn, they must
 * also lock the conn, or sock, to remove the message from those lists too.
 * Testing the flag to determine if the message is still on the lists lets
 * us avoid testing the list_head directly. That means each path can use
 * the message's list_head to keep it on a local list while juggling locks
 * without confusing the other path.
 *
 * m_ack_seq is an optional field set by transports who need a different
 * sequence number range to invalidate. They can use this in a callback
 * that they pass to rds_send_drop_acked() to see if each message has been
 * acked. The HAS_ACK_SEQ flag can be used to detect messages which haven't
 * had ack_seq set yet.
 */
#define RDS_MSG_ON_SOCK		1
#define RDS_MSG_ON_CONN		2
#define RDS_MSG_HAS_ACK_SEQ	3
#define RDS_MSG_ACK_REQUIRED	4
#define RDS_MSG_RETRANSMITTED	5
#define RDS_MSG_MAPPED		6
#define RDS_MSG_PAGEVEC		7
#define RDS_MSG_FLUSH		8

struct rds_znotifier {
	struct mmpin		z_mmp;
	u32			z_cookie;
};

struct rds_msg_zcopy_info {
	struct list_head	rs_zcookie_next;
	union {
		struct rds_znotifier	znotif;
		struct rds_zcopy_cookies zcookies;
	};
};

struct rds_msg_zcopy_queue {
	struct list_head	zcookie_head;
	spinlock_t		lock; /* protects zcookie_head queue */
};

static inline void rds_message_zcopy_queue_init(struct rds_msg_zcopy_queue *q)
{
	spin_lock_init(&q->lock);
	INIT_LIST_HEAD(&q->zcookie_head);
}

struct rds_message {
	refcount_t		m_refcount;
	struct list_head	m_sock_item;
	struct list_head	m_conn_item;
	struct rds_incoming	m_inc;
	u64			m_ack_seq;
	struct in6_addr		m_daddr;
	unsigned long		m_flags;

	/* Never access m_rs without holding m_rs_lock.
	 * Lock nesting is
	 *  rm->m_rs_lock
	 *   -> rs->rs_lock
	 */
	spinlock_t		m_rs_lock;
	wait_queue_head_t	m_flush_wait;

	struct rds_sock		*m_rs;

	/* cookie to send to remote, in rds header */
	rds_rdma_cookie_t	m_rdma_cookie;

	unsigned int		m_used_sgs;
	unsigned int		m_total_sgs;

	void			*m_final_op;

	struct {
		struct rm_atomic_op {
			int			op_type;
			union {
				struct {
					uint64_t	compare;
					uint64_t	swap;
					uint64_t	compare_mask;
					uint64_t	swap_mask;
				} op_m_cswp;
				struct {
					uint64_t	add;
					uint64_t	nocarry_mask;
				} op_m_fadd;
			};

			u32			op_rkey;
			u64			op_remote_addr;
			unsigned int		op_notify:1;
			unsigned int		op_recverr:1;
			unsigned int		op_mapped:1;
			unsigned int		op_silent:1;
			unsigned int		op_active:1;
			struct scatterlist	*op_sg;
			struct rds_notifier	*op_notifier;

			struct rds_mr		*op_rdma_mr;
		} atomic;
		struct rm_rdma_op {
			u32			op_rkey;
			u64			op_remote_addr;
			unsigned int		op_write:1;
			unsigned int		op_fence:1;
			unsigned int		op_notify:1;
			unsigned int		op_recverr:1;
			unsigned int		op_mapped:1;
			unsigned int		op_silent:1;
			unsigned int		op_active:1;
			unsigned int		op_bytes;
			unsigned int		op_nents;
			unsigned int		op_count;
			struct scatterlist	*op_sg;
			struct rds_notifier	*op_notifier;

			struct rds_mr		*op_rdma_mr;
		} rdma;
		struct rm_data_op {
			unsigned int		op_active:1;
			unsigned int		op_notify:1;
			unsigned int		op_nents;
			unsigned int		op_count;
			unsigned int		op_dmasg;
			unsigned int		op_dmaoff;
			struct rds_znotifier	*op_mmp_znotifier;
			struct scatterlist	*op_sg;
		} data;
	};
};

/*
 * The RDS notifier is used (optionally) to tell the application about
 * completed RDMA operations. Rather than keeping the whole rds message
 * around on the queue, we allocate a small notifier that is put on the
 * socket's notifier_list. Notifications are delivered to the application
 * through control messages.
 */
struct rds_notifier {
	struct list_head	n_list;
	uint64_t		n_user_token;
	int			n_status;
};

/* Available as part of RDS core, so doesn't need to participate
 * in get_preferred transport etc
 */
#define	RDS_TRANS_LOOP	3

/**
 * struct rds_transport - transport specific behavioural hooks
 *
 * @xmit: .xmit is called by rds_send_xmit() to tell the transport to send
 *        part of a message. The caller serializes on the send_sem so this
 *        doesn't need to be reentrant for a given conn. The header must be
 *        sent before the data payload. .xmit must be prepared to send a
 *        message with no data payload. .xmit should return the number of
 *        bytes that were sent down the connection, including header bytes.
 *        Returning 0 tells the caller that it doesn't need to perform any
 *        additional work now. This is usually the case when the transport has
 *        filled the sending queue for its connection and will handle
 *        triggering the rds thread to continue the send when space becomes
 *        available. Returning -EAGAIN tells the caller to retry the send
 *        immediately. Returning -ENOMEM tells the caller to retry the send at
 *        some point in the future.
 *
 * @conn_shutdown: conn_shutdown stops traffic on the given connection. Once
 *                 it returns the connection can not call rds_recv_incoming().
 *                 This will only be called once after conn_connect returns
 *                 non-zero success. The caller serializes this with
 *                 the send and connecting paths (xmit_* and conn_*). The
 *                 transport is responsible for other serialization, including
 *                 rds_recv_incoming(). This is called in process context but
 *                 should try hard not to block.
 */

struct rds_transport {
	char			t_name[TRANSNAMSIZ];
	struct list_head	t_item;
	struct module		*t_owner;
	unsigned int		t_prefer_loopback:1,
				t_mp_capable:1;
	unsigned int		t_type;

	int (*laddr_check)(struct net *net, const struct in6_addr *addr,
			   __u32 scope_id);
	int (*conn_alloc)(struct rds_connection *conn, gfp_t gfp);
	void (*conn_free)(void *data);
	int (*conn_path_connect)(struct rds_conn_path *cp);
	void (*conn_path_shutdown)(struct rds_conn_path *conn);
	void (*xmit_path_prepare)(struct rds_conn_path *cp);
	void (*xmit_path_complete)(struct rds_conn_path *cp);
	int (*xmit)(struct rds_connection *conn, struct rds_message *rm,
		    unsigned int hdr_off, unsigned int sg, unsigned int off);
	int (*xmit_rdma)(struct rds_connection *conn, struct rm_rdma_op *op);
	int (*xmit_atomic)(struct rds_connection *conn, struct rm_atomic_op *op);
	int (*recv_path)(struct rds_conn_path *cp);
	int (*inc_copy_to_user)(struct rds_incoming *inc, struct iov_iter *to);
	void (*inc_free)(struct rds_incoming *inc);

	int (*cm_handle_connect)(struct rdma_cm_id *cm_id,
				 struct rdma_cm_event *event, bool isv6);
	int (*cm_initiate_connect)(struct rdma_cm_id *cm_id, bool isv6);
	void (*cm_connect_complete)(struct rds_connection *conn,
				    struct rdma_cm_event *event);

	unsigned int (*stats_info_copy)(struct rds_info_iterator *iter,
					unsigned int avail);
	void (*exit)(void);
	void *(*get_mr)(struct scatterlist *sg, unsigned long nr_sg,
			struct rds_sock *rs, u32 *key_ret);
	void (*sync_mr)(void *trans_private, int direction);
	void (*free_mr)(void *trans_private, int invalidate);
	void (*flush_mrs)(void);
	bool (*t_unloading)(struct rds_connection *conn);
};

/* Bind hash table key length. It is the sum of the size of a struct
 * in6_addr, a scope_id and a port.
 */
#define RDS_BOUND_KEY_LEN \
	(sizeof(struct in6_addr) + sizeof(__u32) + sizeof(__be16))

struct rds_sock {
	struct sock		rs_sk;

	u64			rs_user_addr;
	u64			rs_user_bytes;

	/*
	 * bound_addr used for both incoming and outgoing, no INADDR_ANY
	 * support.
	 */
	struct rhash_head	rs_bound_node;
	u8			rs_bound_key[RDS_BOUND_KEY_LEN];
	struct sockaddr_in6	rs_bound_sin6;
#define rs_bound_addr		rs_bound_sin6.sin6_addr
#define rs_bound_addr_v4	rs_bound_sin6.sin6_addr.s6_addr32[3]
#define rs_bound_port		rs_bound_sin6.sin6_port
#define rs_bound_scope_id	rs_bound_sin6.sin6_scope_id
	struct in6_addr		rs_conn_addr;
#define rs_conn_addr_v4		rs_conn_addr.s6_addr32[3]
	__be16			rs_conn_port;
	struct rds_transport	*rs_transport;

	/*
	 * rds_sendmsg caches the conn it used the last time around.
	 * This helps avoid costly lookups.
	 */
	struct rds_connection	*rs_conn;

	/* flag indicating we were congested or not */
	int			rs_congested;
	/* seen congestion (ENOBUFS) when sending? */
	int			rs_seen_congestion;

	/* rs_lock protects all these adjacent members before the newline */
	spinlock_t		rs_lock;
	struct list_head	rs_send_queue;
	u32			rs_snd_bytes;
	int			rs_rcv_bytes;
	struct list_head	rs_notify_queue; /* currently used for failed RDMAs */

	/* Congestion wake_up. If rs_cong_monitor is set, we use cong_mask
	 * to decide whether the application should be woken up.
	 * If not set, we use rs_cong_track to find out whether a cong map
	 * update arrived.
	 */
	uint64_t		rs_cong_mask;
	uint64_t		rs_cong_notify;
	struct list_head	rs_cong_list;
	unsigned long		rs_cong_track;

	/*
	 * rs_recv_lock protects the receive queue, and is
	 * used to serialize with rds_release.
	 */
	rwlock_t		rs_recv_lock;
	struct list_head	rs_recv_queue;

	/* just for stats reporting */
	struct list_head	rs_item;

	/* these have their own lock */
	spinlock_t		rs_rdma_lock;
	struct rb_root		rs_rdma_keys;

	/* Socket options - in case there will be more */
	unsigned char		rs_recverr,
				rs_cong_monitor;
	u32			rs_hash_initval;

	/* Socket receive path trace points */
	u8			rs_rx_traces;
	u8			rs_rx_trace[RDS_MSG_RX_DGRAM_TRACE_MAX];
	struct rds_msg_zcopy_queue rs_zcookie_queue;
};

static inline struct rds_sock *rds_sk_to_rs(const struct sock *sk)
{
	return container_of(sk, struct rds_sock, rs_sk);
}
static inline struct sock *rds_rs_to_sk(struct rds_sock *rs)
{
	return &rs->rs_sk;
}

/*
 * The stack assigns sk_sndbuf and sk_rcvbuf to twice the specified value
 * to account for overhead. We don't account for overhead, we just apply
 * the number of payload bytes to the specified value.
 */
static inline int rds_sk_sndbuf(struct rds_sock *rs)
{
	return rds_rs_to_sk(rs)->sk_sndbuf / 2;
}
static inline int rds_sk_rcvbuf(struct rds_sock *rs)
{
	return rds_rs_to_sk(rs)->sk_rcvbuf / 2;
}

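/*
 * Illustrative note (editor's addition): because the socket core doubles the
 * value passed to SO_SNDBUF/SO_RCVBUF, a setsockopt() of 128 KiB ends up as
 * sk_sndbuf == 256 KiB, and rds_sk_sndbuf() reports 128 KiB of payload space.
 */
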
struct rds_statistics {
	uint64_t	s_conn_reset;
	uint64_t	s_recv_drop_bad_checksum;
	uint64_t	s_recv_drop_old_seq;
	uint64_t	s_recv_drop_no_sock;
	uint64_t	s_recv_drop_dead_sock;
	uint64_t	s_recv_deliver_raced;
	uint64_t	s_recv_delivered;
	uint64_t	s_recv_queued;
	uint64_t	s_recv_immediate_retry;
	uint64_t	s_recv_delayed_retry;
	uint64_t	s_recv_ack_required;
	uint64_t	s_recv_rdma_bytes;
	uint64_t	s_recv_ping;
	uint64_t	s_send_queue_empty;
	uint64_t	s_send_queue_full;
	uint64_t	s_send_lock_contention;
	uint64_t	s_send_lock_queue_raced;
	uint64_t	s_send_immediate_retry;
	uint64_t	s_send_delayed_retry;
	uint64_t	s_send_drop_acked;
	uint64_t	s_send_ack_required;
	uint64_t	s_send_queued;
	uint64_t	s_send_rdma;
	uint64_t	s_send_rdma_bytes;
	uint64_t	s_send_pong;
	uint64_t	s_page_remainder_hit;
	uint64_t	s_page_remainder_miss;
	uint64_t	s_copy_to_user;
	uint64_t	s_copy_from_user;
	uint64_t	s_cong_update_queued;
	uint64_t	s_cong_update_received;
	uint64_t	s_cong_send_error;
	uint64_t	s_cong_send_blocked;
	uint64_t	s_recv_bytes_added_to_socket;
	uint64_t	s_recv_bytes_removed_from_socket;

};

/* af_rds.c */
void rds_sock_addref(struct rds_sock *rs);
void rds_sock_put(struct rds_sock *rs);
void rds_wake_sk_sleep(struct rds_sock *rs);
static inline void __rds_wake_sk_sleep(struct sock *sk)
{
	wait_queue_head_t *waitq = sk_sleep(sk);

	if (!sock_flag(sk, SOCK_DEAD) && waitq)
		wake_up(waitq);
}
extern wait_queue_head_t rds_poll_waitq;


/* bind.c */
int rds_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len);
void rds_remove_bound(struct rds_sock *rs);
struct rds_sock *rds_find_bound(const struct in6_addr *addr, __be16 port,
				__u32 scope_id);
int rds_bind_lock_init(void);
void rds_bind_lock_destroy(void);

/* cong.c */
int rds_cong_get_maps(struct rds_connection *conn);
void rds_cong_add_conn(struct rds_connection *conn);
void rds_cong_remove_conn(struct rds_connection *conn);
void rds_cong_set_bit(struct rds_cong_map *map, __be16 port);
void rds_cong_clear_bit(struct rds_cong_map *map, __be16 port);
int rds_cong_wait(struct rds_cong_map *map, __be16 port, int nonblock, struct rds_sock *rs);
void rds_cong_queue_updates(struct rds_cong_map *map);
void rds_cong_map_updated(struct rds_cong_map *map, uint64_t);
int rds_cong_updated_since(unsigned long *recent);
void rds_cong_add_socket(struct rds_sock *);
void rds_cong_remove_socket(struct rds_sock *);
void rds_cong_exit(void);
struct rds_message *rds_cong_update_alloc(struct rds_connection *conn);

/* connection.c */
extern u32 rds_gen_num;
int rds_conn_init(void);
void rds_conn_exit(void);
struct rds_connection *rds_conn_create(struct net *net,
				       const struct in6_addr *laddr,
				       const struct in6_addr *faddr,
				       struct rds_transport *trans, gfp_t gfp,
				       int dev_if);
struct rds_connection *rds_conn_create_outgoing(struct net *net,
						const struct in6_addr *laddr,
						const struct in6_addr *faddr,
						struct rds_transport *trans,
						gfp_t gfp, int dev_if);
void rds_conn_shutdown(struct rds_conn_path *cpath);
void rds_conn_destroy(struct rds_connection *conn);
void rds_conn_drop(struct rds_connection *conn);
void rds_conn_path_drop(struct rds_conn_path *cpath, bool destroy);
void rds_conn_connect_if_down(struct rds_connection *conn);
void rds_conn_path_connect_if_down(struct rds_conn_path *cp);
void rds_for_each_conn_info(struct socket *sock, unsigned int len,
			    struct rds_info_iterator *iter,
			    struct rds_info_lengths *lens,
			    int (*visitor)(struct rds_connection *, void *),
			    u64 *buffer,
			    size_t item_len);

__printf(2, 3)
void __rds_conn_path_error(struct rds_conn_path *cp, const char *, ...);
#define rds_conn_path_error(cp, fmt...) \
	__rds_conn_path_error(cp, KERN_WARNING "RDS: " fmt)

static inline int
rds_conn_path_transition(struct rds_conn_path *cp, int old, int new)
{
	return atomic_cmpxchg(&cp->cp_state, old, new) == old;
}

static inline int
rds_conn_transition(struct rds_connection *conn, int old, int new)
{
	WARN_ON(conn->c_trans->t_mp_capable);
	return rds_conn_path_transition(&conn->c_path[0], old, new);
}

static inline int
rds_conn_path_state(struct rds_conn_path *cp)
{
	return atomic_read(&cp->cp_state);
}

static inline int
rds_conn_state(struct rds_connection *conn)
{
	WARN_ON(conn->c_trans->t_mp_capable);
	return rds_conn_path_state(&conn->c_path[0]);
}

static inline int
rds_conn_path_up(struct rds_conn_path *cp)
{
	return atomic_read(&cp->cp_state) == RDS_CONN_UP;
}

static inline int
rds_conn_up(struct rds_connection *conn)
{
	WARN_ON(conn->c_trans->t_mp_capable);
	return rds_conn_path_up(&conn->c_path[0]);
}

static inline int
rds_conn_path_connecting(struct rds_conn_path *cp)
{
	return atomic_read(&cp->cp_state) == RDS_CONN_CONNECTING;
}

static inline int
rds_conn_connecting(struct rds_connection *conn)
{
	WARN_ON(conn->c_trans->t_mp_capable);
	return rds_conn_path_connecting(&conn->c_path[0]);
}

/* message.c */
struct rds_message *rds_message_alloc(unsigned int nents, gfp_t gfp);
struct scatterlist *rds_message_alloc_sgs(struct rds_message *rm, int nents);
int rds_message_copy_from_user(struct rds_message *rm, struct iov_iter *from,
			       bool zcopy);
struct rds_message *rds_message_map_pages(unsigned long *page_addrs, unsigned int total_len);
void rds_message_populate_header(struct rds_header *hdr, __be16 sport,
				 __be16 dport, u64 seq);
int rds_message_add_extension(struct rds_header *hdr,
			      unsigned int type, const void *data, unsigned int len);
int rds_message_next_extension(struct rds_header *hdr,
			       unsigned int *pos, void *buf, unsigned int *buflen);
int rds_message_add_rdma_dest_extension(struct rds_header *hdr, u32 r_key, u32 offset);
int rds_message_inc_copy_to_user(struct rds_incoming *inc, struct iov_iter *to);
void rds_message_inc_free(struct rds_incoming *inc);
void rds_message_addref(struct rds_message *rm);
void rds_message_put(struct rds_message *rm);
void rds_message_wait(struct rds_message *rm);
void rds_message_unmapped(struct rds_message *rm);
void rds_notify_msg_zcopy_purge(struct rds_msg_zcopy_queue *info);

static inline void rds_message_make_checksum(struct rds_header *hdr)
{
	hdr->h_csum = 0;
	hdr->h_csum = ip_fast_csum((void *) hdr, sizeof(*hdr) >> 2);
}

static inline int rds_message_verify_checksum(const struct rds_header *hdr)
{
	return !hdr->h_csum || ip_fast_csum((void *) hdr, sizeof(*hdr) >> 2) == 0;
}


/* page.c */
int rds_page_remainder_alloc(struct scatterlist *scat, unsigned long bytes,
			     gfp_t gfp);
void rds_page_exit(void);

/* recv.c */
void rds_inc_init(struct rds_incoming *inc, struct rds_connection *conn,
		  struct in6_addr *saddr);
void rds_inc_path_init(struct rds_incoming *inc, struct rds_conn_path *conn,
		       struct in6_addr *saddr);
void rds_inc_put(struct rds_incoming *inc);
void rds_recv_incoming(struct rds_connection *conn, struct in6_addr *saddr,
		       struct in6_addr *daddr,
		       struct rds_incoming *inc, gfp_t gfp);
int rds_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
		int msg_flags);
void rds_clear_recv_queue(struct rds_sock *rs);
int rds_notify_queue_get(struct rds_sock *rs, struct msghdr *msg);
void rds_inc_info_copy(struct rds_incoming *inc,
		       struct rds_info_iterator *iter,
		       __be32 saddr, __be32 daddr, int flip);
void rds6_inc_info_copy(struct rds_incoming *inc,
			struct rds_info_iterator *iter,
			struct in6_addr *saddr, struct in6_addr *daddr,
			int flip);

/* send.c */
int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len);
void rds_send_path_reset(struct rds_conn_path *conn);
int rds_send_xmit(struct rds_conn_path *cp);
struct sockaddr_in;
void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in6 *dest);
typedef int (*is_acked_func)(struct rds_message *rm, uint64_t ack);
void rds_send_drop_acked(struct rds_connection *conn, u64 ack,
			 is_acked_func is_acked);
void rds_send_path_drop_acked(struct rds_conn_path *cp, u64 ack,
			      is_acked_func is_acked);
void rds_send_ping(struct rds_connection *conn, int cp_index);
int rds_send_pong(struct rds_conn_path *cp, __be16 dport);

/* rdma.c */
void rds_rdma_unuse(struct rds_sock *rs, u32 r_key, int force);
int rds_get_mr(struct rds_sock *rs, char __user *optval, int optlen);
int rds_get_mr_for_dest(struct rds_sock *rs, char __user *optval, int optlen);
int rds_free_mr(struct rds_sock *rs, char __user *optval, int optlen);
void rds_rdma_drop_keys(struct rds_sock *rs);
int rds_rdma_extra_size(struct rds_rdma_args *args);
int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
		       struct cmsghdr *cmsg);
int rds_cmsg_rdma_dest(struct rds_sock *rs, struct rds_message *rm,
		       struct cmsghdr *cmsg);
int rds_cmsg_rdma_map(struct rds_sock *rs, struct rds_message *rm,
		      struct cmsghdr *cmsg);
void rds_rdma_free_op(struct rm_rdma_op *ro);
void rds_atomic_free_op(struct rm_atomic_op *ao);
void rds_rdma_send_complete(struct rds_message *rm, int wc_status);
void rds_atomic_send_complete(struct rds_message *rm, int wc_status);
int rds_cmsg_atomic(struct rds_sock *rs, struct rds_message *rm,
		    struct cmsghdr *cmsg);

void __rds_put_mr_final(struct rds_mr *mr);
static inline void rds_mr_put(struct rds_mr *mr)
{
	if (refcount_dec_and_test(&mr->r_refcount))
		__rds_put_mr_final(mr);
}

static inline bool rds_destroy_pending(struct rds_connection *conn)
{
	return !check_net(rds_conn_net(conn)) ||
		(conn->c_trans->t_unloading && conn->c_trans->t_unloading(conn));
}

/* stats.c */
DECLARE_PER_CPU_SHARED_ALIGNED(struct rds_statistics, rds_stats);
#define rds_stats_inc_which(which, member) do {		\
	per_cpu(which, get_cpu()).member++;		\
	put_cpu();					\
} while (0)
#define rds_stats_inc(member) rds_stats_inc_which(rds_stats, member)
#define rds_stats_add_which(which, member, count) do {		\
	per_cpu(which, get_cpu()).member += count;	\
	put_cpu();					\
} while (0)
#define rds_stats_add(member, count) rds_stats_add_which(rds_stats, member, count)
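/*
 * Illustrative usage note (editor's addition): a counter is bumped with the
 * member name, e.g. rds_stats_inc(s_recv_ping) or
 * rds_stats_add(s_copy_from_user, nbytes), which update the current CPU's
 * per-cpu copy of struct rds_statistics.
 */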
int rds_stats_init(void);
void rds_stats_exit(void);
void rds_stats_info_copy(struct rds_info_iterator *iter,
			 uint64_t *values, const char *const *names,
			 size_t nr);

/* sysctl.c */
int rds_sysctl_init(void);
void rds_sysctl_exit(void);
extern unsigned long rds_sysctl_sndbuf_min;
extern unsigned long rds_sysctl_sndbuf_default;
extern unsigned long rds_sysctl_sndbuf_max;
extern unsigned long rds_sysctl_reconnect_min_jiffies;
extern unsigned long rds_sysctl_reconnect_max_jiffies;
extern unsigned int rds_sysctl_max_unacked_packets;
extern unsigned int rds_sysctl_max_unacked_bytes;
extern unsigned int rds_sysctl_ping_enable;
extern unsigned long rds_sysctl_trace_flags;
extern unsigned int rds_sysctl_trace_level;

/* threads.c */
int rds_threads_init(void);
void rds_threads_exit(void);
extern struct workqueue_struct *rds_wq;
void rds_queue_reconnect(struct rds_conn_path *cp);
void rds_connect_worker(struct work_struct *);
void rds_shutdown_worker(struct work_struct *);
void rds_send_worker(struct work_struct *);
void rds_recv_worker(struct work_struct *);
void rds_connect_path_complete(struct rds_conn_path *conn, int curr);
void rds_connect_complete(struct rds_connection *conn);
int rds_addr_cmp(const struct in6_addr *a1, const struct in6_addr *a2);

/* transport.c */
void rds_trans_register(struct rds_transport *trans);
void rds_trans_unregister(struct rds_transport *trans);
struct rds_transport *rds_trans_get_preferred(struct net *net,
					      const struct in6_addr *addr,
					      __u32 scope_id);
void rds_trans_put(struct rds_transport *trans);
unsigned int rds_trans_stats_info_copy(struct rds_info_iterator *iter,
				       unsigned int avail);
struct rds_transport *rds_trans_get(int t_type);
int rds_trans_init(void);
void rds_trans_exit(void);

#endif