/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _RDS_RDS_H
#define _RDS_RDS_H

#include <net/sock.h>
#include <linux/scatterlist.h>
#include <linux/highmem.h>
#include <rdma/rdma_cm.h>
#include <linux/mutex.h>
#include <linux/rds.h>
#include <linux/rhashtable.h>
#include <linux/refcount.h>
#include <linux/in6.h>

#include "info.h"

/*
 * RDS Network protocol version
 */
#define RDS_PROTOCOL_3_0 0x0300
#define RDS_PROTOCOL_3_1 0x0301
#define RDS_PROTOCOL_4_0 0x0400
#define RDS_PROTOCOL_4_1 0x0401
#define RDS_PROTOCOL_VERSION RDS_PROTOCOL_3_1
#define RDS_PROTOCOL_MAJOR(v) ((v) >> 8)
#define RDS_PROTOCOL_MINOR(v) ((v) & 255)
#define RDS_PROTOCOL(maj, min) (((maj) << 8) | min)
#define RDS_PROTOCOL_COMPAT_VERSION RDS_PROTOCOL_3_1
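
/*
 * Worked example of the packing above: RDS_PROTOCOL(4, 1) evaluates to
 * 0x0401, and RDS_PROTOCOL_MAJOR()/RDS_PROTOCOL_MINOR() recover 4 and 1
 * from that value.
 */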

/* The following ports, 16385, 18634, 18635, are registered with IANA as
 * the ports to be used for RDS over TCP and UDP. Currently, only RDS over
 * TCP and RDS over IB/RDMA are implemented. 18634 is the historical value
 * used for the RDMA_CM listener port. RDS/TCP uses port 16385. After
 * IPv6 work, RDMA_CM also uses 16385 as the listener port. 18634 is kept
 * to ensure compatibility with older RDS modules. Those ports are defined
 * in each transport's header file.
 */
#define RDS_PORT 18634

#ifdef ATOMIC64_INIT
#define KERNEL_HAS_ATOMIC64
#endif
#ifdef RDS_DEBUG
#define rdsdebug(fmt, args...) pr_debug("%s(): " fmt, __func__ , ##args)
#else
/* sigh, pr_debug() causes unused variable warnings */
static inline __printf(1, 2)
void rdsdebug(char *fmt, ...)
{
}
#endif

#define RDS_FRAG_SHIFT 12
#define RDS_FRAG_SIZE ((unsigned int)(1 << RDS_FRAG_SHIFT))

/* Used to limit both RDMA and non-RDMA RDS messages to 1MB */
#define RDS_MAX_MSG_SIZE ((unsigned int)(1 << 20))

#define RDS_CONG_MAP_BYTES (65536 / 8)
#define RDS_CONG_MAP_PAGES (PAGE_ALIGN(RDS_CONG_MAP_BYTES) / PAGE_SIZE)
#define RDS_CONG_MAP_PAGE_BITS (PAGE_SIZE * 8)

struct rds_cong_map {
	struct rb_node m_rb_node;
	struct in6_addr m_addr;
	wait_queue_head_t m_waitq;
	struct list_head m_conn_list;
	unsigned long m_page_addrs[RDS_CONG_MAP_PAGES];
};


/*
 * This is how we will track the connection state:
 * A connection is always in one of the following
 * states. Updates to the state are atomic and imply
 * a memory barrier.
 */
enum {
	RDS_CONN_DOWN = 0,
	RDS_CONN_CONNECTING,
	RDS_CONN_DISCONNECTING,
	RDS_CONN_UP,
	RDS_CONN_RESETTING,
	RDS_CONN_ERROR,
};

/* Bits for c_flags */
#define RDS_LL_SEND_FULL 0
#define RDS_RECONNECT_PENDING 1
#define RDS_IN_XMIT 2
#define RDS_RECV_REFILL 3
#define RDS_DESTROY_PENDING 4

/* Max number of multipaths per RDS connection. Must be a power of 2 */
#define RDS_MPATH_WORKERS 8
#define RDS_MPATH_HASH(rs, n) (jhash_1word((rs)->rs_bound_port, \
			       (rs)->rs_hash_initval) & ((n) - 1))
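
/*
 * Illustration: with n == RDS_MPATH_WORKERS (8, a power of 2), the jhash of
 * the socket's bound port, seeded with rs_hash_initval, is masked with
 * (n - 1), so the macro yields a path index in the range 0..7.
 */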

#define IS_CANONICAL(laddr, faddr) (htonl(laddr) < htonl(faddr))

/* Per mpath connection state */
struct rds_conn_path {
	struct rds_connection *cp_conn;
	struct rds_message *cp_xmit_rm;
	unsigned long cp_xmit_sg;
	unsigned int cp_xmit_hdr_off;
	unsigned int cp_xmit_data_off;
	unsigned int cp_xmit_atomic_sent;
	unsigned int cp_xmit_rdma_sent;
	unsigned int cp_xmit_data_sent;

	spinlock_t cp_lock; /* protect msg queues */
	u64 cp_next_tx_seq;
	struct list_head cp_send_queue;
	struct list_head cp_retrans;

	u64 cp_next_rx_seq;

	void *cp_transport_data;

	atomic_t cp_state;
	unsigned long cp_send_gen;
	unsigned long cp_flags;
	unsigned long cp_reconnect_jiffies;
	struct delayed_work cp_send_w;
	struct delayed_work cp_recv_w;
	struct delayed_work cp_conn_w;
	struct work_struct cp_down_w;
	struct mutex cp_cm_lock; /* protect cp_state & cm */
	wait_queue_head_t cp_waitq;

	unsigned int cp_unacked_packets;
	unsigned int cp_unacked_bytes;
	unsigned int cp_index;
};

/* One rds_connection per RDS address pair */
struct rds_connection {
	struct hlist_node c_hash_node;
	struct in6_addr c_laddr;
	struct in6_addr c_faddr;
	int c_dev_if; /* ifindex used for this conn */
	int c_bound_if; /* ifindex of c_laddr */
	unsigned int c_loopback:1,
		     c_isv6:1,
		     c_ping_triggered:1,
		     c_pad_to_32:29;
	int c_npaths;
	struct rds_connection *c_passive;
	struct rds_transport *c_trans;

	struct rds_cong_map *c_lcong;
	struct rds_cong_map *c_fcong;

	/* Protocol version */
	unsigned int c_proposed_version;
	unsigned int c_version;
	possible_net_t c_net;

	/* TOS */
	u8 c_tos;

	struct list_head c_map_item;
	unsigned long c_map_queued;

	struct rds_conn_path *c_path;
	wait_queue_head_t c_hs_waitq; /* handshake waitq */

	u32 c_my_gen_num;
	u32 c_peer_gen_num;
};

static inline
struct net *rds_conn_net(struct rds_connection *conn)
{
	return read_pnet(&conn->c_net);
}

static inline
void rds_conn_net_set(struct rds_connection *conn, struct net *net)
{
	write_pnet(&conn->c_net, net);
}

#define RDS_FLAG_CONG_BITMAP 0x01
#define RDS_FLAG_ACK_REQUIRED 0x02
#define RDS_FLAG_RETRANSMITTED 0x04
#define RDS_MAX_ADV_CREDIT 255

/* RDS_FLAG_PROBE_PORT is the reserved sport used for sending a ping
 * probe to exchange control information before establishing a connection.
 * Currently the control information that is exchanged is the number of
 * supported paths. If the peer is a legacy (older kernel revision) peer,
 * it would return a pong message without additional control information
 * that would then alert the sender that the peer was an older rev.
 */
#define RDS_FLAG_PROBE_PORT 1
#define RDS_HS_PROBE(sport, dport) \
		((sport == RDS_FLAG_PROBE_PORT && dport == 0) || \
		 (sport == 0 && dport == RDS_FLAG_PROBE_PORT))
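
/*
 * Put differently, RDS_HS_PROBE() matches both directions of the handshake
 * exchange: a probe ping with sport == RDS_FLAG_PROBE_PORT and dport == 0,
 * and a reply going the other way with sport == 0 and
 * dport == RDS_FLAG_PROBE_PORT.
 */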
/*
 * Maximum space available for extension headers.
 */
#define RDS_HEADER_EXT_SPACE 16

struct rds_header {
	__be64 h_sequence;
	__be64 h_ack;
	__be32 h_len;
	__be16 h_sport;
	__be16 h_dport;
	u8 h_flags;
	u8 h_credit;
	u8 h_padding[4];
	__sum16 h_csum;

	u8 h_exthdr[RDS_HEADER_EXT_SPACE];
};

/*
 * Reserved - indicates end of extensions
 */
#define RDS_EXTHDR_NONE 0

/*
 * This extension header is included in the very
 * first message that is sent on a new connection,
 * and identifies the protocol level. This will help
 * rolling updates if a future change requires breaking
 * the protocol.
 * NB: This is no longer true for IB, where we do a version
 * negotiation during the connection setup phase (protocol
 * version information is included in the RDMA CM private data).
 */
#define RDS_EXTHDR_VERSION 1
struct rds_ext_header_version {
	__be32 h_version;
};

/*
 * This extension header is included in the RDS message
 * chasing an RDMA operation.
 */
#define RDS_EXTHDR_RDMA 2
struct rds_ext_header_rdma {
	__be32 h_rdma_rkey;
};

/*
 * This extension header tells the peer about the
 * destination <R_Key,offset> of the requested RDMA
 * operation.
 */
#define RDS_EXTHDR_RDMA_DEST 3
struct rds_ext_header_rdma_dest {
	__be32 h_rdma_rkey;
	__be32 h_rdma_offset;
};

/* Extension header announcing number of paths.
 * Implicit length = 2 bytes.
 */
#define RDS_EXTHDR_NPATHS 5
#define RDS_EXTHDR_GEN_NUM 6

#define __RDS_EXTHDR_MAX 16 /* for now */
#define RDS_RX_MAX_TRACES (RDS_MSG_RX_DGRAM_TRACE_MAX + 1)
#define RDS_MSG_RX_HDR 0
#define RDS_MSG_RX_START 1
#define RDS_MSG_RX_END 2
#define RDS_MSG_RX_CMSG 3

/* The following values are whitelisted for usercopy */
struct rds_inc_usercopy {
	rds_rdma_cookie_t rdma_cookie;
	ktime_t rx_tstamp;
};

struct rds_incoming {
	refcount_t i_refcount;
	struct list_head i_item;
	struct rds_connection *i_conn;
	struct rds_conn_path *i_conn_path;
	struct rds_header i_hdr;
	unsigned long i_rx_jiffies;
	struct in6_addr i_saddr;

	struct rds_inc_usercopy i_usercopy;
	u64 i_rx_lat_trace[RDS_RX_MAX_TRACES];
};

struct rds_mr {
	struct rb_node r_rb_node;
	struct kref r_kref;
	u32 r_key;

	/* A copy of the creation flags */
	unsigned int r_use_once:1;
	unsigned int r_invalidate:1;
	unsigned int r_write:1;

	/* This is for RDS_MR_DEAD.
	 * It would be nice & consistent to make this part of the above
	 * bit field here, but we need to use test_and_set_bit.
	 */
	unsigned long r_state;
	struct rds_sock *r_sock; /* back pointer to the socket that owns us */
	struct rds_transport *r_trans;
	void *r_trans_private;
};

/* Flags for mr->r_state */
#define RDS_MR_DEAD 0

static inline rds_rdma_cookie_t rds_rdma_make_cookie(u32 r_key, u32 offset)
{
	return r_key | (((u64) offset) << 32);
}

static inline u32 rds_rdma_cookie_key(rds_rdma_cookie_t cookie)
{
	return cookie;
}

static inline u32 rds_rdma_cookie_offset(rds_rdma_cookie_t cookie)
{
	return cookie >> 32;
}
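
/*
 * The three helpers above form a round trip: rds_rdma_make_cookie() packs
 * the R_Key into the low 32 bits and the offset into the high 32 bits, so
 * rds_rdma_cookie_key(rds_rdma_make_cookie(key, off)) == key and
 * rds_rdma_cookie_offset(rds_rdma_make_cookie(key, off)) == off.
 */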

/* atomic operation types */
#define RDS_ATOMIC_TYPE_CSWP 0
#define RDS_ATOMIC_TYPE_FADD 1

/*
 * m_sock_item and m_conn_item are on lists that are serialized under
 * conn->c_lock. m_sock_item has additional meaning in that once it is empty
 * the message will not be put back on the retransmit list after being sent.
 * messages that are canceled while being sent rely on this.
 *
 * m_inc is used by loopback so that it can pass an incoming message straight
 * back up into the rx path. It embeds a wire header which is also used by
 * the send path, which is kind of awkward.
 *
 * m_sock_item indicates the message's presence on a socket's send or receive
 * queue. m_rs will point to that socket.
 *
 * m_daddr is used by cancellation to prune messages to a given destination.
 *
 * The RDS_MSG_ON_SOCK and RDS_MSG_ON_CONN flags are used to avoid lock
 * nesting. As paths iterate over messages on a sock, or conn, they must
 * also lock the conn, or sock, to remove the message from those lists too.
 * Testing the flag to determine if the message is still on the lists lets
 * us avoid testing the list_head directly. That means each path can use
 * the message's list_head to keep it on a local list while juggling locks
 * without confusing the other path.
 *
 * m_ack_seq is an optional field set by transports who need a different
 * sequence number range to invalidate. They can use this in a callback
 * that they pass to rds_send_drop_acked() to see if each message has been
 * acked. The HAS_ACK_SEQ flag can be used to detect messages which haven't
 * had ack_seq set yet.
 */
#define RDS_MSG_ON_SOCK 1
#define RDS_MSG_ON_CONN 2
#define RDS_MSG_HAS_ACK_SEQ 3
#define RDS_MSG_ACK_REQUIRED 4
#define RDS_MSG_RETRANSMITTED 5
#define RDS_MSG_MAPPED 6
#define RDS_MSG_PAGEVEC 7
#define RDS_MSG_FLUSH 8

struct rds_znotifier {
	struct mmpin z_mmp;
	u32 z_cookie;
};

struct rds_msg_zcopy_info {
	struct list_head rs_zcookie_next;
	union {
		struct rds_znotifier znotif;
		struct rds_zcopy_cookies zcookies;
	};
};

struct rds_msg_zcopy_queue {
	struct list_head zcookie_head;
	spinlock_t lock; /* protects zcookie_head queue */
};

static inline void rds_message_zcopy_queue_init(struct rds_msg_zcopy_queue *q)
{
	spin_lock_init(&q->lock);
	INIT_LIST_HEAD(&q->zcookie_head);
}

struct rds_iov_vector {
	struct rds_iovec *iov;
	int len;
};

struct rds_iov_vector_arr {
	struct rds_iov_vector *vec;
	int len;
	int indx;
	int incr;
};

struct rds_message {
	refcount_t m_refcount;
	struct list_head m_sock_item;
	struct list_head m_conn_item;
	struct rds_incoming m_inc;
	u64 m_ack_seq;
	struct in6_addr m_daddr;
	unsigned long m_flags;

	/* Never access m_rs without holding m_rs_lock.
	 * Lock nesting is
	 *  rm->m_rs_lock
	 *   -> rs->rs_lock
	 */
	spinlock_t m_rs_lock;
	wait_queue_head_t m_flush_wait;

	struct rds_sock *m_rs;

	/* cookie to send to remote, in rds header */
	rds_rdma_cookie_t m_rdma_cookie;

	unsigned int m_used_sgs;
	unsigned int m_total_sgs;

	void *m_final_op;

	struct {
		struct rm_atomic_op {
			int op_type;
			union {
				struct {
					uint64_t compare;
					uint64_t swap;
					uint64_t compare_mask;
					uint64_t swap_mask;
				} op_m_cswp;
				struct {
					uint64_t add;
					uint64_t nocarry_mask;
				} op_m_fadd;
			};

			u32 op_rkey;
			u64 op_remote_addr;
			unsigned int op_notify:1;
			unsigned int op_recverr:1;
			unsigned int op_mapped:1;
			unsigned int op_silent:1;
			unsigned int op_active:1;
			struct scatterlist *op_sg;
			struct rds_notifier *op_notifier;

			struct rds_mr *op_rdma_mr;
		} atomic;
		struct rm_rdma_op {
			u32 op_rkey;
			u64 op_remote_addr;
			unsigned int op_write:1;
			unsigned int op_fence:1;
			unsigned int op_notify:1;
			unsigned int op_recverr:1;
			unsigned int op_mapped:1;
			unsigned int op_silent:1;
			unsigned int op_active:1;
			unsigned int op_bytes;
			unsigned int op_nents;
			unsigned int op_count;
			struct scatterlist *op_sg;
			struct rds_notifier *op_notifier;

			struct rds_mr *op_rdma_mr;

			u64 op_odp_addr;
			struct rds_mr *op_odp_mr;
		} rdma;
		struct rm_data_op {
			unsigned int op_active:1;
			unsigned int op_nents;
			unsigned int op_count;
			unsigned int op_dmasg;
			unsigned int op_dmaoff;
			struct rds_znotifier *op_mmp_znotifier;
			struct scatterlist *op_sg;
		} data;
	};

	struct rds_conn_path *m_conn_path;
};

/*
 * The RDS notifier is used (optionally) to tell the application about
 * completed RDMA operations. Rather than keeping the whole rds message
 * around on the queue, we allocate a small notifier that is put on the
 * socket's notifier_list. Notifications are delivered to the application
 * through control messages.
 */
struct rds_notifier {
	struct list_head n_list;
	uint64_t n_user_token;
	int n_status;
};

/* Available as part of RDS core, so doesn't need to participate
 * in get_preferred transport etc
 */
#define RDS_TRANS_LOOP 3

/**
 * struct rds_transport - transport specific behavioural hooks
 *
 * @xmit: .xmit is called by rds_send_xmit() to tell the transport to send
 *        part of a message. The caller serializes on the send_sem so this
 *        doesn't need to be reentrant for a given conn. The header must be
 *        sent before the data payload. .xmit must be prepared to send a
 *        message with no data payload. .xmit should return the number of
 *        bytes that were sent down the connection, including header bytes.
 *        Returning 0 tells the caller that it doesn't need to perform any
 *        additional work now. This is usually the case when the transport has
 *        filled the sending queue for its connection and will handle
 *        triggering the rds thread to continue the send when space becomes
 *        available. Returning -EAGAIN tells the caller to retry the send
 *        immediately. Returning -ENOMEM tells the caller to retry the send at
 *        some point in the future.
 *
 * @conn_shutdown: conn_shutdown stops traffic on the given connection. Once
 *        it returns, the connection can not call rds_recv_incoming().
 *        This will only be called once after conn_connect returns
 *        non-zero success. The caller serializes this with
 *        the send and connecting paths (xmit_* and conn_*). The
 *        transport is responsible for other serialization, including
 *        rds_recv_incoming(). This is called in process context but
 *        should try hard not to block.
 */

struct rds_transport {
	char t_name[TRANSNAMSIZ];
	struct list_head t_item;
	struct module *t_owner;
	unsigned int t_prefer_loopback:1,
		     t_mp_capable:1;
	unsigned int t_type;

	int (*laddr_check)(struct net *net, const struct in6_addr *addr,
			   __u32 scope_id);
	int (*conn_alloc)(struct rds_connection *conn, gfp_t gfp);
	void (*conn_free)(void *data);
	int (*conn_path_connect)(struct rds_conn_path *cp);
	void (*conn_path_shutdown)(struct rds_conn_path *conn);
	void (*xmit_path_prepare)(struct rds_conn_path *cp);
	void (*xmit_path_complete)(struct rds_conn_path *cp);
	int (*xmit)(struct rds_connection *conn, struct rds_message *rm,
		    unsigned int hdr_off, unsigned int sg, unsigned int off);
	int (*xmit_rdma)(struct rds_connection *conn, struct rm_rdma_op *op);
	int (*xmit_atomic)(struct rds_connection *conn, struct rm_atomic_op *op);
	int (*recv_path)(struct rds_conn_path *cp);
	int (*inc_copy_to_user)(struct rds_incoming *inc, struct iov_iter *to);
	void (*inc_free)(struct rds_incoming *inc);

	int (*cm_handle_connect)(struct rdma_cm_id *cm_id,
				 struct rdma_cm_event *event, bool isv6);
	int (*cm_initiate_connect)(struct rdma_cm_id *cm_id, bool isv6);
	void (*cm_connect_complete)(struct rds_connection *conn,
				    struct rdma_cm_event *event);

	unsigned int (*stats_info_copy)(struct rds_info_iterator *iter,
					unsigned int avail);
	void (*exit)(void);
	void *(*get_mr)(struct scatterlist *sg, unsigned long nr_sg,
			struct rds_sock *rs, u32 *key_ret,
			struct rds_connection *conn,
			u64 start, u64 length, int need_odp);
	void (*sync_mr)(void *trans_private, int direction);
	void (*free_mr)(void *trans_private, int invalidate);
	void (*flush_mrs)(void);
	bool (*t_unloading)(struct rds_connection *conn);
	u8 (*get_tos_map)(u8 tos);
};

/* Bind hash table key length. It is the sum of the size of a struct
 * in6_addr, a scope_id and a port.
 */
#define RDS_BOUND_KEY_LEN \
	(sizeof(struct in6_addr) + sizeof(__u32) + sizeof(__be16))

struct rds_sock {
	struct sock rs_sk;

	u64 rs_user_addr;
	u64 rs_user_bytes;

	/*
	 * bound_addr used for both incoming and outgoing, no INADDR_ANY
	 * support.
	 */
	struct rhash_head rs_bound_node;
	u8 rs_bound_key[RDS_BOUND_KEY_LEN];
	struct sockaddr_in6 rs_bound_sin6;
#define rs_bound_addr rs_bound_sin6.sin6_addr
#define rs_bound_addr_v4 rs_bound_sin6.sin6_addr.s6_addr32[3]
#define rs_bound_port rs_bound_sin6.sin6_port
#define rs_bound_scope_id rs_bound_sin6.sin6_scope_id
	struct in6_addr rs_conn_addr;
#define rs_conn_addr_v4 rs_conn_addr.s6_addr32[3]
	__be16 rs_conn_port;
	struct rds_transport *rs_transport;

	/*
	 * rds_sendmsg caches the conn it used the last time around.
	 * This helps avoid costly lookups.
	 */
	struct rds_connection *rs_conn;

	/* flag indicating we were congested or not */
	int rs_congested;
	/* seen congestion (ENOBUFS) when sending? */
	int rs_seen_congestion;

	/* rs_lock protects all these adjacent members before the newline */
	spinlock_t rs_lock;
	struct list_head rs_send_queue;
	u32 rs_snd_bytes;
	int rs_rcv_bytes;
	struct list_head rs_notify_queue; /* currently used for failed RDMAs */

	/* Congestion wake_up. If rs_cong_monitor is set, we use cong_mask
	 * to decide whether the application should be woken up.
	 * If not set, we use rs_cong_track to find out whether a cong map
	 * update arrived.
	 */
	uint64_t rs_cong_mask;
	uint64_t rs_cong_notify;
	struct list_head rs_cong_list;
	unsigned long rs_cong_track;

	/*
	 * rs_recv_lock protects the receive queue, and is
	 * used to serialize with rds_release.
	 */
	rwlock_t rs_recv_lock;
	struct list_head rs_recv_queue;

	/* just for stats reporting */
	struct list_head rs_item;

	/* these have their own lock */
	spinlock_t rs_rdma_lock;
	struct rb_root rs_rdma_keys;

	/* Socket options - in case there will be more */
	unsigned char rs_recverr,
		      rs_cong_monitor;
	u32 rs_hash_initval;

	/* Socket receive path trace points */
	u8 rs_rx_traces;
	u8 rs_rx_trace[RDS_MSG_RX_DGRAM_TRACE_MAX];
	struct rds_msg_zcopy_queue rs_zcookie_queue;
	u8 rs_tos;
};

static inline struct rds_sock *rds_sk_to_rs(const struct sock *sk)
{
	return container_of(sk, struct rds_sock, rs_sk);
}
static inline struct sock *rds_rs_to_sk(struct rds_sock *rs)
{
	return &rs->rs_sk;
}

/*
 * The stack assigns sk_sndbuf and sk_rcvbuf to twice the specified value
 * to account for overhead. We don't account for overhead, we just apply
 * the number of payload bytes to the specified value.
 */
static inline int rds_sk_sndbuf(struct rds_sock *rs)
{
	return rds_rs_to_sk(rs)->sk_sndbuf / 2;
}
static inline int rds_sk_rcvbuf(struct rds_sock *rs)
{
	return rds_rs_to_sk(rs)->sk_rcvbuf / 2;
}
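
/*
 * Example: if an application sets SO_SNDBUF to 1 MB, the socket core stores
 * 2 MB in sk_sndbuf; rds_sk_sndbuf() halves that again so RDS accounts
 * exactly 1 MB of queued payload against the limit.
 */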

struct rds_statistics {
	uint64_t s_conn_reset;
	uint64_t s_recv_drop_bad_checksum;
	uint64_t s_recv_drop_old_seq;
	uint64_t s_recv_drop_no_sock;
	uint64_t s_recv_drop_dead_sock;
	uint64_t s_recv_deliver_raced;
	uint64_t s_recv_delivered;
	uint64_t s_recv_queued;
	uint64_t s_recv_immediate_retry;
	uint64_t s_recv_delayed_retry;
	uint64_t s_recv_ack_required;
	uint64_t s_recv_rdma_bytes;
	uint64_t s_recv_ping;
	uint64_t s_send_queue_empty;
	uint64_t s_send_queue_full;
	uint64_t s_send_lock_contention;
	uint64_t s_send_lock_queue_raced;
	uint64_t s_send_immediate_retry;
	uint64_t s_send_delayed_retry;
	uint64_t s_send_drop_acked;
	uint64_t s_send_ack_required;
	uint64_t s_send_queued;
	uint64_t s_send_rdma;
	uint64_t s_send_rdma_bytes;
	uint64_t s_send_pong;
	uint64_t s_page_remainder_hit;
	uint64_t s_page_remainder_miss;
	uint64_t s_copy_to_user;
	uint64_t s_copy_from_user;
	uint64_t s_cong_update_queued;
	uint64_t s_cong_update_received;
	uint64_t s_cong_send_error;
	uint64_t s_cong_send_blocked;
	uint64_t s_recv_bytes_added_to_socket;
	uint64_t s_recv_bytes_removed_from_socket;
	uint64_t s_send_stuck_rm;
};

/* af_rds.c */
void rds_sock_addref(struct rds_sock *rs);
void rds_sock_put(struct rds_sock *rs);
void rds_wake_sk_sleep(struct rds_sock *rs);
static inline void __rds_wake_sk_sleep(struct sock *sk)
{
	wait_queue_head_t *waitq = sk_sleep(sk);

	if (!sock_flag(sk, SOCK_DEAD) && waitq)
		wake_up(waitq);
}
extern wait_queue_head_t rds_poll_waitq;


/* bind.c */
int rds_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len);
void rds_remove_bound(struct rds_sock *rs);
struct rds_sock *rds_find_bound(const struct in6_addr *addr, __be16 port,
				__u32 scope_id);
int rds_bind_lock_init(void);
void rds_bind_lock_destroy(void);

/* cong.c */
int rds_cong_get_maps(struct rds_connection *conn);
void rds_cong_add_conn(struct rds_connection *conn);
void rds_cong_remove_conn(struct rds_connection *conn);
void rds_cong_set_bit(struct rds_cong_map *map, __be16 port);
void rds_cong_clear_bit(struct rds_cong_map *map, __be16 port);
int rds_cong_wait(struct rds_cong_map *map, __be16 port, int nonblock, struct rds_sock *rs);
void rds_cong_queue_updates(struct rds_cong_map *map);
void rds_cong_map_updated(struct rds_cong_map *map, uint64_t);
int rds_cong_updated_since(unsigned long *recent);
void rds_cong_add_socket(struct rds_sock *);
void rds_cong_remove_socket(struct rds_sock *);
void rds_cong_exit(void);
struct rds_message *rds_cong_update_alloc(struct rds_connection *conn);

/* connection.c */
extern u32 rds_gen_num;
int rds_conn_init(void);
void rds_conn_exit(void);
struct rds_connection *rds_conn_create(struct net *net,
				       const struct in6_addr *laddr,
				       const struct in6_addr *faddr,
				       struct rds_transport *trans,
				       u8 tos, gfp_t gfp,
				       int dev_if);
struct rds_connection *rds_conn_create_outgoing(struct net *net,
						const struct in6_addr *laddr,
						const struct in6_addr *faddr,
						struct rds_transport *trans,
						u8 tos, gfp_t gfp, int dev_if);
void rds_conn_shutdown(struct rds_conn_path *cpath);
void rds_conn_destroy(struct rds_connection *conn);
void rds_conn_drop(struct rds_connection *conn);
void rds_conn_path_drop(struct rds_conn_path *cpath, bool destroy);
void rds_conn_connect_if_down(struct rds_connection *conn);
void rds_conn_path_connect_if_down(struct rds_conn_path *cp);
void rds_for_each_conn_info(struct socket *sock, unsigned int len,
			    struct rds_info_iterator *iter,
			    struct rds_info_lengths *lens,
			    int (*visitor)(struct rds_connection *, void *),
			    u64 *buffer,
			    size_t item_len);

__printf(2, 3)
void __rds_conn_path_error(struct rds_conn_path *cp, const char *, ...);
#define rds_conn_path_error(cp, fmt...) \
	__rds_conn_path_error(cp, KERN_WARNING "RDS: " fmt)

static inline int
rds_conn_path_transition(struct rds_conn_path *cp, int old, int new)
{
	return atomic_cmpxchg(&cp->cp_state, old, new) == old;
}
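
/*
 * rds_conn_path_transition() is a compare-and-swap on cp_state: it moves the
 * path from "old" to "new" only if nobody changed the state first, and
 * returns nonzero only to the single caller that won the race (e.g., were the
 * reconnect worker and an incoming connect request both to attempt
 * RDS_CONN_DOWN -> RDS_CONN_CONNECTING, exactly one would succeed).
 */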

static inline int
rds_conn_transition(struct rds_connection *conn, int old, int new)
{
	WARN_ON(conn->c_trans->t_mp_capable);
	return rds_conn_path_transition(&conn->c_path[0], old, new);
}

static inline int
rds_conn_path_state(struct rds_conn_path *cp)
{
	return atomic_read(&cp->cp_state);
}

static inline int
rds_conn_state(struct rds_connection *conn)
{
	WARN_ON(conn->c_trans->t_mp_capable);
	return rds_conn_path_state(&conn->c_path[0]);
}

static inline int
rds_conn_path_up(struct rds_conn_path *cp)
{
	return atomic_read(&cp->cp_state) == RDS_CONN_UP;
}

static inline int
rds_conn_up(struct rds_connection *conn)
{
	WARN_ON(conn->c_trans->t_mp_capable);
	return rds_conn_path_up(&conn->c_path[0]);
}

static inline int
rds_conn_path_connecting(struct rds_conn_path *cp)
{
	return atomic_read(&cp->cp_state) == RDS_CONN_CONNECTING;
}

static inline int
rds_conn_connecting(struct rds_connection *conn)
{
	WARN_ON(conn->c_trans->t_mp_capable);
	return rds_conn_path_connecting(&conn->c_path[0]);
}

/* message.c */
struct rds_message *rds_message_alloc(unsigned int nents, gfp_t gfp);
struct scatterlist *rds_message_alloc_sgs(struct rds_message *rm, int nents,
					  int *ret);
int rds_message_copy_from_user(struct rds_message *rm, struct iov_iter *from,
			       bool zcopy);
struct rds_message *rds_message_map_pages(unsigned long *page_addrs, unsigned int total_len);
void rds_message_populate_header(struct rds_header *hdr, __be16 sport,
				 __be16 dport, u64 seq);
int rds_message_add_extension(struct rds_header *hdr,
			      unsigned int type, const void *data, unsigned int len);
int rds_message_next_extension(struct rds_header *hdr,
			       unsigned int *pos, void *buf, unsigned int *buflen);
int rds_message_add_rdma_dest_extension(struct rds_header *hdr, u32 r_key, u32 offset);
int rds_message_inc_copy_to_user(struct rds_incoming *inc, struct iov_iter *to);
void rds_message_inc_free(struct rds_incoming *inc);
void rds_message_addref(struct rds_message *rm);
void rds_message_put(struct rds_message *rm);
void rds_message_wait(struct rds_message *rm);
void rds_message_unmapped(struct rds_message *rm);
void rds_notify_msg_zcopy_purge(struct rds_msg_zcopy_queue *info);

static inline void rds_message_make_checksum(struct rds_header *hdr)
{
	hdr->h_csum = 0;
	hdr->h_csum = ip_fast_csum((void *) hdr, sizeof(*hdr) >> 2);
}

static inline int rds_message_verify_checksum(const struct rds_header *hdr)
{
	return !hdr->h_csum || ip_fast_csum((void *) hdr, sizeof(*hdr) >> 2) == 0;
}
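
/*
 * Note that rds_message_verify_checksum() treats a zero h_csum as "no
 * checksum supplied" and accepts the header; otherwise the header must sum
 * to zero under ip_fast_csum(), which is what rds_message_make_checksum()
 * arranges on the send side.
 */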


/* page.c */
int rds_page_remainder_alloc(struct scatterlist *scat, unsigned long bytes,
			     gfp_t gfp);
void rds_page_exit(void);

/* recv.c */
void rds_inc_init(struct rds_incoming *inc, struct rds_connection *conn,
		  struct in6_addr *saddr);
void rds_inc_path_init(struct rds_incoming *inc, struct rds_conn_path *conn,
		       struct in6_addr *saddr);
void rds_inc_put(struct rds_incoming *inc);
void rds_recv_incoming(struct rds_connection *conn, struct in6_addr *saddr,
		       struct in6_addr *daddr,
		       struct rds_incoming *inc, gfp_t gfp);
int rds_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
		int msg_flags);
void rds_clear_recv_queue(struct rds_sock *rs);
int rds_notify_queue_get(struct rds_sock *rs, struct msghdr *msg);
void rds_inc_info_copy(struct rds_incoming *inc,
		       struct rds_info_iterator *iter,
		       __be32 saddr, __be32 daddr, int flip);
void rds6_inc_info_copy(struct rds_incoming *inc,
			struct rds_info_iterator *iter,
			struct in6_addr *saddr, struct in6_addr *daddr,
			int flip);

/* send.c */
int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len);
void rds_send_path_reset(struct rds_conn_path *conn);
int rds_send_xmit(struct rds_conn_path *cp);
struct sockaddr_in;
void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in6 *dest);
typedef int (*is_acked_func)(struct rds_message *rm, uint64_t ack);
void rds_send_drop_acked(struct rds_connection *conn, u64 ack,
			 is_acked_func is_acked);
void rds_send_path_drop_acked(struct rds_conn_path *cp, u64 ack,
			      is_acked_func is_acked);
void rds_send_ping(struct rds_connection *conn, int cp_index);
int rds_send_pong(struct rds_conn_path *cp, __be16 dport);

/* rdma.c */
void rds_rdma_unuse(struct rds_sock *rs, u32 r_key, int force);
int rds_get_mr(struct rds_sock *rs, char __user *optval, int optlen);
int rds_get_mr_for_dest(struct rds_sock *rs, char __user *optval, int optlen);
int rds_free_mr(struct rds_sock *rs, char __user *optval, int optlen);
void rds_rdma_drop_keys(struct rds_sock *rs);
int rds_rdma_extra_size(struct rds_rdma_args *args,
			struct rds_iov_vector *iov);
int rds_cmsg_rdma_dest(struct rds_sock *rs, struct rds_message *rm,
		       struct cmsghdr *cmsg);
int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
		       struct cmsghdr *cmsg,
		       struct rds_iov_vector *vec);
int rds_cmsg_rdma_map(struct rds_sock *rs, struct rds_message *rm,
		      struct cmsghdr *cmsg);
void rds_rdma_free_op(struct rm_rdma_op *ro);
void rds_atomic_free_op(struct rm_atomic_op *ao);
void rds_rdma_send_complete(struct rds_message *rm, int wc_status);
void rds_atomic_send_complete(struct rds_message *rm, int wc_status);
int rds_cmsg_atomic(struct rds_sock *rs, struct rds_message *rm,
		    struct cmsghdr *cmsg);

void __rds_put_mr_final(struct kref *kref);

static inline bool rds_destroy_pending(struct rds_connection *conn)
{
	return !check_net(rds_conn_net(conn)) ||
		(conn->c_trans->t_unloading && conn->c_trans->t_unloading(conn));
}

enum {
	ODP_NOT_NEEDED,
	ODP_ZEROBASED,
	ODP_VIRTUAL
};

/* stats.c */
DECLARE_PER_CPU_SHARED_ALIGNED(struct rds_statistics, rds_stats);
#define rds_stats_inc_which(which, member) do {		\
	per_cpu(which, get_cpu()).member++;		\
	put_cpu();					\
} while (0)
#define rds_stats_inc(member) rds_stats_inc_which(rds_stats, member)
#define rds_stats_add_which(which, member, count) do {	\
	per_cpu(which, get_cpu()).member += count;	\
	put_cpu();					\
} while (0)
#define rds_stats_add(member, count) rds_stats_add_which(rds_stats, member, count)
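
/*
 * Usage sketch: rds_stats_inc(s_send_queued) bumps that counter in this
 * CPU's rds_statistics instance; the get_cpu()/put_cpu() pair keeps the
 * task on one CPU for the duration of the update so the increment is not
 * split across per-CPU copies.
 */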
int rds_stats_init(void);
void rds_stats_exit(void);
void rds_stats_info_copy(struct rds_info_iterator *iter,
			 uint64_t *values, const char *const *names,
			 size_t nr);

/* sysctl.c */
int rds_sysctl_init(void);
void rds_sysctl_exit(void);
extern unsigned long rds_sysctl_sndbuf_min;
extern unsigned long rds_sysctl_sndbuf_default;
extern unsigned long rds_sysctl_sndbuf_max;
extern unsigned long rds_sysctl_reconnect_min_jiffies;
extern unsigned long rds_sysctl_reconnect_max_jiffies;
extern unsigned int rds_sysctl_max_unacked_packets;
extern unsigned int rds_sysctl_max_unacked_bytes;
extern unsigned int rds_sysctl_ping_enable;
extern unsigned long rds_sysctl_trace_flags;
extern unsigned int rds_sysctl_trace_level;

/* threads.c */
int rds_threads_init(void);
void rds_threads_exit(void);
extern struct workqueue_struct *rds_wq;
void rds_queue_reconnect(struct rds_conn_path *cp);
void rds_connect_worker(struct work_struct *);
void rds_shutdown_worker(struct work_struct *);
void rds_send_worker(struct work_struct *);
void rds_recv_worker(struct work_struct *);
void rds_connect_path_complete(struct rds_conn_path *conn, int curr);
void rds_connect_complete(struct rds_connection *conn);
int rds_addr_cmp(const struct in6_addr *a1, const struct in6_addr *a2);

/* transport.c */
void rds_trans_register(struct rds_transport *trans);
void rds_trans_unregister(struct rds_transport *trans);
struct rds_transport *rds_trans_get_preferred(struct net *net,
					      const struct in6_addr *addr,
					      __u32 scope_id);
void rds_trans_put(struct rds_transport *trans);
unsigned int rds_trans_stats_info_copy(struct rds_info_iterator *iter,
				       unsigned int avail);
struct rds_transport *rds_trans_get(int t_type);
int rds_trans_init(void);
void rds_trans_exit(void);

#endif