#ifndef _RDS_RDS_H
#define _RDS_RDS_H

#include <net/sock.h>
#include <linux/scatterlist.h>
#include <linux/highmem.h>
#include <rdma/rdma_cm.h>
#include <linux/mutex.h>
#include <linux/rds.h>
#include <linux/rhashtable.h>

#include "info.h"

/*
 * RDS Network protocol version
 */
#define RDS_PROTOCOL_3_0	0x0300
#define RDS_PROTOCOL_3_1	0x0301
#define RDS_PROTOCOL_VERSION	RDS_PROTOCOL_3_1
#define RDS_PROTOCOL_MAJOR(v)	((v) >> 8)
#define RDS_PROTOCOL_MINOR(v)	((v) & 255)
#define RDS_PROTOCOL(maj, min)	(((maj) << 8) | min)
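/*
 * For example, RDS_PROTOCOL(3, 1) == 0x0301, and RDS_PROTOCOL_MAJOR(0x0301)
 * and RDS_PROTOCOL_MINOR(0x0301) recover 3 and 1 respectively: the major
 * version lives in the high byte, the minor in the low byte.
 */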

/*
 * XXX randomly chosen, but at least seems to be unused:
 * # 18464-18768 Unassigned
 * We should do better. We want a reserved port to discourage unpriv'ed
 * userspace from listening.
 */
#define RDS_PORT	18634

#ifdef ATOMIC64_INIT
#define KERNEL_HAS_ATOMIC64
#endif

#ifdef RDS_DEBUG
#define rdsdebug(fmt, args...) pr_debug("%s(): " fmt, __func__ , ##args)
#else
/* sigh, pr_debug() causes unused variable warnings */
static inline __printf(1, 2)
void rdsdebug(char *fmt, ...)
{
}
#endif
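/*
 * Example usage: rdsdebug("conn %p state %d\n", conn, state); when RDS_DEBUG
 * is not defined, the __printf-annotated stub above still lets the compiler
 * check the format string but prints nothing.
 */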

/* XXX is there one of these somewhere? */
#define ceil(x, y) \
	({ unsigned long __x = (x), __y = (y); (__x + __y - 1) / __y; })
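/*
 * ceil() rounds an integer division up, e.g. ceil(10, 4) == 3 where plain
 * 10 / 4 would give 2; each argument is evaluated exactly once.
 */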

#define RDS_FRAG_SHIFT	12
#define RDS_FRAG_SIZE	((unsigned int)(1 << RDS_FRAG_SHIFT))

/* Used to limit both RDMA and non-RDMA RDS messages to 1MB */
#define RDS_MAX_MSG_SIZE	((unsigned int)(1 << 20))

#define RDS_CONG_MAP_BYTES	(65536 / 8)
#define RDS_CONG_MAP_PAGES	(PAGE_ALIGN(RDS_CONG_MAP_BYTES) / PAGE_SIZE)
#define RDS_CONG_MAP_PAGE_BITS	(PAGE_SIZE * 8)

struct rds_cong_map {
	struct rb_node		m_rb_node;
	__be32			m_addr;
	wait_queue_head_t	m_waitq;
	struct list_head	m_conn_list;
	unsigned long		m_page_addrs[RDS_CONG_MAP_PAGES];
};
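/*
 * The congestion map is one bit per port: 65536 / 8 bytes covers the whole
 * 16-bit port space, split across RDS_CONG_MAP_PAGES pages of
 * RDS_CONG_MAP_PAGE_BITS bits each.
 */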


/*
 * This is how we will track the connection state:
 * A connection is always in one of the following
 * states. Updates to the state are atomic and imply
 * a memory barrier.
 */
enum {
	RDS_CONN_DOWN = 0,
	RDS_CONN_CONNECTING,
	RDS_CONN_DISCONNECTING,
	RDS_CONN_UP,
	RDS_CONN_RESETTING,
	RDS_CONN_ERROR,
};

/* Bits for c_flags */
#define RDS_LL_SEND_FULL	0
#define RDS_RECONNECT_PENDING	1
#define RDS_IN_XMIT		2
#define RDS_RECV_REFILL		3

/* Max number of multipaths per RDS connection. Must be a power of 2 */
#define	RDS_MPATH_WORKERS	8
#define	RDS_MPATH_HASH(rs, n) (jhash_1word((rs)->rs_bound_port, \
			       (rs)->rs_hash_initval) & ((n) - 1))
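/*
 * RDS_MPATH_HASH() picks a path index in [0, n) from the socket's bound port;
 * the "& ((n) - 1)" mask only works because n is a power of two.
 */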

/* Per mpath connection state */
struct rds_conn_path {
	struct rds_connection	*cp_conn;
	struct rds_message	*cp_xmit_rm;
	unsigned long		cp_xmit_sg;
	unsigned int		cp_xmit_hdr_off;
	unsigned int		cp_xmit_data_off;
	unsigned int		cp_xmit_atomic_sent;
	unsigned int		cp_xmit_rdma_sent;
	unsigned int		cp_xmit_data_sent;

	spinlock_t		cp_lock;		/* protect msg queues */
	u64			cp_next_tx_seq;
	struct list_head	cp_send_queue;
	struct list_head	cp_retrans;

	u64			cp_next_rx_seq;

	void			*cp_transport_data;

	atomic_t		cp_state;
	unsigned long		cp_send_gen;
	unsigned long		cp_flags;
	unsigned long		cp_reconnect_jiffies;
	struct delayed_work	cp_send_w;
	struct delayed_work	cp_recv_w;
	struct delayed_work	cp_conn_w;
	struct work_struct	cp_down_w;
	struct mutex		cp_cm_lock;	/* protect cp_state & cm */
	wait_queue_head_t	cp_waitq;

	unsigned int		cp_unacked_packets;
	unsigned int		cp_unacked_bytes;
	unsigned int		cp_index;
};

/* One rds_connection per RDS address pair */
struct rds_connection {
	struct hlist_node	c_hash_node;
	__be32			c_laddr;
	__be32			c_faddr;
	unsigned int		c_loopback:1,
				c_ping_triggered:1,
				c_pad_to_32:30;
	int			c_npaths;
	struct rds_connection	*c_passive;
	struct rds_transport	*c_trans;

	struct rds_cong_map	*c_lcong;
	struct rds_cong_map	*c_fcong;

	/* Protocol version */
	unsigned int		c_version;
	struct net		*c_net;

	struct list_head	c_map_item;
	unsigned long		c_map_queued;

	struct rds_conn_path	c_path[RDS_MPATH_WORKERS];
	wait_queue_head_t	c_hs_waitq; /* handshake waitq */

	u32			c_my_gen_num;
	u32			c_peer_gen_num;
};

static inline
struct net *rds_conn_net(struct rds_connection *conn)
{
	return conn->c_net;
}

static inline
void rds_conn_net_set(struct rds_connection *conn, struct net *net)
{
	conn->c_net = get_net(net);
}
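/*
 * rds_conn_net_set() pins the owning network namespace with get_net(), so
 * conn->c_net remains valid for as long as that reference is held.
 */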

#define RDS_FLAG_CONG_BITMAP	0x01
#define RDS_FLAG_ACK_REQUIRED	0x02
#define RDS_FLAG_RETRANSMITTED	0x04
#define RDS_MAX_ADV_CREDIT	255

/* RDS_FLAG_PROBE_PORT is the reserved sport used for sending a ping
 * probe to exchange control information before establishing a connection.
 * Currently the control information that is exchanged is the number of
 * supported paths. If the peer is a legacy (older kernel revision) peer,
 * it would return a pong message without additional control information
 * that would then alert the sender that the peer was an older rev.
 */
#define RDS_FLAG_PROBE_PORT	1
#define	RDS_HS_PROBE(sport, dport) \
		((sport == RDS_FLAG_PROBE_PORT && dport == 0) || \
		 (sport == 0 && dport == RDS_FLAG_PROBE_PORT))
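/*
 * A handshake probe is therefore either a ping sent from sport
 * RDS_FLAG_PROBE_PORT to dport 0, or the pong coming back from sport 0 to
 * dport RDS_FLAG_PROBE_PORT.
 */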
/*
 * Maximum space available for extension headers.
 */
#define RDS_HEADER_EXT_SPACE	16

struct rds_header {
	__be64	h_sequence;
	__be64	h_ack;
	__be32	h_len;
	__be16	h_sport;
	__be16	h_dport;
	u8	h_flags;
	u8	h_credit;
	u8	h_padding[4];
	__sum16	h_csum;

	u8	h_exthdr[RDS_HEADER_EXT_SPACE];
};
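/*
 * The fixed fields above add up to 32 bytes, so with the 16 bytes of
 * extension space the on-wire header is 48 bytes.
 */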

/*
 * Reserved - indicates end of extensions
 */
#define RDS_EXTHDR_NONE		0

/*
 * This extension header is included in the very
 * first message that is sent on a new connection,
 * and identifies the protocol level. This will help
 * rolling updates if a future change requires breaking
 * the protocol.
 * NB: This is no longer true for IB, where we do a version
 * negotiation during the connection setup phase (protocol
 * version information is included in the RDMA CM private data).
 */
#define RDS_EXTHDR_VERSION	1
struct rds_ext_header_version {
	__be32			h_version;
};

/*
 * This extension header is included in the RDS message
 * chasing an RDMA operation.
 */
#define RDS_EXTHDR_RDMA		2
struct rds_ext_header_rdma {
	__be32			h_rdma_rkey;
};

/*
 * This extension header tells the peer about the
 * destination <R_Key,offset> of the requested RDMA
 * operation.
 */
#define RDS_EXTHDR_RDMA_DEST	3
struct rds_ext_header_rdma_dest {
	__be32			h_rdma_rkey;
	__be32			h_rdma_offset;
};

/* Extension header announcing number of paths.
 * Implicit length = 2 bytes.
 */
#define RDS_EXTHDR_NPATHS	5
#define RDS_EXTHDR_GEN_NUM	6

#define __RDS_EXTHDR_MAX	16 /* for now */
#define RDS_RX_MAX_TRACES	(RDS_MSG_RX_DGRAM_TRACE_MAX + 1)
#define	RDS_MSG_RX_HDR		0
#define	RDS_MSG_RX_START	1
#define	RDS_MSG_RX_END		2
#define	RDS_MSG_RX_CMSG		3

struct rds_incoming {
	atomic_t		i_refcount;
	struct list_head	i_item;
	struct rds_connection	*i_conn;
	struct rds_conn_path	*i_conn_path;
	struct rds_header	i_hdr;
	unsigned long		i_rx_jiffies;
	__be32			i_saddr;

	rds_rdma_cookie_t	i_rdma_cookie;
	struct timeval		i_rx_tstamp;
	u64			i_rx_lat_trace[RDS_RX_MAX_TRACES];
};

struct rds_mr {
	struct rb_node		r_rb_node;
	atomic_t		r_refcount;
	u32			r_key;

	/* A copy of the creation flags */
	unsigned int		r_use_once:1;
	unsigned int		r_invalidate:1;
	unsigned int		r_write:1;

	/* This is for RDS_MR_DEAD.
	 * It would be nice & consistent to make this part of the above
	 * bit field here, but we need to use test_and_set_bit.
	 */
	unsigned long		r_state;
	struct rds_sock		*r_sock; /* back pointer to the socket that owns us */
	struct rds_transport	*r_trans;
	void			*r_trans_private;
};

/* Flags for mr->r_state */
#define RDS_MR_DEAD		0

static inline rds_rdma_cookie_t rds_rdma_make_cookie(u32 r_key, u32 offset)
{
	return r_key | (((u64) offset) << 32);
}

static inline u32 rds_rdma_cookie_key(rds_rdma_cookie_t cookie)
{
	return cookie;
}

static inline u32 rds_rdma_cookie_offset(rds_rdma_cookie_t cookie)
{
	return cookie >> 32;
}
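/*
 * The cookie packs the r_key into the low 32 bits and the offset into the
 * high 32 bits, so rds_rdma_cookie_key() and rds_rdma_cookie_offset() always
 * recover exactly what rds_rdma_make_cookie() was given.
 */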

/* atomic operation types */
#define RDS_ATOMIC_TYPE_CSWP		0
#define RDS_ATOMIC_TYPE_FADD		1

/*
 * m_sock_item and m_conn_item are on lists that are serialized under
 * conn->c_lock. m_sock_item has additional meaning in that once it is empty
 * the message will not be put back on the retransmit list after being sent.
 * messages that are canceled while being sent rely on this.
 *
 * m_inc is used by loopback so that it can pass an incoming message straight
 * back up into the rx path. It embeds a wire header which is also used by
 * the send path, which is kind of awkward.
 *
 * m_sock_item indicates the message's presence on a socket's send or receive
 * queue. m_rs will point to that socket.
 *
 * m_daddr is used by cancellation to prune messages to a given destination.
 *
 * The RDS_MSG_ON_SOCK and RDS_MSG_ON_CONN flags are used to avoid lock
 * nesting. As paths iterate over messages on a sock, or conn, they must
 * also lock the conn, or sock, to remove the message from those lists too.
 * Testing the flag to determine if the message is still on the lists lets
 * us avoid testing the list_head directly. That means each path can use
 * the message's list_head to keep it on a local list while juggling locks
 * without confusing the other path.
 *
 * m_ack_seq is an optional field set by transports who need a different
 * sequence number range to invalidate. They can use this in a callback
 * that they pass to rds_send_drop_acked() to see if each message has been
 * acked. The HAS_ACK_SEQ flag can be used to detect messages which haven't
 * had ack_seq set yet.
 */
#define RDS_MSG_ON_SOCK		1
#define RDS_MSG_ON_CONN		2
#define RDS_MSG_HAS_ACK_SEQ	3
#define RDS_MSG_ACK_REQUIRED	4
#define RDS_MSG_RETRANSMITTED	5
#define RDS_MSG_MAPPED		6
#define RDS_MSG_PAGEVEC		7
#define RDS_MSG_FLUSH		8

struct rds_message {
	atomic_t		m_refcount;
	struct list_head	m_sock_item;
	struct list_head	m_conn_item;
	struct rds_incoming	m_inc;
	u64			m_ack_seq;
	__be32			m_daddr;
	unsigned long		m_flags;

	/* Never access m_rs without holding m_rs_lock.
	 * Lock nesting is
	 *  rm->m_rs_lock
	 *   -> rs->rs_lock
	 */
	spinlock_t		m_rs_lock;
	wait_queue_head_t	m_flush_wait;

	struct rds_sock		*m_rs;

	/* cookie to send to remote, in rds header */
	rds_rdma_cookie_t	m_rdma_cookie;

	unsigned int		m_used_sgs;
	unsigned int		m_total_sgs;

	void			*m_final_op;

	struct {
		struct rm_atomic_op {
			int			op_type;
			union {
				struct {
					uint64_t	compare;
					uint64_t	swap;
					uint64_t	compare_mask;
					uint64_t	swap_mask;
				} op_m_cswp;
				struct {
					uint64_t	add;
					uint64_t	nocarry_mask;
				} op_m_fadd;
			};

			u32			op_rkey;
			u64			op_remote_addr;
			unsigned int		op_notify:1;
			unsigned int		op_recverr:1;
			unsigned int		op_mapped:1;
			unsigned int		op_silent:1;
			unsigned int		op_active:1;
			struct scatterlist	*op_sg;
			struct rds_notifier	*op_notifier;

			struct rds_mr		*op_rdma_mr;
		} atomic;
		struct rm_rdma_op {
			u32			op_rkey;
			u64			op_remote_addr;
			unsigned int		op_write:1;
			unsigned int		op_fence:1;
			unsigned int		op_notify:1;
			unsigned int		op_recverr:1;
			unsigned int		op_mapped:1;
			unsigned int		op_silent:1;
			unsigned int		op_active:1;
			unsigned int		op_bytes;
			unsigned int		op_nents;
			unsigned int		op_count;
			struct scatterlist	*op_sg;
			struct rds_notifier	*op_notifier;

			struct rds_mr		*op_rdma_mr;
		} rdma;
		struct rm_data_op {
			unsigned int		op_active:1;
			unsigned int		op_notify:1;
			unsigned int		op_nents;
			unsigned int		op_count;
			unsigned int		op_dmasg;
			unsigned int		op_dmaoff;
			struct scatterlist	*op_sg;
		} data;
	};
};

/*
 * The RDS notifier is used (optionally) to tell the application about
 * completed RDMA operations. Rather than keeping the whole rds message
 * around on the queue, we allocate a small notifier that is put on the
 * socket's notifier_list. Notifications are delivered to the application
 * through control messages.
 */
struct rds_notifier {
	struct list_head	n_list;
	uint64_t		n_user_token;
	int			n_status;
};

/**
 * struct rds_transport - transport specific behavioural hooks
 *
 * @xmit: .xmit is called by rds_send_xmit() to tell the transport to send
 *        part of a message. The caller serializes on the send_sem so this
 *        doesn't need to be reentrant for a given conn. The header must be
 *        sent before the data payload. .xmit must be prepared to send a
 *        message with no data payload. .xmit should return the number of
 *        bytes that were sent down the connection, including header bytes.
 *        Returning 0 tells the caller that it doesn't need to perform any
 *        additional work now. This is usually the case when the transport has
 *        filled the sending queue for its connection and will handle
 *        triggering the rds thread to continue the send when space becomes
 *        available. Returning -EAGAIN tells the caller to retry the send
 *        immediately. Returning -ENOMEM tells the caller to retry the send at
 *        some point in the future.
 *
 * @conn_shutdown: conn_shutdown stops traffic on the given connection. Once
 *                 it returns the connection can not call rds_recv_incoming().
 *                 This will only be called once after conn_connect returns
 *                 non-zero success. The caller serializes this with
 *                 the send and connecting paths (xmit_* and conn_*). The
 *                 transport is responsible for other serialization, including
 *                 rds_recv_incoming(). This is called in process context but
 *                 should try hard not to block.
 */

struct rds_transport {
	char			t_name[TRANSNAMSIZ];
	struct list_head	t_item;
	struct module		*t_owner;
	unsigned int		t_prefer_loopback:1,
				t_mp_capable:1;
	unsigned int		t_type;

	int (*laddr_check)(struct net *net, __be32 addr);
	int (*conn_alloc)(struct rds_connection *conn, gfp_t gfp);
	void (*conn_free)(void *data);
	int (*conn_path_connect)(struct rds_conn_path *cp);
	void (*conn_path_shutdown)(struct rds_conn_path *conn);
	void (*xmit_path_prepare)(struct rds_conn_path *cp);
	void (*xmit_path_complete)(struct rds_conn_path *cp);
	int (*xmit)(struct rds_connection *conn, struct rds_message *rm,
		    unsigned int hdr_off, unsigned int sg, unsigned int off);
	int (*xmit_rdma)(struct rds_connection *conn, struct rm_rdma_op *op);
	int (*xmit_atomic)(struct rds_connection *conn, struct rm_atomic_op *op);
	int (*recv_path)(struct rds_conn_path *cp);
	int (*inc_copy_to_user)(struct rds_incoming *inc, struct iov_iter *to);
	void (*inc_free)(struct rds_incoming *inc);

	int (*cm_handle_connect)(struct rdma_cm_id *cm_id,
				 struct rdma_cm_event *event);
	int (*cm_initiate_connect)(struct rdma_cm_id *cm_id);
	void (*cm_connect_complete)(struct rds_connection *conn,
				    struct rdma_cm_event *event);

	unsigned int (*stats_info_copy)(struct rds_info_iterator *iter,
					unsigned int avail);
	void (*exit)(void);
	void *(*get_mr)(struct scatterlist *sg, unsigned long nr_sg,
			struct rds_sock *rs, u32 *key_ret);
	void (*sync_mr)(void *trans_private, int direction);
	void (*free_mr)(void *trans_private, int invalidate);
	void (*flush_mrs)(void);
};

struct rds_sock {
	struct sock		rs_sk;

	u64			rs_user_addr;
	u64			rs_user_bytes;

	/*
	 * bound_addr used for both incoming and outgoing, no INADDR_ANY
	 * support.
	 */
	struct rhash_head	rs_bound_node;
	u64			rs_bound_key;
	__be32			rs_bound_addr;
	__be32			rs_conn_addr;
	__be16			rs_bound_port;
	__be16			rs_conn_port;
	struct rds_transport	*rs_transport;

	/*
	 * rds_sendmsg caches the conn it used the last time around.
	 * This helps avoid costly lookups.
	 */
	struct rds_connection	*rs_conn;

	/* flag indicating we were congested or not */
	int			rs_congested;
	/* seen congestion (ENOBUFS) when sending? */
	int			rs_seen_congestion;

	/* rs_lock protects all these adjacent members before the newline */
	spinlock_t		rs_lock;
	struct list_head	rs_send_queue;
	u32			rs_snd_bytes;
	int			rs_rcv_bytes;
	struct list_head	rs_notify_queue; /* currently used for failed RDMAs */

	/* Congestion wake_up. If rs_cong_monitor is set, we use cong_mask
	 * to decide whether the application should be woken up.
	 * If not set, we use rs_cong_track to find out whether a cong map
	 * update arrived.
	 */
	uint64_t		rs_cong_mask;
	uint64_t		rs_cong_notify;
	struct list_head	rs_cong_list;
	unsigned long		rs_cong_track;

	/*
	 * rs_recv_lock protects the receive queue, and is
	 * used to serialize with rds_release.
	 */
	rwlock_t		rs_recv_lock;
	struct list_head	rs_recv_queue;

	/* just for stats reporting */
	struct list_head	rs_item;

	/* these have their own lock */
	spinlock_t		rs_rdma_lock;
	struct rb_root		rs_rdma_keys;

	/* Socket options - in case there will be more */
	unsigned char		rs_recverr,
				rs_cong_monitor;
	u32			rs_hash_initval;

	/* Socket receive path trace points */
	u8			rs_rx_traces;
	u8			rs_rx_trace[RDS_MSG_RX_DGRAM_TRACE_MAX];
};

static inline struct rds_sock *rds_sk_to_rs(const struct sock *sk)
{
	return container_of(sk, struct rds_sock, rs_sk);
}
static inline struct sock *rds_rs_to_sk(struct rds_sock *rs)
{
	return &rs->rs_sk;
}

/*
 * The stack assigns sk_sndbuf and sk_rcvbuf to twice the specified value
 * to account for overhead. We don't account for overhead, we just apply
 * the number of payload bytes to the specified value.
 */
static inline int rds_sk_sndbuf(struct rds_sock *rs)
{
	return rds_rs_to_sk(rs)->sk_sndbuf / 2;
}
static inline int rds_sk_rcvbuf(struct rds_sock *rs)
{
	return rds_rs_to_sk(rs)->sk_rcvbuf / 2;
}
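/*
 * For example, a setsockopt(SO_SNDBUF) of 65536 is typically doubled by the
 * core to sk_sndbuf == 131072, so rds_sk_sndbuf() reports the original 65536
 * bytes of payload budget.
 */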

struct rds_statistics {
	uint64_t	s_conn_reset;
	uint64_t	s_recv_drop_bad_checksum;
	uint64_t	s_recv_drop_old_seq;
	uint64_t	s_recv_drop_no_sock;
	uint64_t	s_recv_drop_dead_sock;
	uint64_t	s_recv_deliver_raced;
	uint64_t	s_recv_delivered;
	uint64_t	s_recv_queued;
	uint64_t	s_recv_immediate_retry;
	uint64_t	s_recv_delayed_retry;
	uint64_t	s_recv_ack_required;
	uint64_t	s_recv_rdma_bytes;
	uint64_t	s_recv_ping;
	uint64_t	s_send_queue_empty;
	uint64_t	s_send_queue_full;
	uint64_t	s_send_lock_contention;
	uint64_t	s_send_lock_queue_raced;
	uint64_t	s_send_immediate_retry;
	uint64_t	s_send_delayed_retry;
	uint64_t	s_send_drop_acked;
	uint64_t	s_send_ack_required;
	uint64_t	s_send_queued;
	uint64_t	s_send_rdma;
	uint64_t	s_send_rdma_bytes;
	uint64_t	s_send_pong;
	uint64_t	s_page_remainder_hit;
	uint64_t	s_page_remainder_miss;
	uint64_t	s_copy_to_user;
	uint64_t	s_copy_from_user;
	uint64_t	s_cong_update_queued;
	uint64_t	s_cong_update_received;
	uint64_t	s_cong_send_error;
	uint64_t	s_cong_send_blocked;
	uint64_t	s_recv_bytes_added_to_socket;
	uint64_t	s_recv_bytes_removed_from_socket;

};

/* af_rds.c */
void rds_sock_addref(struct rds_sock *rs);
void rds_sock_put(struct rds_sock *rs);
void rds_wake_sk_sleep(struct rds_sock *rs);
static inline void __rds_wake_sk_sleep(struct sock *sk)
{
	wait_queue_head_t *waitq = sk_sleep(sk);

	if (!sock_flag(sk, SOCK_DEAD) && waitq)
		wake_up(waitq);
}
extern wait_queue_head_t rds_poll_waitq;


/* bind.c */
int rds_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len);
void rds_remove_bound(struct rds_sock *rs);
struct rds_sock *rds_find_bound(__be32 addr, __be16 port);
int rds_bind_lock_init(void);
void rds_bind_lock_destroy(void);

/* cong.c */
int rds_cong_get_maps(struct rds_connection *conn);
void rds_cong_add_conn(struct rds_connection *conn);
void rds_cong_remove_conn(struct rds_connection *conn);
void rds_cong_set_bit(struct rds_cong_map *map, __be16 port);
void rds_cong_clear_bit(struct rds_cong_map *map, __be16 port);
int rds_cong_wait(struct rds_cong_map *map, __be16 port, int nonblock, struct rds_sock *rs);
void rds_cong_queue_updates(struct rds_cong_map *map);
void rds_cong_map_updated(struct rds_cong_map *map, uint64_t);
int rds_cong_updated_since(unsigned long *recent);
void rds_cong_add_socket(struct rds_sock *);
void rds_cong_remove_socket(struct rds_sock *);
void rds_cong_exit(void);
struct rds_message *rds_cong_update_alloc(struct rds_connection *conn);

/* conn.c */
extern u32 rds_gen_num;
int rds_conn_init(void);
void rds_conn_exit(void);
struct rds_connection *rds_conn_create(struct net *net,
				       __be32 laddr, __be32 faddr,
				       struct rds_transport *trans, gfp_t gfp);
struct rds_connection *rds_conn_create_outgoing(struct net *net,
						__be32 laddr, __be32 faddr,
						struct rds_transport *trans, gfp_t gfp);
void rds_conn_shutdown(struct rds_conn_path *cpath);
void rds_conn_destroy(struct rds_connection *conn);
void rds_conn_drop(struct rds_connection *conn);
void rds_conn_path_drop(struct rds_conn_path *cpath);
void rds_conn_connect_if_down(struct rds_connection *conn);
void rds_conn_path_connect_if_down(struct rds_conn_path *cp);
void rds_for_each_conn_info(struct socket *sock, unsigned int len,
			    struct rds_info_iterator *iter,
			    struct rds_info_lengths *lens,
			    int (*visitor)(struct rds_connection *, void *),
			    size_t item_len);

__printf(2, 3)
void __rds_conn_path_error(struct rds_conn_path *cp, const char *, ...);
#define rds_conn_path_error(cp, fmt...) \
	__rds_conn_path_error(cp, KERN_WARNING "RDS: " fmt)

static inline int
rds_conn_path_transition(struct rds_conn_path *cp, int old, int new)
{
	return atomic_cmpxchg(&cp->cp_state, old, new) == old;
}

static inline int
rds_conn_transition(struct rds_connection *conn, int old, int new)
{
	WARN_ON(conn->c_trans->t_mp_capable);
	return rds_conn_path_transition(&conn->c_path[0], old, new);
}

static inline int
rds_conn_path_state(struct rds_conn_path *cp)
{
	return atomic_read(&cp->cp_state);
}

static inline int
rds_conn_state(struct rds_connection *conn)
{
	WARN_ON(conn->c_trans->t_mp_capable);
	return rds_conn_path_state(&conn->c_path[0]);
}

static inline int
rds_conn_path_up(struct rds_conn_path *cp)
{
	return atomic_read(&cp->cp_state) == RDS_CONN_UP;
}

static inline int
rds_conn_up(struct rds_connection *conn)
{
	WARN_ON(conn->c_trans->t_mp_capable);
	return rds_conn_path_up(&conn->c_path[0]);
}

static inline int
rds_conn_path_connecting(struct rds_conn_path *cp)
{
	return atomic_read(&cp->cp_state) == RDS_CONN_CONNECTING;
}

static inline int
rds_conn_connecting(struct rds_connection *conn)
{
	WARN_ON(conn->c_trans->t_mp_capable);
	return rds_conn_path_connecting(&conn->c_path[0]);
}

/* message.c */
struct rds_message *rds_message_alloc(unsigned int nents, gfp_t gfp);
struct scatterlist *rds_message_alloc_sgs(struct rds_message *rm, int nents);
int rds_message_copy_from_user(struct rds_message *rm, struct iov_iter *from);
struct rds_message *rds_message_map_pages(unsigned long *page_addrs, unsigned int total_len);
void rds_message_populate_header(struct rds_header *hdr, __be16 sport,
				 __be16 dport, u64 seq);
int rds_message_add_extension(struct rds_header *hdr,
			      unsigned int type, const void *data, unsigned int len);
int rds_message_next_extension(struct rds_header *hdr,
			       unsigned int *pos, void *buf, unsigned int *buflen);
int rds_message_add_rdma_dest_extension(struct rds_header *hdr, u32 r_key, u32 offset);
int rds_message_inc_copy_to_user(struct rds_incoming *inc, struct iov_iter *to);
void rds_message_inc_free(struct rds_incoming *inc);
void rds_message_addref(struct rds_message *rm);
void rds_message_put(struct rds_message *rm);
void rds_message_wait(struct rds_message *rm);
void rds_message_unmapped(struct rds_message *rm);

static inline void rds_message_make_checksum(struct rds_header *hdr)
{
	hdr->h_csum = 0;
	hdr->h_csum = ip_fast_csum((void *) hdr, sizeof(*hdr) >> 2);
}

static inline int rds_message_verify_checksum(const struct rds_header *hdr)
{
	return !hdr->h_csum || ip_fast_csum((void *) hdr, sizeof(*hdr) >> 2) == 0;
}
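/*
 * ip_fast_csum() is run over the whole header in 32-bit words
 * (sizeof(*hdr) >> 2 == 12 for the 48-byte header) with h_csum zeroed first;
 * verification also accepts h_csum == 0 from senders that skip checksumming.
 */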


/* page.c */
int rds_page_remainder_alloc(struct scatterlist *scat, unsigned long bytes,
			     gfp_t gfp);
void rds_page_exit(void);

/* recv.c */
void rds_inc_init(struct rds_incoming *inc, struct rds_connection *conn,
		  __be32 saddr);
void rds_inc_path_init(struct rds_incoming *inc, struct rds_conn_path *conn,
		       __be32 saddr);
void rds_inc_put(struct rds_incoming *inc);
void rds_recv_incoming(struct rds_connection *conn, __be32 saddr, __be32 daddr,
		       struct rds_incoming *inc, gfp_t gfp);
int rds_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
		int msg_flags);
void rds_clear_recv_queue(struct rds_sock *rs);
int rds_notify_queue_get(struct rds_sock *rs, struct msghdr *msg);
void rds_inc_info_copy(struct rds_incoming *inc,
		       struct rds_info_iterator *iter,
		       __be32 saddr, __be32 daddr, int flip);

/* send.c */
int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len);
void rds_send_path_reset(struct rds_conn_path *conn);
int rds_send_xmit(struct rds_conn_path *cp);
struct sockaddr_in;
void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in *dest);
typedef int (*is_acked_func)(struct rds_message *rm, uint64_t ack);
void rds_send_drop_acked(struct rds_connection *conn, u64 ack,
			 is_acked_func is_acked);
void rds_send_path_drop_acked(struct rds_conn_path *cp, u64 ack,
			      is_acked_func is_acked);
int rds_send_pong(struct rds_conn_path *cp, __be16 dport);

/* rdma.c */
void rds_rdma_unuse(struct rds_sock *rs, u32 r_key, int force);
int rds_get_mr(struct rds_sock *rs, char __user *optval, int optlen);
int rds_get_mr_for_dest(struct rds_sock *rs, char __user *optval, int optlen);
int rds_free_mr(struct rds_sock *rs, char __user *optval, int optlen);
void rds_rdma_drop_keys(struct rds_sock *rs);
int rds_rdma_extra_size(struct rds_rdma_args *args);
int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
		       struct cmsghdr *cmsg);
int rds_cmsg_rdma_dest(struct rds_sock *rs, struct rds_message *rm,
		       struct cmsghdr *cmsg);
int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
		       struct cmsghdr *cmsg);
int rds_cmsg_rdma_map(struct rds_sock *rs, struct rds_message *rm,
		      struct cmsghdr *cmsg);
void rds_rdma_free_op(struct rm_rdma_op *ro);
void rds_atomic_free_op(struct rm_atomic_op *ao);
void rds_rdma_send_complete(struct rds_message *rm, int wc_status);
void rds_atomic_send_complete(struct rds_message *rm, int wc_status);
int rds_cmsg_atomic(struct rds_sock *rs, struct rds_message *rm,
		    struct cmsghdr *cmsg);

void __rds_put_mr_final(struct rds_mr *mr);
static inline void rds_mr_put(struct rds_mr *mr)
{
	if (atomic_dec_and_test(&mr->r_refcount))
		__rds_put_mr_final(mr);
}

/* stats.c */
DECLARE_PER_CPU_SHARED_ALIGNED(struct rds_statistics, rds_stats);
#define rds_stats_inc_which(which, member) do {		\
	per_cpu(which, get_cpu()).member++;		\
	put_cpu();					\
} while (0)
#define rds_stats_inc(member) rds_stats_inc_which(rds_stats, member)
#define rds_stats_add_which(which, member, count) do {		\
	per_cpu(which, get_cpu()).member += count;	\
	put_cpu();					\
} while (0)
#define rds_stats_add(member, count) rds_stats_add_which(rds_stats, member, count)
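/*
 * Example usage: rds_stats_inc(s_send_queued) bumps this CPU's counter, and
 * rds_stats_add(s_copy_from_user, nbytes) accumulates a byte count; the
 * per-CPU values are summed when the stats are reported.
 */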
int rds_stats_init(void);
void rds_stats_exit(void);
void rds_stats_info_copy(struct rds_info_iterator *iter,
			 uint64_t *values, const char *const *names,
			 size_t nr);

/* sysctl.c */
int rds_sysctl_init(void);
void rds_sysctl_exit(void);
extern unsigned long rds_sysctl_sndbuf_min;
extern unsigned long rds_sysctl_sndbuf_default;
extern unsigned long rds_sysctl_sndbuf_max;
extern unsigned long rds_sysctl_reconnect_min_jiffies;
extern unsigned long rds_sysctl_reconnect_max_jiffies;
extern unsigned int  rds_sysctl_max_unacked_packets;
extern unsigned int  rds_sysctl_max_unacked_bytes;
extern unsigned int  rds_sysctl_ping_enable;
extern unsigned long rds_sysctl_trace_flags;
extern unsigned int  rds_sysctl_trace_level;

/* threads.c */
int rds_threads_init(void);
void rds_threads_exit(void);
extern struct workqueue_struct *rds_wq;
void rds_queue_reconnect(struct rds_conn_path *cp);
void rds_connect_worker(struct work_struct *);
void rds_shutdown_worker(struct work_struct *);
void rds_send_worker(struct work_struct *);
void rds_recv_worker(struct work_struct *);
void rds_connect_path_complete(struct rds_conn_path *conn, int curr);
void rds_connect_complete(struct rds_connection *conn);

/* transport.c */
void rds_trans_register(struct rds_transport *trans);
void rds_trans_unregister(struct rds_transport *trans);
struct rds_transport *rds_trans_get_preferred(struct net *net, __be32 addr);
void rds_trans_put(struct rds_transport *trans);
unsigned int rds_trans_stats_info_copy(struct rds_info_iterator *iter,
				       unsigned int avail);
struct rds_transport *rds_trans_get(int t_type);
int rds_trans_init(void);
void rds_trans_exit(void);

#endif