/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Shared Memory Communications over RDMA (SMC-R) and RoCE
 *
 * Definitions for the SMC module (socket related)
 *
 * Copyright IBM Corp. 2016
 *
 * Author(s):  Ursula Braun <ubraun@linux.vnet.ibm.com>
 */
#ifndef __SMC_H
#define __SMC_H

#include <linux/socket.h>
#include <linux/types.h>
#include <linux/compiler.h> /* __aligned */
#include <net/sock.h>

#include "smc_ib.h"

#define SMC_V1		1		/* SMC version V1 */
#define SMC_V2		2		/* SMC version V2 */
#define SMC_RELEASE	0

#define SMCPROTO_SMC		0	/* SMC protocol, IPv4 */
#define SMCPROTO_SMC6		1	/* SMC protocol, IPv6 */

#define SMC_MAX_ISM_DEVS	8	/* max # of proposed non-native ISM
					 * devices
					 */

extern struct proto smc_proto;
extern struct proto smc_proto6;

#ifdef ATOMIC64_INIT
#define KERNEL_HAS_ATOMIC64
#endif
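
/* Without native 64-bit atomics, SMC cursor handling falls back to a
 * spinlock; see acurs_lock in struct smc_connection below.
 */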

enum smc_state {		/* possible states of an SMC socket */
	SMC_ACTIVE	= 1,
	SMC_INIT	= 2,
	SMC_CLOSED	= 7,
	SMC_LISTEN	= 10,
	/* normal close */
	SMC_PEERCLOSEWAIT1	= 20,
	SMC_PEERCLOSEWAIT2	= 21,
	SMC_APPFINCLOSEWAIT	= 24,
	SMC_APPCLOSEWAIT1	= 22,
	SMC_APPCLOSEWAIT2	= 23,
	SMC_PEERFINCLOSEWAIT	= 25,
	/* abnormal close */
	SMC_PEERABORTWAIT	= 26,
	SMC_PROCESSABORT	= 27,
};

struct smc_link_group;

struct smc_wr_rx_hdr {	/* common prefix part of LLC and CDC to demultiplex */
	union {
		u8 type;
#if defined(__BIG_ENDIAN_BITFIELD)
		struct {
			u8 llc_version:4,
			   llc_type:4;
		};
#elif defined(__LITTLE_ENDIAN_BITFIELD)
		struct {
			u8 llc_type:4,
			   llc_version:4;
		};
#endif
	};
} __aligned(1);
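
/* Receive handling can demultiplex on the first byte alone: CDC messages
 * carry type 0xFE (see struct smc_host_cdc_msg below), while other values
 * identify LLC messages.
 */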

struct smc_cdc_conn_state_flags {
#if defined(__BIG_ENDIAN_BITFIELD)
	u8	peer_done_writing : 1;	/* Sending done indicator */
	u8	peer_conn_closed : 1;	/* Peer connection closed indicator */
	u8	peer_conn_abort : 1;	/* Abnormal close indicator */
	u8	reserved : 5;
#elif defined(__LITTLE_ENDIAN_BITFIELD)
	u8	reserved : 5;
	u8	peer_conn_abort : 1;
	u8	peer_conn_closed : 1;
	u8	peer_done_writing : 1;
#endif
};

struct smc_cdc_producer_flags {
#if defined(__BIG_ENDIAN_BITFIELD)
	u8	write_blocked : 1;	/* Writing Blocked, no rx buf space */
	u8	urg_data_pending : 1;	/* Urgent Data Pending */
	u8	urg_data_present : 1;	/* Urgent Data Present */
	u8	cons_curs_upd_req : 1;	/* cursor update requested */
	u8	failover_validation : 1;/* message replay due to failover */
	u8	reserved : 3;
#elif defined(__LITTLE_ENDIAN_BITFIELD)
	u8	reserved : 3;
	u8	failover_validation : 1;
	u8	cons_curs_upd_req : 1;
	u8	urg_data_present : 1;
	u8	urg_data_pending : 1;
	u8	write_blocked : 1;
#endif
};

/* in host byte order */
union smc_host_cursor {	/* SMC cursor - an offset in an RMBE */
	struct {
		u16	reserved;
		u16	wrap;		/* window wrap sequence number */
		u32	count;		/* cursor (= offset) part */
	};
#ifdef KERNEL_HAS_ATOMIC64
	atomic64_t		acurs;	/* for atomic processing */
#else
	u64			acurs;	/* for atomic processing */
#endif
} __aligned(8);
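
/* The struct view and acurs overlay the same eight bytes, so a complete
 * cursor can be copied in one 64-bit atomic operation. A minimal sketch of
 * such a copy (hypothetical helper name; the in-tree helpers live elsewhere
 * in the SMC code):
 *
 *	static inline void smc_curs_copy_sketch(union smc_host_cursor *tgt,
 *						union smc_host_cursor *src)
 *	{
 *	#ifdef KERNEL_HAS_ATOMIC64
 *		atomic64_set(&tgt->acurs, atomic64_read(&src->acurs));
 *	#else
 *		tgt->acurs = src->acurs;   (under conn->acurs_lock)
 *	#endif
 *	}
 */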

/* in host byte order, except for flag bitfields in network byte order */
struct smc_host_cdc_msg {		/* Connection Data Control message */
	struct smc_wr_rx_hdr		common; /* .type = 0xFE */
	u8				len;	/* length = 44 */
	u16				seqno;	/* connection seq # */
	u32				token;	/* alert_token */
	union smc_host_cursor		prod;	/* producer cursor */
	union smc_host_cursor		cons;	/* consumer cursor,
						 * piggy backed "ack"
						 */
	struct smc_cdc_producer_flags	prod_flags; /* conn. tx/rx status */
	struct smc_cdc_conn_state_flags	conn_state_flags; /* peer conn. status*/
	u8				reserved[18];
} __aligned(8);
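
/* The fields above sum to 44 bytes (1 + 1 + 2 + 4 + 8 + 8 + 1 + 1 + 18),
 * matching the fixed length carried in .len.
 */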
135
Stefan Rasplde8474e2018-05-23 16:38:11 +0200136enum smc_urg_state {
Ursula Braund7cf4a32019-02-21 12:56:54 +0100137 SMC_URG_VALID = 1, /* data present */
138 SMC_URG_NOTYET = 2, /* data pending */
139 SMC_URG_READ = 3, /* data was already read */
Stefan Rasplde8474e2018-05-23 16:38:11 +0200140};
141
Wen Gu341adee2022-01-26 23:33:04 +0800142struct smc_mark_woken {
143 bool woken;
144 void *key;
145 wait_queue_entry_t wait_entry;
146};
147
Ursula Braun0cfdd8f2017-01-09 16:55:17 +0100148struct smc_connection {
149 struct rb_node alert_node;
150 struct smc_link_group *lgr; /* link group of connection */
Karsten Graul387707f2020-04-29 17:10:40 +0200151 struct smc_link *lnk; /* assigned SMC-R link */
Ursula Braun0cfdd8f2017-01-09 16:55:17 +0100152 u32 alert_token_local; /* unique conn. id */
Hans Wippel92a138e2018-05-18 09:34:12 +0200153 u8 peer_rmbe_idx; /* from tcp handshake */
Ursula Brauncd6851f2017-01-09 16:55:18 +0100154 int peer_rmbe_size; /* size of peer rx buffer */
155 atomic_t peer_rmbe_space;/* remaining free bytes in peer
156 * rmbe
157 */
Ursula Braunbd4ad572017-01-09 16:55:20 +0100158 int rtoken_idx; /* idx to peer RMB rkey/addr */
Ursula Brauncd6851f2017-01-09 16:55:18 +0100159
160 struct smc_buf_desc *sndbuf_desc; /* send buffer descriptor */
Ursula Brauncd6851f2017-01-09 16:55:18 +0100161 struct smc_buf_desc *rmb_desc; /* RMBE descriptor */
Ursula Brauncd6851f2017-01-09 16:55:18 +0100162 int rmbe_size_short;/* compressed notation */
Ursula Braun952310c2017-01-09 16:55:24 +0100163 int rmbe_update_limit;
164 /* lower limit for consumer
165 * cursor update
166 */
Ursula Braun5f083182017-01-09 16:55:22 +0100167
168 struct smc_host_cdc_msg local_tx_ctrl; /* host byte order staging
169 * buffer for CDC msg send
170 * .prod cf. TCP snd_nxt
171 * .cons cf. TCP sends ack
172 */
Karsten Graulf0ec4f12020-05-04 14:18:37 +0200173 union smc_host_cursor local_tx_ctrl_fin;
174 /* prod crsr - confirmed by peer
175 */
Ursula Braun5f083182017-01-09 16:55:22 +0100176 union smc_host_cursor tx_curs_prep; /* tx - prepared data
177 * snd_max..wmem_alloc
178 */
179 union smc_host_cursor tx_curs_sent; /* tx - sent data
180 * snd_nxt ?
181 */
182 union smc_host_cursor tx_curs_fin; /* tx - confirmed by peer
183 * snd-wnd-begin ?
184 */
185 atomic_t sndbuf_space; /* remaining space in sndbuf */
186 u16 tx_cdc_seq; /* sequence # for CDC send */
Karsten Graulf0ec4f12020-05-04 14:18:37 +0200187 u16 tx_cdc_seq_fin; /* sequence # - tx completed */
Ursula Braun5f083182017-01-09 16:55:22 +0100188 spinlock_t send_lock; /* protect wr_sends */
Dust Li349d4312021-12-28 17:03:25 +0800189 atomic_t cdc_pend_tx_wr; /* number of pending tx CDC wqe
190 * - inc when post wqe,
191 * - dec on polled tx cqe
192 */
193 wait_queue_head_t cdc_pend_tx_wq; /* wakeup on no cdc_pend_tx_wr*/
Ursula Braun18e537c2017-09-21 09:16:33 +0200194 struct delayed_work tx_work; /* retry of smc_cdc_msg_send */
Hans Wippel95d8d2632018-05-18 09:34:13 +0200195 u32 tx_off; /* base offset in peer rmb */
Ursula Braun5f083182017-01-09 16:55:22 +0100196
197 struct smc_host_cdc_msg local_rx_ctrl; /* filled during event_handl.
198 * .prod cf. TCP rcv_nxt
199 * .cons cf. TCP snd_una
200 */
201 union smc_host_cursor rx_curs_confirmed; /* confirmed to peer
202 * source of snd_una ?
203 */
Stefan Rasplde8474e2018-05-23 16:38:11 +0200204 union smc_host_cursor urg_curs; /* points at urgent byte */
205 enum smc_urg_state urg_state;
206 bool urg_tx_pend; /* urgent data staged */
207 bool urg_rx_skip_pend;
208 /* indicate urgent oob data
209 * read, but previous regular
210 * data still pending
211 */
212 char urg_rx_byte; /* urgent byte */
Ursula Braun5f083182017-01-09 16:55:22 +0100213 atomic_t bytes_to_rcv; /* arrived data,
214 * not yet received
215 */
Stefan Raspl9014db22018-05-03 18:12:39 +0200216 atomic_t splice_pending; /* number of spliced bytes
217 * pending processing
218 */
Ursula Braun5f083182017-01-09 16:55:22 +0100219#ifndef KERNEL_HAS_ATOMIC64
220 spinlock_t acurs_lock; /* protect cursors */
221#endif
Ursula Braun46c28db2017-04-10 14:58:01 +0200222 struct work_struct close_work; /* peer sent some closing */
Karsten Graulb286a062020-05-04 14:18:40 +0200223 struct work_struct abort_work; /* abort the connection */
Hans Wippelbe244f22018-06-28 19:05:10 +0200224 struct tasklet_struct rx_tsklet; /* Receiver tasklet for SMC-D */
225 u8 rx_off; /* receive offset:
226 * 0 for SMC-R, 32 for SMC-D
227 */
228 u64 peer_token; /* SMC-D token of peer */
Ursula Braunb2900982019-10-21 16:13:08 +0200229 u8 killed : 1; /* abnormal termination */
Wen Gu61f434b2022-01-13 16:36:40 +0800230 u8 freed : 1; /* normal termiation */
Karsten Graulb286a062020-05-04 14:18:40 +0200231 u8 out_of_sync : 1; /* out of sync with peer */
Ursula Braun0cfdd8f2017-01-09 16:55:17 +0100232};

struct smc_sock {				/* smc sock container */
	struct sock		sk;
	struct socket		*clcsock;	/* internal tcp socket */
	void			(*clcsk_state_change)(struct sock *sk);
						/* original state_change fct. */
	void			(*clcsk_data_ready)(struct sock *sk);
						/* original data_ready fct. */
	void			(*clcsk_write_space)(struct sock *sk);
						/* original write_space fct. */
	void			(*clcsk_error_report)(struct sock *sk);
						/* original error_report fct. */
	struct smc_connection	conn;		/* smc connection */
	struct smc_sock		*listen_smc;	/* listen parent */
	struct work_struct	connect_work;	/* handle non-blocking connect*/
	struct work_struct	tcp_listen_work;/* handle tcp socket accepts */
	struct work_struct	smc_listen_work;/* prepare new accept socket */
	struct list_head	accept_q;	/* sockets to be accepted */
	spinlock_t		accept_q_lock;	/* protects accept_q */
	bool			use_fallback;	/* fallback to tcp */
	int			fallback_rsn;	/* reason for fallback */
	u32			peer_diagnosis;	/* decline reason from peer */
	int			sockopt_defer_accept;
						/* sockopt TCP_DEFER_ACCEPT
						 * value
						 */
	u8			wait_close_tx_prepared : 1;
						/* shutdown wr or close
						 * started, waiting for unsent
						 * data to be sent
						 */
	u8			connect_nonblock : 1;
						/* non-blocking connect in
						 * flight
						 */
	struct mutex		clcsock_release_lock;
						/* protects clcsock of a listen
						 * socket
						 */
};

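/* struct smc_sock embeds struct sock as its first member, so the cast
 * below is safe.
 */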
static inline struct smc_sock *smc_sk(const struct sock *sk)
{
	return (struct smc_sock *)sk;
}

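/* The internal TCP socket's sk_user_data points back to the owning
 * smc_sock; flag bits such as SK_USER_DATA_NOCOPY are kept in the low
 * pointer bits and must be masked off before the pointer is used.
 */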
static inline struct smc_sock *smc_clcsock_user_data(struct sock *clcsk)
{
	return (struct smc_sock *)
	       ((uintptr_t)clcsk->sk_user_data & ~SK_USER_DATA_NOCOPY);
}

extern struct workqueue_struct	*smc_hs_wq;	/* wq for handshake work */
extern struct workqueue_struct	*smc_close_wq;	/* wq for close work */

#define SMC_SYSTEMID_LEN		8

extern u8	local_systemid[SMC_SYSTEMID_LEN]; /* unique system identifier */

#define ntohll(x) be64_to_cpu(x)
#define htonll(x) cpu_to_be64(x)

/* convert a u32 value into network byte order, store it into a 3 byte field */
static inline void hton24(u8 *net, u32 host)
{
	__be32 t;

	t = cpu_to_be32(host);
	memcpy(net, ((u8 *)&t) + 1, 3);
}

/* convert a received 3 byte field into host byte order */
static inline u32 ntoh24(u8 *net)
{
	__be32 t = 0;

	memcpy(((u8 *)&t) + 1, net, 3);
	return be32_to_cpu(t);
}
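
/* Round-trip example: hton24(buf, 0x010203) stores {0x01, 0x02, 0x03} in
 * buf, and ntoh24(buf) returns 0x010203 again.
 */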

#ifdef CONFIG_XFRM
static inline bool using_ipsec(struct smc_sock *smc)
{
	return smc->clcsock->sk->sk_policy[0] ||
	       smc->clcsock->sk->sk_policy[1];
}
#else
static inline bool using_ipsec(struct smc_sock *smc)
{
	return false;
}
#endif

struct smc_gidlist;

struct sock *smc_accept_dequeue(struct sock *parent, struct socket *new_sock);
void smc_close_non_accepted(struct sock *sk);
void smc_fill_gid_list(struct smc_link_group *lgr,
		       struct smc_gidlist *gidlist,
		       struct smc_ib_device *known_dev, u8 *known_gid);

#endif	/* __SMC_H */