/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Shared Memory Communications over RDMA (SMC-R) and RoCE
 *
 * Definitions for the SMC module (socket related)
 *
 * Copyright IBM Corp. 2016
 *
 * Author(s):  Ursula Braun <ubraun@linux.vnet.ibm.com>
 */
#ifndef __SMC_H
#define __SMC_H

#include <linux/socket.h>
#include <linux/types.h>
#include <linux/compiler.h>	/* __aligned */
#include <net/sock.h>

#include "smc_ib.h"

#define SMCPROTO_SMC		0	/* SMC protocol, IPv4 */
#define SMCPROTO_SMC6		1	/* SMC protocol, IPv6 */

#define SMC_MAX_PORTS		2	/* Max # of ports */

extern struct proto smc_proto;
extern struct proto smc_proto6;

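/* when the platform provides a usable 64-bit atomic type, the cursors in
 * union smc_host_cursor below are read and updated as a single atomic64_t;
 * otherwise they are serialized with the acurs_lock spinlock in
 * struct smc_connection.
 */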
#ifdef ATOMIC64_INIT
#define KERNEL_HAS_ATOMIC64
#endif

enum smc_state {		/* possible states of an SMC socket */
	SMC_ACTIVE	= 1,
	SMC_INIT	= 2,
	SMC_CLOSED	= 7,
	SMC_LISTEN	= 10,
	/* normal close */
	SMC_PEERCLOSEWAIT1	= 20,
	SMC_PEERCLOSEWAIT2	= 21,
	SMC_APPFINCLOSEWAIT	= 24,
	SMC_APPCLOSEWAIT1	= 22,
	SMC_APPCLOSEWAIT2	= 23,
	SMC_PEERFINCLOSEWAIT	= 25,
	/* abnormal close */
	SMC_PEERABORTWAIT	= 26,
	SMC_PROCESSABORT	= 27,
};

struct smc_link_group;

struct smc_wr_rx_hdr {	/* common prefix part of LLC and CDC to demultiplex */
	u8			type;
} __aligned(1);

struct smc_cdc_conn_state_flags {
#if defined(__BIG_ENDIAN_BITFIELD)
	u8	peer_done_writing : 1;	/* Sending done indicator */
	u8	peer_conn_closed : 1;	/* Peer connection closed indicator */
	u8	peer_conn_abort : 1;	/* Abnormal close indicator */
	u8	reserved : 5;
#elif defined(__LITTLE_ENDIAN_BITFIELD)
	u8	reserved : 5;
	u8	peer_conn_abort : 1;
	u8	peer_conn_closed : 1;
	u8	peer_done_writing : 1;
#endif
};

struct smc_cdc_producer_flags {
#if defined(__BIG_ENDIAN_BITFIELD)
	u8	write_blocked : 1;	/* Writing Blocked, no rx buf space */
	u8	urg_data_pending : 1;	/* Urgent Data Pending */
	u8	urg_data_present : 1;	/* Urgent Data Present */
	u8	cons_curs_upd_req : 1;	/* cursor update requested */
	u8	failover_validation : 1;/* message replay due to failover */
	u8	reserved : 3;
#elif defined(__LITTLE_ENDIAN_BITFIELD)
	u8	reserved : 3;
	u8	failover_validation : 1;
	u8	cons_curs_upd_req : 1;
	u8	urg_data_present : 1;
	u8	urg_data_pending : 1;
	u8	write_blocked : 1;
#endif
};

/* in host byte order */
union smc_host_cursor {	/* SMC cursor - an offset in an RMBE */
	struct {
		u16	reserved;
		u16	wrap;		/* window wrap sequence number */
		u32	count;		/* cursor (= offset) part */
	};
#ifdef KERNEL_HAS_ATOMIC64
	atomic64_t		acurs;	/* for atomic processing */
#else
	u64			acurs;	/* for atomic processing */
#endif
} __aligned(8);

/* in host byte order, except for flag bitfields in network byte order */
struct smc_host_cdc_msg {		/* Connection Data Control message */
	struct smc_wr_rx_hdr		common; /* .type = 0xFE */
	u8				len;	/* length = 44 */
	u16				seqno;	/* connection seq # */
	u32				token;	/* alert_token */
	union smc_host_cursor		prod;	/* producer cursor */
	union smc_host_cursor		cons;	/* consumer cursor,
						 * piggy backed "ack"
						 */
	struct smc_cdc_producer_flags	prod_flags;	/* conn. tx/rx status */
	struct smc_cdc_conn_state_flags	conn_state_flags; /* peer conn. status*/
	u8				reserved[18];
} __aligned(8);
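/* the fixed fields above add up to the 44 bytes announced in 'len'
 * (1 + 1 + 2 + 4 + 8 + 8 + 1 + 1 + 18); sizeof(struct smc_host_cdc_msg)
 * itself is rounded up to 48 by __aligned(8).
 */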

struct smc_connection {
	struct rb_node		alert_node;
	struct smc_link_group	*lgr;		/* link group of connection */
	u32			alert_token_local; /* unique conn. id */
	u8			peer_conn_idx;	/* from tcp handshake */
	int			peer_rmbe_size;	/* size of peer rx buffer */
	atomic_t		peer_rmbe_space;/* remaining free bytes in peer
						 * rmbe
						 */
	int			rtoken_idx;	/* idx to peer RMB rkey/addr */

	struct smc_buf_desc	*sndbuf_desc;	/* send buffer descriptor */
	int			sndbuf_size;	/* sndbuf size <== sock wmem */
	struct smc_buf_desc	*rmb_desc;	/* RMBE descriptor */
	int			rmbe_size;	/* RMBE size <== sock rmem */
	int			rmbe_size_short;/* compressed notation */
	int			rmbe_update_limit;
						/* lower limit for consumer
						 * cursor update
						 */

	struct smc_host_cdc_msg	local_tx_ctrl;	/* host byte order staging
						 * buffer for CDC msg send
						 * .prod cf. TCP snd_nxt
						 * .cons cf. TCP sends ack
						 */
	union smc_host_cursor	tx_curs_prep;	/* tx - prepared data
						 * snd_max..wmem_alloc
						 */
	union smc_host_cursor	tx_curs_sent;	/* tx - sent data
						 * snd_nxt ?
						 */
	union smc_host_cursor	tx_curs_fin;	/* tx - confirmed by peer
						 * snd-wnd-begin ?
						 */
	atomic_t		sndbuf_space;	/* remaining space in sndbuf */
	u16			tx_cdc_seq;	/* sequence # for CDC send */
	spinlock_t		send_lock;	/* protect wr_sends */
	struct delayed_work	tx_work;	/* retry of smc_cdc_msg_send */

	struct smc_host_cdc_msg	local_rx_ctrl;	/* filled during event_handl.
						 * .prod cf. TCP rcv_nxt
						 * .cons cf. TCP snd_una
						 */
	union smc_host_cursor	rx_curs_confirmed; /* confirmed to peer
						    * source of snd_una ?
						    */
	atomic_t		bytes_to_rcv;	/* arrived data,
						 * not yet received
						 */
	atomic_t		splice_pending;	/* number of spliced bytes
						 * pending processing
						 */
#ifndef KERNEL_HAS_ATOMIC64
	spinlock_t		acurs_lock;	/* protect cursors */
#endif
	struct work_struct	close_work;	/* peer sent some closing */
};

struct smc_sock {				/* smc sock container */
	struct sock		sk;
	struct socket		*clcsock;	/* internal tcp socket */
	struct smc_connection	conn;		/* smc connection */
	struct smc_sock		*listen_smc;	/* listen parent */
	struct work_struct	tcp_listen_work;/* handle tcp socket accepts */
	struct work_struct	smc_listen_work;/* prepare new accept socket */
	struct list_head	accept_q;	/* sockets to be accepted */
	spinlock_t		accept_q_lock;	/* protects accept_q */
	bool			use_fallback;	/* fallback to tcp */
	int			sockopt_defer_accept;
						/* sockopt TCP_DEFER_ACCEPT
						 * value
						 */
	u8			wait_close_tx_prepared : 1;
						/* shutdown wr or close
						 * started, waiting for unsent
						 * data to be sent
						 */
};

static inline struct smc_sock *smc_sk(const struct sock *sk)
{
	return (struct smc_sock *)sk;
}

#define SMC_SYSTEMID_LEN		8

extern u8	local_systemid[SMC_SYSTEMID_LEN]; /* unique system identifier */

/* convert a u32 value into network byte order, store it into a 3 byte field */
static inline void hton24(u8 *net, u32 host)
{
	__be32 t;

	t = cpu_to_be32(host);
	memcpy(net, ((u8 *)&t) + 1, 3);
}

/* convert a received 3 byte field into host byte order */
static inline u32 ntoh24(u8 *net)
{
	__be32 t = 0;

	memcpy(((u8 *)&t) + 1, net, 3);
	return be32_to_cpu(t);
}
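/* example usage (illustrative sketch, not part of the original header):
 *
 *	u8 buf[3];
 *	u32 val;
 *
 *	hton24(buf, 0x00123456);	// buf = { 0x12, 0x34, 0x56 }
 *	val = ntoh24(buf);		// val == 0x00123456
 *
 * only the low 24 bits of the host value survive the round trip; any
 * higher-order bits are discarded by the 3 byte copy.
 */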

#define SMC_BUF_MIN_SIZE	16384	/* minimum size of an RMB */

#define SMC_RMBE_SIZES		16	/* number of distinct sizes for an RMBE */
/* theoretically, the RFC states that the largest size would be 512K,
 * i.e. compressed 5 and thus 6 sizes (0..5), despite
 * struct smc_clc_msg_accept_confirm.rmbe_size being a 4 bit value (0..15)
 */

/* convert the RMB size into the compressed notation - minimum 16K.
 * In contrast to plain ilog2, this rounds towards the next power of 2,
 * so the socket application gets at least its desired sndbuf / rcvbuf size.
 */
static inline u8 smc_compress_bufsize(int size)
{
	u8 compressed;

	if (size <= SMC_BUF_MIN_SIZE)
		return 0;

	size = (size - 1) >> 14;
	compressed = ilog2(size) + 1;
	if (compressed >= SMC_RMBE_SIZES)
		compressed = SMC_RMBE_SIZES - 1;
	return compressed;
}

/* convert the RMB size from compressed notation into integer */
static inline int smc_uncompress_bufsize(u8 compressed)
{
	u32 size;

	size = 0x00000001 << (((int)compressed) + 14);
	return (int)size;
}
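/* worked examples (illustrative, using the definitions above):
 *
 *	smc_compress_bufsize(16384) == 0, smc_uncompress_bufsize(0) == 16384
 *	smc_compress_bufsize(20000) == 1, smc_uncompress_bufsize(1) == 32768
 *	smc_compress_bufsize(65536) == 2, smc_uncompress_bufsize(2) == 65536
 *	smc_compress_bufsize(65537) == 3, smc_uncompress_bufsize(3) == 131072
 *
 * uncompressing the compressed value never yields less than the requested
 * size; the largest encodable value, 15, corresponds to 1 << 29 bytes.
 */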

#ifdef CONFIG_XFRM
static inline bool using_ipsec(struct smc_sock *smc)
{
	return (smc->clcsock->sk->sk_policy[0] ||
		smc->clcsock->sk->sk_policy[1]) ? true : false;
}
#else
static inline bool using_ipsec(struct smc_sock *smc)
{
	return false;
}
#endif

struct smc_clc_msg_local;

void smc_conn_free(struct smc_connection *conn);
int smc_conn_create(struct smc_sock *smc,
		    struct smc_ib_device *smcibdev, u8 ibport,
		    struct smc_clc_msg_local *lcl, int srv_first_contact);
struct sock *smc_accept_dequeue(struct sock *parent, struct socket *new_sock);
void smc_close_non_accepted(struct sock *sk);

#endif	/* __SMC_H */