blob: 86cccc24e52e2d047c5617af4421902e8dd6c747 [file] [log] [blame]
Greg Kroah-Hartmanb2441312017-11-01 15:07:57 +01001// SPDX-License-Identifier: GPL-2.0
Ursula Brauna046d572017-01-09 16:55:16 +01002/*
3 * Shared Memory Communications over RDMA (SMC-R) and RoCE
4 *
5 * CLC (connection layer control) handshake over initial TCP socket to
6 * prepare for RDMA traffic
7 *
Karsten Graul1a26d022018-03-16 15:06:40 +01008 * Copyright IBM Corp. 2016, 2018
Ursula Brauna046d572017-01-09 16:55:16 +01009 *
10 * Author(s): Ursula Braun <ubraun@linux.vnet.ibm.com>
11 */
12
13#include <linux/in.h>
Karsten Graul696cd302018-03-01 13:51:27 +010014#include <linux/inetdevice.h>
Ursula Braun143c0172017-01-12 14:57:15 +010015#include <linux/if_ether.h>
Ingo Molnarc3edc402017-02-02 08:35:14 +010016#include <linux/sched/signal.h>
17
Karsten Graul1a26d022018-03-16 15:06:40 +010018#include <net/addrconf.h>
Ursula Brauna046d572017-01-09 16:55:16 +010019#include <net/sock.h>
20#include <net/tcp.h>
21
22#include "smc.h"
Ursula Braun0cfdd8f2017-01-09 16:55:17 +010023#include "smc_core.h"
Ursula Brauna046d572017-01-09 16:55:16 +010024#include "smc_clc.h"
25#include "smc_ib.h"
Hans Wippelc758dfd2018-06-28 19:05:09 +020026#include "smc_ism.h"
27
/* fixed on-the-wire lengths of SMC-R / SMC-D accept and confirm messages */
#define SMCR_CLC_ACCEPT_CONFIRM_LEN 68
#define SMCD_CLC_ACCEPT_CONFIRM_LEN 48

/* eye catcher "SMCR" EBCDIC for CLC messages */
static const char SMC_EYECATCHER[4] = {'\xe2', '\xd4', '\xc3', '\xd9'};
/* eye catcher "SMCD" EBCDIC for CLC messages */
static const char SMCD_EYECATCHER[4] = {'\xe2', '\xd4', '\xc3', '\xc4'};
Ursula Braune7b7a642017-12-07 13:38:49 +010036/* check if received message has a correct header length and contains valid
37 * heading and trailing eyecatchers
38 */
39static bool smc_clc_msg_hdr_valid(struct smc_clc_msg_hdr *clcm)
40{
41 struct smc_clc_msg_proposal_prefix *pclc_prfx;
42 struct smc_clc_msg_accept_confirm *clc;
43 struct smc_clc_msg_proposal *pclc;
44 struct smc_clc_msg_decline *dclc;
45 struct smc_clc_msg_trail *trl;
46
Hans Wippelc758dfd2018-06-28 19:05:09 +020047 if (memcmp(clcm->eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER)) &&
48 memcmp(clcm->eyecatcher, SMCD_EYECATCHER, sizeof(SMCD_EYECATCHER)))
Ursula Braune7b7a642017-12-07 13:38:49 +010049 return false;
50 switch (clcm->type) {
51 case SMC_CLC_PROPOSAL:
Hans Wippelc758dfd2018-06-28 19:05:09 +020052 if (clcm->path != SMC_TYPE_R && clcm->path != SMC_TYPE_D &&
53 clcm->path != SMC_TYPE_B)
54 return false;
Ursula Braune7b7a642017-12-07 13:38:49 +010055 pclc = (struct smc_clc_msg_proposal *)clcm;
56 pclc_prfx = smc_clc_proposal_get_prefix(pclc);
57 if (ntohs(pclc->hdr.length) !=
58 sizeof(*pclc) + ntohs(pclc->iparea_offset) +
59 sizeof(*pclc_prfx) +
60 pclc_prfx->ipv6_prefixes_cnt *
61 sizeof(struct smc_clc_ipv6_prefix) +
62 sizeof(*trl))
63 return false;
64 trl = (struct smc_clc_msg_trail *)
65 ((u8 *)pclc + ntohs(pclc->hdr.length) - sizeof(*trl));
66 break;
67 case SMC_CLC_ACCEPT:
68 case SMC_CLC_CONFIRM:
Hans Wippelc758dfd2018-06-28 19:05:09 +020069 if (clcm->path != SMC_TYPE_R && clcm->path != SMC_TYPE_D)
Ursula Braune7b7a642017-12-07 13:38:49 +010070 return false;
Hans Wippelc758dfd2018-06-28 19:05:09 +020071 clc = (struct smc_clc_msg_accept_confirm *)clcm;
72 if ((clcm->path == SMC_TYPE_R &&
73 ntohs(clc->hdr.length) != SMCR_CLC_ACCEPT_CONFIRM_LEN) ||
74 (clcm->path == SMC_TYPE_D &&
75 ntohs(clc->hdr.length) != SMCD_CLC_ACCEPT_CONFIRM_LEN))
76 return false;
77 trl = (struct smc_clc_msg_trail *)
78 ((u8 *)clc + ntohs(clc->hdr.length) - sizeof(*trl));
Ursula Braune7b7a642017-12-07 13:38:49 +010079 break;
80 case SMC_CLC_DECLINE:
81 dclc = (struct smc_clc_msg_decline *)clcm;
82 if (ntohs(dclc->hdr.length) != sizeof(*dclc))
83 return false;
84 trl = &dclc->trl;
85 break;
86 default:
87 return false;
88 }
Hans Wippelc758dfd2018-06-28 19:05:09 +020089 if (memcmp(trl->eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER)) &&
90 memcmp(trl->eyecatcher, SMCD_EYECATCHER, sizeof(SMCD_EYECATCHER)))
Ursula Braune7b7a642017-12-07 13:38:49 +010091 return false;
92 return true;
93}
94
Karsten Graulc246d942018-03-16 15:06:39 +010095/* find ipv4 addr on device and get the prefix len, fill CLC proposal msg */
96static int smc_clc_prfx_set4_rcu(struct dst_entry *dst, __be32 ipv4,
97 struct smc_clc_msg_proposal_prefix *prop)
98{
99 struct in_device *in_dev = __in_dev_get_rcu(dst->dev);
Florian Westphalcd5a4112019-05-31 18:27:07 +0200100 const struct in_ifaddr *ifa;
Karsten Graulc246d942018-03-16 15:06:39 +0100101
102 if (!in_dev)
103 return -ENODEV;
Florian Westphalcd5a4112019-05-31 18:27:07 +0200104
105 in_dev_for_each_ifa_rcu(ifa, in_dev) {
Karsten Graulc246d942018-03-16 15:06:39 +0100106 if (!inet_ifa_match(ipv4, ifa))
107 continue;
108 prop->prefix_len = inet_mask_len(ifa->ifa_mask);
109 prop->outgoing_subnet = ifa->ifa_address & ifa->ifa_mask;
110 /* prop->ipv6_prefixes_cnt = 0; already done by memset before */
111 return 0;
Florian Westphalcd5a4112019-05-31 18:27:07 +0200112 }
Karsten Graulc246d942018-03-16 15:06:39 +0100113 return -ENOENT;
114}
115
Karsten Graul1a26d022018-03-16 15:06:40 +0100116/* fill CLC proposal msg with ipv6 prefixes from device */
117static int smc_clc_prfx_set6_rcu(struct dst_entry *dst,
118 struct smc_clc_msg_proposal_prefix *prop,
119 struct smc_clc_ipv6_prefix *ipv6_prfx)
120{
121#if IS_ENABLED(CONFIG_IPV6)
122 struct inet6_dev *in6_dev = __in6_dev_get(dst->dev);
123 struct inet6_ifaddr *ifa;
124 int cnt = 0;
125
126 if (!in6_dev)
127 return -ENODEV;
128 /* use a maximum of 8 IPv6 prefixes from device */
129 list_for_each_entry(ifa, &in6_dev->addr_list, if_list) {
130 if (ipv6_addr_type(&ifa->addr) & IPV6_ADDR_LINKLOCAL)
131 continue;
132 ipv6_addr_prefix(&ipv6_prfx[cnt].prefix,
133 &ifa->addr, ifa->prefix_len);
134 ipv6_prfx[cnt].prefix_len = ifa->prefix_len;
135 cnt++;
136 if (cnt == SMC_CLC_MAX_V6_PREFIX)
137 break;
138 }
139 prop->ipv6_prefixes_cnt = cnt;
140 if (cnt)
141 return 0;
142#endif
143 return -ENOENT;
144}
145
Karsten Graulc246d942018-03-16 15:06:39 +0100146/* retrieve and set prefixes in CLC proposal msg */
147static int smc_clc_prfx_set(struct socket *clcsock,
Karsten Graul1a26d022018-03-16 15:06:40 +0100148 struct smc_clc_msg_proposal_prefix *prop,
149 struct smc_clc_ipv6_prefix *ipv6_prfx)
Karsten Graul696cd302018-03-01 13:51:27 +0100150{
151 struct dst_entry *dst = sk_dst_get(clcsock->sk);
Karsten Graulc246d942018-03-16 15:06:39 +0100152 struct sockaddr_storage addrs;
Karsten Graul1a26d022018-03-16 15:06:40 +0100153 struct sockaddr_in6 *addr6;
Karsten Graulc246d942018-03-16 15:06:39 +0100154 struct sockaddr_in *addr;
155 int rc = -ENOENT;
156
157 memset(prop, 0, sizeof(*prop));
158 if (!dst) {
159 rc = -ENOTCONN;
160 goto out;
161 }
162 if (!dst->dev) {
163 rc = -ENODEV;
164 goto out_rel;
165 }
166 /* get address to which the internal TCP socket is bound */
167 kernel_getsockname(clcsock, (struct sockaddr *)&addrs);
168 /* analyze IP specific data of net_device belonging to TCP socket */
Karsten Graul1a26d022018-03-16 15:06:40 +0100169 addr6 = (struct sockaddr_in6 *)&addrs;
Karsten Graulc246d942018-03-16 15:06:39 +0100170 rcu_read_lock();
171 if (addrs.ss_family == PF_INET) {
172 /* IPv4 */
173 addr = (struct sockaddr_in *)&addrs;
174 rc = smc_clc_prfx_set4_rcu(dst, addr->sin_addr.s_addr, prop);
Karsten Graul1a26d022018-03-16 15:06:40 +0100175 } else if (ipv6_addr_v4mapped(&addr6->sin6_addr)) {
176 /* mapped IPv4 address - peer is IPv4 only */
177 rc = smc_clc_prfx_set4_rcu(dst, addr6->sin6_addr.s6_addr32[3],
178 prop);
179 } else {
180 /* IPv6 */
181 rc = smc_clc_prfx_set6_rcu(dst, prop, ipv6_prfx);
Karsten Graulc246d942018-03-16 15:06:39 +0100182 }
183 rcu_read_unlock();
184out_rel:
185 dst_release(dst);
186out:
187 return rc;
188}
189
190/* match ipv4 addrs of dev against addr in CLC proposal */
191static int smc_clc_prfx_match4_rcu(struct net_device *dev,
192 struct smc_clc_msg_proposal_prefix *prop)
193{
194 struct in_device *in_dev = __in_dev_get_rcu(dev);
Florian Westphalcd5a4112019-05-31 18:27:07 +0200195 const struct in_ifaddr *ifa;
Karsten Graulc246d942018-03-16 15:06:39 +0100196
197 if (!in_dev)
198 return -ENODEV;
Florian Westphalcd5a4112019-05-31 18:27:07 +0200199 in_dev_for_each_ifa_rcu(ifa, in_dev) {
Karsten Graulc246d942018-03-16 15:06:39 +0100200 if (prop->prefix_len == inet_mask_len(ifa->ifa_mask) &&
201 inet_ifa_match(prop->outgoing_subnet, ifa))
202 return 0;
Florian Westphalcd5a4112019-05-31 18:27:07 +0200203 }
Karsten Graulc246d942018-03-16 15:06:39 +0100204
205 return -ENOENT;
206}
207
Karsten Graul1a26d022018-03-16 15:06:40 +0100208/* match ipv6 addrs of dev against addrs in CLC proposal */
209static int smc_clc_prfx_match6_rcu(struct net_device *dev,
210 struct smc_clc_msg_proposal_prefix *prop)
211{
212#if IS_ENABLED(CONFIG_IPV6)
213 struct inet6_dev *in6_dev = __in6_dev_get(dev);
214 struct smc_clc_ipv6_prefix *ipv6_prfx;
215 struct inet6_ifaddr *ifa;
216 int i, max;
217
218 if (!in6_dev)
219 return -ENODEV;
220 /* ipv6 prefix list starts behind smc_clc_msg_proposal_prefix */
221 ipv6_prfx = (struct smc_clc_ipv6_prefix *)((u8 *)prop + sizeof(*prop));
222 max = min_t(u8, prop->ipv6_prefixes_cnt, SMC_CLC_MAX_V6_PREFIX);
223 list_for_each_entry(ifa, &in6_dev->addr_list, if_list) {
224 if (ipv6_addr_type(&ifa->addr) & IPV6_ADDR_LINKLOCAL)
225 continue;
226 for (i = 0; i < max; i++) {
227 if (ifa->prefix_len == ipv6_prfx[i].prefix_len &&
228 ipv6_prefix_equal(&ifa->addr, &ipv6_prfx[i].prefix,
229 ifa->prefix_len))
230 return 0;
231 }
232 }
233#endif
234 return -ENOENT;
235}
236
Karsten Graulc246d942018-03-16 15:06:39 +0100237/* check if proposed prefixes match one of our device prefixes */
238int smc_clc_prfx_match(struct socket *clcsock,
239 struct smc_clc_msg_proposal_prefix *prop)
240{
241 struct dst_entry *dst = sk_dst_get(clcsock->sk);
Karsten Graul1a26d022018-03-16 15:06:40 +0100242 int rc;
Karsten Graul696cd302018-03-01 13:51:27 +0100243
244 if (!dst) {
245 rc = -ENOTCONN;
246 goto out;
247 }
248 if (!dst->dev) {
249 rc = -ENODEV;
250 goto out_rel;
251 }
Karsten Graul696cd302018-03-01 13:51:27 +0100252 rcu_read_lock();
Karsten Graulc246d942018-03-16 15:06:39 +0100253 if (!prop->ipv6_prefixes_cnt)
254 rc = smc_clc_prfx_match4_rcu(dst->dev, prop);
Karsten Graul1a26d022018-03-16 15:06:40 +0100255 else
256 rc = smc_clc_prfx_match6_rcu(dst->dev, prop);
Karsten Graul696cd302018-03-01 13:51:27 +0100257 rcu_read_unlock();
Karsten Graul696cd302018-03-01 13:51:27 +0100258out_rel:
259 dst_release(dst);
260out:
261 return rc;
262}
263
Ursula Brauna046d572017-01-09 16:55:16 +0100264/* Wait for data on the tcp-socket, analyze received data
265 * Returns:
266 * 0 if success and it was not a decline that we received.
267 * SMC_CLC_DECL_REPLY if decline received for fallback w/o another decl send.
268 * clcsock error, -EINTR, -ECONNRESET, -EPROTO otherwise.
269 */
270int smc_clc_wait_msg(struct smc_sock *smc, void *buf, int buflen,
Ursula Braun2b59f582018-11-22 10:26:39 +0100271 u8 expected_type, unsigned long timeout)
Ursula Brauna046d572017-01-09 16:55:16 +0100272{
Karsten Graulf6bdc422018-07-18 15:22:51 +0200273 long rcvtimeo = smc->clcsock->sk->sk_rcvtimeo;
Ursula Brauna046d572017-01-09 16:55:16 +0100274 struct sock *clc_sk = smc->clcsock->sk;
275 struct smc_clc_msg_hdr *clcm = buf;
276 struct msghdr msg = {NULL, 0};
277 int reason_code = 0;
Al Virod63d2712017-09-20 20:21:22 -0400278 struct kvec vec = {buf, buflen};
Ursula Brauna046d572017-01-09 16:55:16 +0100279 int len, datlen;
280 int krflags;
281
282 /* peek the first few bytes to determine length of data to receive
283 * so we don't consume any subsequent CLC message or payload data
284 * in the TCP byte stream
285 */
Al Virod63d2712017-09-20 20:21:22 -0400286 /*
287 * Caller must make sure that buflen is no less than
288 * sizeof(struct smc_clc_msg_hdr)
289 */
Ursula Brauna046d572017-01-09 16:55:16 +0100290 krflags = MSG_PEEK | MSG_WAITALL;
Ursula Braun2b59f582018-11-22 10:26:39 +0100291 clc_sk->sk_rcvtimeo = timeout;
David Howellsaa563d72018-10-20 00:57:56 +0100292 iov_iter_kvec(&msg.msg_iter, READ, &vec, 1,
Al Virod63d2712017-09-20 20:21:22 -0400293 sizeof(struct smc_clc_msg_hdr));
294 len = sock_recvmsg(smc->clcsock, &msg, krflags);
Ursula Brauna046d572017-01-09 16:55:16 +0100295 if (signal_pending(current)) {
296 reason_code = -EINTR;
297 clc_sk->sk_err = EINTR;
298 smc->sk.sk_err = EINTR;
299 goto out;
300 }
301 if (clc_sk->sk_err) {
302 reason_code = -clc_sk->sk_err;
Ursula Braun9ed28552018-11-22 10:26:37 +0100303 if (clc_sk->sk_err == EAGAIN &&
304 expected_type == SMC_CLC_DECLINE)
305 clc_sk->sk_err = 0; /* reset for fallback usage */
306 else
307 smc->sk.sk_err = clc_sk->sk_err;
Ursula Brauna046d572017-01-09 16:55:16 +0100308 goto out;
309 }
310 if (!len) { /* peer has performed orderly shutdown */
311 smc->sk.sk_err = ECONNRESET;
312 reason_code = -ECONNRESET;
313 goto out;
314 }
315 if (len < 0) {
Ursula Braun9ed28552018-11-22 10:26:37 +0100316 if (len != -EAGAIN || expected_type != SMC_CLC_DECLINE)
317 smc->sk.sk_err = -len;
Ursula Brauna046d572017-01-09 16:55:16 +0100318 reason_code = len;
319 goto out;
320 }
321 datlen = ntohs(clcm->length);
322 if ((len < sizeof(struct smc_clc_msg_hdr)) ||
Ursula Braune7b7a642017-12-07 13:38:49 +0100323 (datlen > buflen) ||
Hans Wippelc758dfd2018-06-28 19:05:09 +0200324 (clcm->version != SMC_CLC_V1) ||
325 (clcm->path != SMC_TYPE_R && clcm->path != SMC_TYPE_D &&
326 clcm->path != SMC_TYPE_B) ||
Ursula Brauna046d572017-01-09 16:55:16 +0100327 ((clcm->type != SMC_CLC_DECLINE) &&
328 (clcm->type != expected_type))) {
329 smc->sk.sk_err = EPROTO;
330 reason_code = -EPROTO;
331 goto out;
332 }
333
334 /* receive the complete CLC message */
Ursula Brauna046d572017-01-09 16:55:16 +0100335 memset(&msg, 0, sizeof(struct msghdr));
David Howellsaa563d72018-10-20 00:57:56 +0100336 iov_iter_kvec(&msg.msg_iter, READ, &vec, 1, datlen);
Ursula Brauna046d572017-01-09 16:55:16 +0100337 krflags = MSG_WAITALL;
Al Virod63d2712017-09-20 20:21:22 -0400338 len = sock_recvmsg(smc->clcsock, &msg, krflags);
Ursula Braune7b7a642017-12-07 13:38:49 +0100339 if (len < datlen || !smc_clc_msg_hdr_valid(clcm)) {
Ursula Brauna046d572017-01-09 16:55:16 +0100340 smc->sk.sk_err = EPROTO;
341 reason_code = -EPROTO;
342 goto out;
343 }
Ursula Braun0cfdd8f2017-01-09 16:55:17 +0100344 if (clcm->type == SMC_CLC_DECLINE) {
Karsten Graul603cc142018-07-25 16:35:32 +0200345 struct smc_clc_msg_decline *dclc;
346
347 dclc = (struct smc_clc_msg_decline *)clcm;
348 reason_code = SMC_CLC_DECL_PEERDECL;
349 smc->peer_diagnosis = ntohl(dclc->peer_diagnosis);
Ursula Braunbfbedfd2017-09-21 09:16:32 +0200350 if (((struct smc_clc_msg_decline *)buf)->hdr.flag) {
Karsten Graul517c3002018-05-15 17:05:03 +0200351 smc->conn.lgr->sync_err = 1;
Ursula Braun5421ec22019-11-14 13:02:42 +0100352 smc_lgr_terminate(smc->conn.lgr, true);
Ursula Braunbfbedfd2017-09-21 09:16:32 +0200353 }
Ursula Braun0cfdd8f2017-01-09 16:55:17 +0100354 }
355
Ursula Brauna046d572017-01-09 16:55:16 +0100356out:
Ursula Braun2b59f582018-11-22 10:26:39 +0100357 clc_sk->sk_rcvtimeo = rcvtimeo;
Ursula Brauna046d572017-01-09 16:55:16 +0100358 return reason_code;
359}
360
361/* send CLC DECLINE message across internal TCP socket */
Ursula Braunbfbedfd2017-09-21 09:16:32 +0200362int smc_clc_send_decline(struct smc_sock *smc, u32 peer_diag_info)
Ursula Brauna046d572017-01-09 16:55:16 +0100363{
364 struct smc_clc_msg_decline dclc;
365 struct msghdr msg;
366 struct kvec vec;
367 int len;
368
369 memset(&dclc, 0, sizeof(dclc));
370 memcpy(dclc.hdr.eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER));
371 dclc.hdr.type = SMC_CLC_DECLINE;
372 dclc.hdr.length = htons(sizeof(struct smc_clc_msg_decline));
373 dclc.hdr.version = SMC_CLC_V1;
Ursula Braunbfbedfd2017-09-21 09:16:32 +0200374 dclc.hdr.flag = (peer_diag_info == SMC_CLC_DECL_SYNCERR) ? 1 : 0;
Ursula Braun369537c2020-02-14 08:59:00 +0100375 if (smc->conn.lgr && !smc->conn.lgr->is_smcd)
376 memcpy(dclc.id_for_peer, local_systemid,
377 sizeof(local_systemid));
Ursula Brauna046d572017-01-09 16:55:16 +0100378 dclc.peer_diagnosis = htonl(peer_diag_info);
379 memcpy(dclc.trl.eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER));
380
381 memset(&msg, 0, sizeof(msg));
382 vec.iov_base = &dclc;
383 vec.iov_len = sizeof(struct smc_clc_msg_decline);
384 len = kernel_sendmsg(smc->clcsock, &msg, &vec, 1,
385 sizeof(struct smc_clc_msg_decline));
Ursula Braun14d22d42019-01-30 18:51:00 +0100386 if (len < 0 || len < sizeof(struct smc_clc_msg_decline))
Ursula Braun6ae36bf2018-11-22 10:26:36 +0100387 len = -EPROTO;
388 return len > 0 ? 0 : len;
Ursula Brauna046d572017-01-09 16:55:16 +0100389}
390
391/* send CLC PROPOSAL message across internal TCP socket */
Hans Wippelc758dfd2018-06-28 19:05:09 +0200392int smc_clc_send_proposal(struct smc_sock *smc, int smc_type,
Karsten Graulbc36d2f2019-04-12 12:57:26 +0200393 struct smc_init_info *ini)
Ursula Brauna046d572017-01-09 16:55:16 +0100394{
Karsten Graul1a26d022018-03-16 15:06:40 +0100395 struct smc_clc_ipv6_prefix ipv6_prfx[SMC_CLC_MAX_V6_PREFIX];
Ursula Braune7b7a642017-12-07 13:38:49 +0100396 struct smc_clc_msg_proposal_prefix pclc_prfx;
Hans Wippelc758dfd2018-06-28 19:05:09 +0200397 struct smc_clc_msg_smcd pclc_smcd;
Ursula Brauna046d572017-01-09 16:55:16 +0100398 struct smc_clc_msg_proposal pclc;
Ursula Braune7b7a642017-12-07 13:38:49 +0100399 struct smc_clc_msg_trail trl;
Karsten Graul1a26d022018-03-16 15:06:40 +0100400 int len, i, plen, rc;
Ursula Brauna046d572017-01-09 16:55:16 +0100401 int reason_code = 0;
Hans Wippelc758dfd2018-06-28 19:05:09 +0200402 struct kvec vec[5];
Ursula Brauna046d572017-01-09 16:55:16 +0100403 struct msghdr msg;
Ursula Brauna046d572017-01-09 16:55:16 +0100404
Karsten Graulc246d942018-03-16 15:06:39 +0100405 /* retrieve ip prefixes for CLC proposal msg */
Karsten Graul1a26d022018-03-16 15:06:40 +0100406 rc = smc_clc_prfx_set(smc->clcsock, &pclc_prfx, ipv6_prfx);
Karsten Graulc246d942018-03-16 15:06:39 +0100407 if (rc)
408 return SMC_CLC_DECL_CNFERR; /* configuration error */
409
Ursula Brauna046d572017-01-09 16:55:16 +0100410 /* send SMC Proposal CLC message */
Karsten Graul1a26d022018-03-16 15:06:40 +0100411 plen = sizeof(pclc) + sizeof(pclc_prfx) +
412 (pclc_prfx.ipv6_prefixes_cnt * sizeof(ipv6_prfx[0])) +
413 sizeof(trl);
Ursula Brauna046d572017-01-09 16:55:16 +0100414 memset(&pclc, 0, sizeof(pclc));
415 memcpy(pclc.hdr.eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER));
416 pclc.hdr.type = SMC_CLC_PROPOSAL;
Ursula Brauna046d572017-01-09 16:55:16 +0100417 pclc.hdr.version = SMC_CLC_V1; /* SMC version */
Hans Wippelc758dfd2018-06-28 19:05:09 +0200418 pclc.hdr.path = smc_type;
419 if (smc_type == SMC_TYPE_R || smc_type == SMC_TYPE_B) {
420 /* add SMC-R specifics */
421 memcpy(pclc.lcl.id_for_peer, local_systemid,
422 sizeof(local_systemid));
Karsten Graulbc36d2f2019-04-12 12:57:26 +0200423 memcpy(&pclc.lcl.gid, ini->ib_gid, SMC_GID_SIZE);
424 memcpy(&pclc.lcl.mac, &ini->ib_dev->mac[ini->ib_port - 1],
425 ETH_ALEN);
Hans Wippelc758dfd2018-06-28 19:05:09 +0200426 pclc.iparea_offset = htons(0);
427 }
428 if (smc_type == SMC_TYPE_D || smc_type == SMC_TYPE_B) {
429 /* add SMC-D specifics */
430 memset(&pclc_smcd, 0, sizeof(pclc_smcd));
431 plen += sizeof(pclc_smcd);
432 pclc.iparea_offset = htons(SMC_CLC_PROPOSAL_MAX_OFFSET);
Karsten Graulbc36d2f2019-04-12 12:57:26 +0200433 pclc_smcd.gid = ini->ism_dev->local_gid;
Hans Wippelc758dfd2018-06-28 19:05:09 +0200434 }
435 pclc.hdr.length = htons(plen);
Ursula Brauna046d572017-01-09 16:55:16 +0100436
Ursula Braune7b7a642017-12-07 13:38:49 +0100437 memcpy(trl.eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER));
Ursula Brauna046d572017-01-09 16:55:16 +0100438 memset(&msg, 0, sizeof(msg));
Karsten Graul1a26d022018-03-16 15:06:40 +0100439 i = 0;
440 vec[i].iov_base = &pclc;
441 vec[i++].iov_len = sizeof(pclc);
Hans Wippelc758dfd2018-06-28 19:05:09 +0200442 if (smc_type == SMC_TYPE_D || smc_type == SMC_TYPE_B) {
443 vec[i].iov_base = &pclc_smcd;
444 vec[i++].iov_len = sizeof(pclc_smcd);
445 }
Karsten Graul1a26d022018-03-16 15:06:40 +0100446 vec[i].iov_base = &pclc_prfx;
447 vec[i++].iov_len = sizeof(pclc_prfx);
448 if (pclc_prfx.ipv6_prefixes_cnt > 0) {
449 vec[i].iov_base = &ipv6_prfx[0];
450 vec[i++].iov_len = pclc_prfx.ipv6_prefixes_cnt *
451 sizeof(ipv6_prfx[0]);
452 }
453 vec[i].iov_base = &trl;
454 vec[i++].iov_len = sizeof(trl);
Ursula Brauna046d572017-01-09 16:55:16 +0100455 /* due to the few bytes needed for clc-handshake this cannot block */
Karsten Graul1a26d022018-03-16 15:06:40 +0100456 len = kernel_sendmsg(smc->clcsock, &msg, vec, i, plen);
YueHaibing38189772018-09-18 15:46:38 +0200457 if (len < 0) {
458 smc->sk.sk_err = smc->clcsock->sk->sk_err;
459 reason_code = -smc->sk.sk_err;
460 } else if (len < (int)sizeof(pclc)) {
461 reason_code = -ENETUNREACH;
462 smc->sk.sk_err = -reason_code;
Ursula Brauna046d572017-01-09 16:55:16 +0100463 }
464
465 return reason_code;
466}
467
468/* send CLC CONFIRM message across internal TCP socket */
469int smc_clc_send_confirm(struct smc_sock *smc)
470{
Ursula Braun0cfdd8f2017-01-09 16:55:17 +0100471 struct smc_connection *conn = &smc->conn;
Ursula Brauna046d572017-01-09 16:55:16 +0100472 struct smc_clc_msg_accept_confirm cclc;
Ursula Braun0cfdd8f2017-01-09 16:55:17 +0100473 struct smc_link *link;
Ursula Brauna046d572017-01-09 16:55:16 +0100474 int reason_code = 0;
475 struct msghdr msg;
476 struct kvec vec;
477 int len;
478
479 /* send SMC Confirm CLC msg */
480 memset(&cclc, 0, sizeof(cclc));
Ursula Brauna046d572017-01-09 16:55:16 +0100481 cclc.hdr.type = SMC_CLC_CONFIRM;
Ursula Brauna046d572017-01-09 16:55:16 +0100482 cclc.hdr.version = SMC_CLC_V1; /* SMC version */
Hans Wippelc758dfd2018-06-28 19:05:09 +0200483 if (smc->conn.lgr->is_smcd) {
484 /* SMC-D specific settings */
485 memcpy(cclc.hdr.eyecatcher, SMCD_EYECATCHER,
486 sizeof(SMCD_EYECATCHER));
487 cclc.hdr.path = SMC_TYPE_D;
488 cclc.hdr.length = htons(SMCD_CLC_ACCEPT_CONFIRM_LEN);
489 cclc.gid = conn->lgr->smcd->local_gid;
490 cclc.token = conn->rmb_desc->token;
491 cclc.dmbe_size = conn->rmbe_size_short;
492 cclc.dmbe_idx = 0;
493 memcpy(&cclc.linkid, conn->lgr->id, SMC_LGR_ID_SIZE);
494 memcpy(cclc.smcd_trl.eyecatcher, SMCD_EYECATCHER,
495 sizeof(SMCD_EYECATCHER));
496 } else {
497 /* SMC-R specific settings */
498 link = &conn->lgr->lnk[SMC_SINGLE_LINK];
499 memcpy(cclc.hdr.eyecatcher, SMC_EYECATCHER,
500 sizeof(SMC_EYECATCHER));
501 cclc.hdr.path = SMC_TYPE_R;
502 cclc.hdr.length = htons(SMCR_CLC_ACCEPT_CONFIRM_LEN);
503 memcpy(cclc.lcl.id_for_peer, local_systemid,
504 sizeof(local_systemid));
Ursula Braun7005ada2018-07-25 16:35:31 +0200505 memcpy(&cclc.lcl.gid, link->gid, SMC_GID_SIZE);
Hans Wippelc758dfd2018-06-28 19:05:09 +0200506 memcpy(&cclc.lcl.mac, &link->smcibdev->mac[link->ibport - 1],
507 ETH_ALEN);
508 hton24(cclc.qpn, link->roce_qp->qp_num);
509 cclc.rmb_rkey =
510 htonl(conn->rmb_desc->mr_rx[SMC_SINGLE_LINK]->rkey);
511 cclc.rmbe_idx = 1; /* for now: 1 RMB = 1 RMBE */
512 cclc.rmbe_alert_token = htonl(conn->alert_token_local);
513 cclc.qp_mtu = min(link->path_mtu, link->peer_mtu);
514 cclc.rmbe_size = conn->rmbe_size_short;
515 cclc.rmb_dma_addr = cpu_to_be64((u64)sg_dma_address
516 (conn->rmb_desc->sgt[SMC_SINGLE_LINK].sgl));
517 hton24(cclc.psn, link->psn_initial);
518 memcpy(cclc.smcr_trl.eyecatcher, SMC_EYECATCHER,
519 sizeof(SMC_EYECATCHER));
520 }
Ursula Brauna046d572017-01-09 16:55:16 +0100521
522 memset(&msg, 0, sizeof(msg));
523 vec.iov_base = &cclc;
Hans Wippelc758dfd2018-06-28 19:05:09 +0200524 vec.iov_len = ntohs(cclc.hdr.length);
525 len = kernel_sendmsg(smc->clcsock, &msg, &vec, 1,
526 ntohs(cclc.hdr.length));
527 if (len < ntohs(cclc.hdr.length)) {
Ursula Brauna046d572017-01-09 16:55:16 +0100528 if (len >= 0) {
529 reason_code = -ENETUNREACH;
530 smc->sk.sk_err = -reason_code;
531 } else {
532 smc->sk.sk_err = smc->clcsock->sk->sk_err;
533 reason_code = -smc->sk.sk_err;
534 }
535 }
536 return reason_code;
537}
538
539/* send CLC ACCEPT message across internal TCP socket */
Ursula Braun0cfdd8f2017-01-09 16:55:17 +0100540int smc_clc_send_accept(struct smc_sock *new_smc, int srv_first_contact)
Ursula Brauna046d572017-01-09 16:55:16 +0100541{
Ursula Braun0cfdd8f2017-01-09 16:55:17 +0100542 struct smc_connection *conn = &new_smc->conn;
Ursula Brauna046d572017-01-09 16:55:16 +0100543 struct smc_clc_msg_accept_confirm aclc;
Ursula Braun0cfdd8f2017-01-09 16:55:17 +0100544 struct smc_link *link;
Ursula Brauna046d572017-01-09 16:55:16 +0100545 struct msghdr msg;
546 struct kvec vec;
Ursula Brauna046d572017-01-09 16:55:16 +0100547 int len;
548
549 memset(&aclc, 0, sizeof(aclc));
Ursula Brauna046d572017-01-09 16:55:16 +0100550 aclc.hdr.type = SMC_CLC_ACCEPT;
Ursula Brauna046d572017-01-09 16:55:16 +0100551 aclc.hdr.version = SMC_CLC_V1; /* SMC version */
Ursula Braun0cfdd8f2017-01-09 16:55:17 +0100552 if (srv_first_contact)
553 aclc.hdr.flag = 1;
Hans Wippelc758dfd2018-06-28 19:05:09 +0200554
555 if (new_smc->conn.lgr->is_smcd) {
556 /* SMC-D specific settings */
557 aclc.hdr.length = htons(SMCD_CLC_ACCEPT_CONFIRM_LEN);
558 memcpy(aclc.hdr.eyecatcher, SMCD_EYECATCHER,
559 sizeof(SMCD_EYECATCHER));
560 aclc.hdr.path = SMC_TYPE_D;
561 aclc.gid = conn->lgr->smcd->local_gid;
562 aclc.token = conn->rmb_desc->token;
563 aclc.dmbe_size = conn->rmbe_size_short;
564 aclc.dmbe_idx = 0;
565 memcpy(&aclc.linkid, conn->lgr->id, SMC_LGR_ID_SIZE);
566 memcpy(aclc.smcd_trl.eyecatcher, SMCD_EYECATCHER,
567 sizeof(SMCD_EYECATCHER));
568 } else {
569 /* SMC-R specific settings */
570 aclc.hdr.length = htons(SMCR_CLC_ACCEPT_CONFIRM_LEN);
571 memcpy(aclc.hdr.eyecatcher, SMC_EYECATCHER,
572 sizeof(SMC_EYECATCHER));
573 aclc.hdr.path = SMC_TYPE_R;
574 link = &conn->lgr->lnk[SMC_SINGLE_LINK];
575 memcpy(aclc.lcl.id_for_peer, local_systemid,
576 sizeof(local_systemid));
Ursula Braun7005ada2018-07-25 16:35:31 +0200577 memcpy(&aclc.lcl.gid, link->gid, SMC_GID_SIZE);
Hans Wippelc758dfd2018-06-28 19:05:09 +0200578 memcpy(&aclc.lcl.mac, link->smcibdev->mac[link->ibport - 1],
579 ETH_ALEN);
580 hton24(aclc.qpn, link->roce_qp->qp_num);
581 aclc.rmb_rkey =
582 htonl(conn->rmb_desc->mr_rx[SMC_SINGLE_LINK]->rkey);
583 aclc.rmbe_idx = 1; /* as long as 1 RMB = 1 RMBE */
584 aclc.rmbe_alert_token = htonl(conn->alert_token_local);
585 aclc.qp_mtu = link->path_mtu;
586 aclc.rmbe_size = conn->rmbe_size_short,
587 aclc.rmb_dma_addr = cpu_to_be64((u64)sg_dma_address
588 (conn->rmb_desc->sgt[SMC_SINGLE_LINK].sgl));
589 hton24(aclc.psn, link->psn_initial);
590 memcpy(aclc.smcr_trl.eyecatcher, SMC_EYECATCHER,
591 sizeof(SMC_EYECATCHER));
592 }
Ursula Brauna046d572017-01-09 16:55:16 +0100593
594 memset(&msg, 0, sizeof(msg));
595 vec.iov_base = &aclc;
Hans Wippelc758dfd2018-06-28 19:05:09 +0200596 vec.iov_len = ntohs(aclc.hdr.length);
597 len = kernel_sendmsg(new_smc->clcsock, &msg, &vec, 1,
598 ntohs(aclc.hdr.length));
Ursula Braun6ae36bf2018-11-22 10:26:36 +0100599 if (len < ntohs(aclc.hdr.length))
600 len = len >= 0 ? -EPROTO : -new_smc->clcsock->sk->sk_err;
Ursula Brauna046d572017-01-09 16:55:16 +0100601
Ursula Braun6ae36bf2018-11-22 10:26:36 +0100602 return len > 0 ? 0 : len;
Ursula Brauna046d572017-01-09 16:55:16 +0100603}