// SPDX-License-Identifier: GPL-2.0
/*
 * Shared Memory Communications over RDMA (SMC-R) and RoCE
 *
 * CLC (connection layer control) handshake over initial TCP socket to
 * prepare for RDMA traffic
 *
 * Copyright IBM Corp. 2016, 2018
 *
 * Author(s):  Ursula Braun <ubraun@linux.vnet.ibm.com>
 */

#include <linux/in.h>
#include <linux/inetdevice.h>
#include <linux/if_ether.h>
#include <linux/sched/signal.h>
#include <linux/utsname.h>
#include <linux/ctype.h>

#include <net/addrconf.h>
#include <net/sock.h>
#include <net/tcp.h>

#include "smc.h"
#include "smc_core.h"
#include "smc_clc.h"
#include "smc_ib.h"
#include "smc_ism.h"

#define SMCR_CLC_ACCEPT_CONFIRM_LEN 68
#define SMCD_CLC_ACCEPT_CONFIRM_LEN 48
#define SMCD_CLC_ACCEPT_CONFIRM_LEN_V2 78
#define SMC_CLC_RECV_BUF_LEN	100

/* eye catcher "SMCR" EBCDIC for CLC messages */
static const char SMC_EYECATCHER[4] = {'\xe2', '\xd4', '\xc3', '\xd9'};
/* eye catcher "SMCD" EBCDIC for CLC messages */
static const char SMCD_EYECATCHER[4] = {'\xe2', '\xd4', '\xc3', '\xc4'};

static u8 smc_hostname[SMC_MAX_HOSTNAME_LEN];

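/* The CLC handshake is carried over the internal TCP socket (clcsock):
 * the client sends a PROPOSAL, the server answers with an ACCEPT, the client
 * completes with a CONFIRM, and either side may abort with a DECLINE.
 * The validators below check the on-wire length of each arriving message
 * type before any of its variable-length areas are dereferenced.
 */
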
/* check arriving CLC proposal */
static bool smc_clc_msg_prop_valid(struct smc_clc_msg_proposal *pclc)
{
	struct smc_clc_msg_proposal_prefix *pclc_prfx;
	struct smc_clc_smcd_v2_extension *smcd_v2_ext;
	struct smc_clc_msg_hdr *hdr = &pclc->hdr;
	struct smc_clc_v2_extension *v2_ext;

	v2_ext = smc_get_clc_v2_ext(pclc);
	pclc_prfx = smc_clc_proposal_get_prefix(pclc);
	if (hdr->version == SMC_V1) {
		if (hdr->typev1 == SMC_TYPE_N)
			return false;
		if (ntohs(hdr->length) !=
			sizeof(*pclc) + ntohs(pclc->iparea_offset) +
			sizeof(*pclc_prfx) +
			pclc_prfx->ipv6_prefixes_cnt *
				sizeof(struct smc_clc_ipv6_prefix) +
			sizeof(struct smc_clc_msg_trail))
			return false;
	} else {
		if (ntohs(hdr->length) !=
			sizeof(*pclc) +
			sizeof(struct smc_clc_msg_smcd) +
			(hdr->typev1 != SMC_TYPE_N ?
				sizeof(*pclc_prfx) +
				pclc_prfx->ipv6_prefixes_cnt *
					sizeof(struct smc_clc_ipv6_prefix) : 0) +
			(hdr->typev2 != SMC_TYPE_N ?
				sizeof(*v2_ext) +
				v2_ext->hdr.eid_cnt * SMC_MAX_EID_LEN : 0) +
			(smcd_indicated(hdr->typev2) ?
				sizeof(*smcd_v2_ext) + v2_ext->hdr.ism_gid_cnt *
					sizeof(struct smc_clc_smcd_gid_chid) :
				0) +
			sizeof(struct smc_clc_msg_trail))
			return false;
	}
	return true;
}

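/* Accept and confirm messages have fixed lengths per variant: 68 bytes for
 * SMC-R, 48 bytes for SMC-D V1 and 78 bytes for SMC-D V2, optionally followed
 * by a first contact extension (see the SMC*_CLC_ACCEPT_CONFIRM_LEN* defines
 * above).
 */
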
/* check arriving CLC accept or confirm */
static bool
smc_clc_msg_acc_conf_valid(struct smc_clc_msg_accept_confirm_v2 *clc_v2)
{
	struct smc_clc_msg_hdr *hdr = &clc_v2->hdr;

	if (hdr->typev1 != SMC_TYPE_R && hdr->typev1 != SMC_TYPE_D)
		return false;
	if (hdr->version == SMC_V1) {
		if ((hdr->typev1 == SMC_TYPE_R &&
		     ntohs(hdr->length) != SMCR_CLC_ACCEPT_CONFIRM_LEN) ||
		    (hdr->typev1 == SMC_TYPE_D &&
		     ntohs(hdr->length) != SMCD_CLC_ACCEPT_CONFIRM_LEN))
			return false;
	} else {
		if (hdr->typev1 == SMC_TYPE_D &&
		    ntohs(hdr->length) != SMCD_CLC_ACCEPT_CONFIRM_LEN_V2 &&
		    (ntohs(hdr->length) != SMCD_CLC_ACCEPT_CONFIRM_LEN_V2 +
				sizeof(struct smc_clc_first_contact_ext)))
			return false;
	}
	return true;
}

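/* The first contact extension advertises the peer OS type, the SMC release
 * and the blank padded local hostname cached by smc_clc_init(); it is
 * appended to a V2 accept/confirm whenever this is the first contact for the
 * link group (see smc_clc_send_confirm_accept() below).
 */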
static void smc_clc_fill_fce(struct smc_clc_first_contact_ext *fce, int *len)
{
	memset(fce, 0, sizeof(*fce));
	fce->os_type = SMC_CLC_OS_LINUX;
	fce->release = SMC_RELEASE;
	memcpy(fce->hostname, smc_hostname, sizeof(smc_hostname));
	(*len) += sizeof(*fce);
}

/* check if received message has a correct header length and contains valid
 * heading and trailing eyecatchers
 */
static bool smc_clc_msg_hdr_valid(struct smc_clc_msg_hdr *clcm, bool check_trl)
{
	struct smc_clc_msg_accept_confirm_v2 *clc_v2;
	struct smc_clc_msg_proposal *pclc;
	struct smc_clc_msg_decline *dclc;
	struct smc_clc_msg_trail *trl;

	if (memcmp(clcm->eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER)) &&
	    memcmp(clcm->eyecatcher, SMCD_EYECATCHER, sizeof(SMCD_EYECATCHER)))
		return false;
	switch (clcm->type) {
	case SMC_CLC_PROPOSAL:
		pclc = (struct smc_clc_msg_proposal *)clcm;
		if (!smc_clc_msg_prop_valid(pclc))
			return false;
		trl = (struct smc_clc_msg_trail *)
			((u8 *)pclc + ntohs(pclc->hdr.length) - sizeof(*trl));
		break;
	case SMC_CLC_ACCEPT:
	case SMC_CLC_CONFIRM:
		clc_v2 = (struct smc_clc_msg_accept_confirm_v2 *)clcm;
		if (!smc_clc_msg_acc_conf_valid(clc_v2))
			return false;
		trl = (struct smc_clc_msg_trail *)
			((u8 *)clc_v2 + ntohs(clc_v2->hdr.length) -
							sizeof(*trl));
		break;
	case SMC_CLC_DECLINE:
		dclc = (struct smc_clc_msg_decline *)clcm;
		if (ntohs(dclc->hdr.length) != sizeof(*dclc))
			return false;
		trl = &dclc->trl;
		break;
	default:
		return false;
	}
	if (check_trl &&
	    memcmp(trl->eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER)) &&
	    memcmp(trl->eyecatcher, SMCD_EYECATCHER, sizeof(SMCD_EYECATCHER)))
		return false;
	return true;
}

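/* The proposal carries the subnet/prefix information of the interface the
 * clcsock is bound to: one IPv4 subnet with its prefix length, or up to
 * SMC_CLC_MAX_V6_PREFIX IPv6 prefixes.  The helpers below fill these areas on
 * the sending side and match them against the local interfaces on the
 * receiving side.
 */
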
/* find ipv4 addr on device and get the prefix len, fill CLC proposal msg */
static int smc_clc_prfx_set4_rcu(struct dst_entry *dst, __be32 ipv4,
				 struct smc_clc_msg_proposal_prefix *prop)
{
	struct in_device *in_dev = __in_dev_get_rcu(dst->dev);
	const struct in_ifaddr *ifa;

	if (!in_dev)
		return -ENODEV;

	in_dev_for_each_ifa_rcu(ifa, in_dev) {
		if (!inet_ifa_match(ipv4, ifa))
			continue;
		prop->prefix_len = inet_mask_len(ifa->ifa_mask);
		prop->outgoing_subnet = ifa->ifa_address & ifa->ifa_mask;
		/* prop->ipv6_prefixes_cnt = 0; already done by memset before */
		return 0;
	}
	return -ENOENT;
}

/* fill CLC proposal msg with ipv6 prefixes from device */
static int smc_clc_prfx_set6_rcu(struct dst_entry *dst,
				 struct smc_clc_msg_proposal_prefix *prop,
				 struct smc_clc_ipv6_prefix *ipv6_prfx)
{
#if IS_ENABLED(CONFIG_IPV6)
	struct inet6_dev *in6_dev = __in6_dev_get(dst->dev);
	struct inet6_ifaddr *ifa;
	int cnt = 0;

	if (!in6_dev)
		return -ENODEV;
	/* use a maximum of 8 IPv6 prefixes from device */
	list_for_each_entry(ifa, &in6_dev->addr_list, if_list) {
		if (ipv6_addr_type(&ifa->addr) & IPV6_ADDR_LINKLOCAL)
			continue;
		ipv6_addr_prefix(&ipv6_prfx[cnt].prefix,
				 &ifa->addr, ifa->prefix_len);
		ipv6_prfx[cnt].prefix_len = ifa->prefix_len;
		cnt++;
		if (cnt == SMC_CLC_MAX_V6_PREFIX)
			break;
	}
	prop->ipv6_prefixes_cnt = cnt;
	if (cnt)
		return 0;
#endif
	return -ENOENT;
}

/* retrieve and set prefixes in CLC proposal msg */
static int smc_clc_prfx_set(struct socket *clcsock,
			    struct smc_clc_msg_proposal_prefix *prop,
			    struct smc_clc_ipv6_prefix *ipv6_prfx)
{
	struct dst_entry *dst = sk_dst_get(clcsock->sk);
	struct sockaddr_storage addrs;
	struct sockaddr_in6 *addr6;
	struct sockaddr_in *addr;
	int rc = -ENOENT;

	if (!dst) {
		rc = -ENOTCONN;
		goto out;
	}
	if (!dst->dev) {
		rc = -ENODEV;
		goto out_rel;
	}
	/* get address to which the internal TCP socket is bound */
	if (kernel_getsockname(clcsock, (struct sockaddr *)&addrs) < 0)
		goto out_rel;
	/* analyze IP specific data of net_device belonging to TCP socket */
	addr6 = (struct sockaddr_in6 *)&addrs;
	rcu_read_lock();
	if (addrs.ss_family == PF_INET) {
		/* IPv4 */
		addr = (struct sockaddr_in *)&addrs;
		rc = smc_clc_prfx_set4_rcu(dst, addr->sin_addr.s_addr, prop);
	} else if (ipv6_addr_v4mapped(&addr6->sin6_addr)) {
		/* mapped IPv4 address - peer is IPv4 only */
		rc = smc_clc_prfx_set4_rcu(dst, addr6->sin6_addr.s6_addr32[3],
					   prop);
	} else {
		/* IPv6 */
		rc = smc_clc_prfx_set6_rcu(dst, prop, ipv6_prfx);
	}
	rcu_read_unlock();
out_rel:
	dst_release(dst);
out:
	return rc;
}

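/* Sketch of the sending side: smc_clc_send_proposal() below fills the prefix
 * area of the proposal roughly like this before computing the total length:
 *
 *	rc = smc_clc_prfx_set(smc->clcsock, pclc_prfx, ipv6_prfx);
 *	if (rc)
 *		... drop SMC V1 (SMC_TYPE_N) or decline with SMC_CLC_DECL_CNFERR
 */
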
/* match ipv4 addrs of dev against addr in CLC proposal */
static int smc_clc_prfx_match4_rcu(struct net_device *dev,
				   struct smc_clc_msg_proposal_prefix *prop)
{
	struct in_device *in_dev = __in_dev_get_rcu(dev);
	const struct in_ifaddr *ifa;

	if (!in_dev)
		return -ENODEV;
	in_dev_for_each_ifa_rcu(ifa, in_dev) {
		if (prop->prefix_len == inet_mask_len(ifa->ifa_mask) &&
		    inet_ifa_match(prop->outgoing_subnet, ifa))
			return 0;
	}

	return -ENOENT;
}

/* match ipv6 addrs of dev against addrs in CLC proposal */
static int smc_clc_prfx_match6_rcu(struct net_device *dev,
				   struct smc_clc_msg_proposal_prefix *prop)
{
#if IS_ENABLED(CONFIG_IPV6)
	struct inet6_dev *in6_dev = __in6_dev_get(dev);
	struct smc_clc_ipv6_prefix *ipv6_prfx;
	struct inet6_ifaddr *ifa;
	int i, max;

	if (!in6_dev)
		return -ENODEV;
	/* ipv6 prefix list starts behind smc_clc_msg_proposal_prefix */
	ipv6_prfx = (struct smc_clc_ipv6_prefix *)((u8 *)prop + sizeof(*prop));
	max = min_t(u8, prop->ipv6_prefixes_cnt, SMC_CLC_MAX_V6_PREFIX);
	list_for_each_entry(ifa, &in6_dev->addr_list, if_list) {
		if (ipv6_addr_type(&ifa->addr) & IPV6_ADDR_LINKLOCAL)
			continue;
		for (i = 0; i < max; i++) {
			if (ifa->prefix_len == ipv6_prfx[i].prefix_len &&
			    ipv6_prefix_equal(&ifa->addr, &ipv6_prfx[i].prefix,
					      ifa->prefix_len))
				return 0;
		}
	}
#endif
	return -ENOENT;
}

/* check if proposed prefixes match one of our device prefixes */
int smc_clc_prfx_match(struct socket *clcsock,
		       struct smc_clc_msg_proposal_prefix *prop)
{
	struct dst_entry *dst = sk_dst_get(clcsock->sk);
	int rc;

	if (!dst) {
		rc = -ENOTCONN;
		goto out;
	}
	if (!dst->dev) {
		rc = -ENODEV;
		goto out_rel;
	}
	rcu_read_lock();
	if (!prop->ipv6_prefixes_cnt)
		rc = smc_clc_prfx_match4_rcu(dst->dev, prop);
	else
		rc = smc_clc_prfx_match6_rcu(dst->dev, prop);
	rcu_read_unlock();
out_rel:
	dst_release(dst);
out:
	return rc;
}

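/* Sketch of the receiving side: the listen path in af_smc.c is expected to
 * call smc_clc_prfx_match(new_smc->clcsock, prop) on an arriving proposal and
 * to decline the connection when no local interface prefix matches.
 */
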
/* Wait for data on the tcp-socket, analyze received data
 * Returns:
 * 0 if success and it was not a decline that we received.
 * SMC_CLC_DECL_PEERDECL if decline received for fallback w/o another decl send.
 * clcsock error, -EINTR, -ECONNRESET, -EPROTO otherwise.
 */
int smc_clc_wait_msg(struct smc_sock *smc, void *buf, int buflen,
		     u8 expected_type, unsigned long timeout)
{
	long rcvtimeo = smc->clcsock->sk->sk_rcvtimeo;
	struct sock *clc_sk = smc->clcsock->sk;
	struct smc_clc_msg_hdr *clcm = buf;
	struct msghdr msg = {NULL, 0};
	int reason_code = 0;
	struct kvec vec = {buf, buflen};
	int len, datlen, recvlen;
	bool check_trl = true;
	int krflags;

	/* peek the first few bytes to determine length of data to receive
	 * so we don't consume any subsequent CLC message or payload data
	 * in the TCP byte stream
	 */
	/*
	 * Caller must make sure that buflen is no less than
	 * sizeof(struct smc_clc_msg_hdr)
	 */
	krflags = MSG_PEEK | MSG_WAITALL;
	clc_sk->sk_rcvtimeo = timeout;
	iov_iter_kvec(&msg.msg_iter, READ, &vec, 1,
			sizeof(struct smc_clc_msg_hdr));
	len = sock_recvmsg(smc->clcsock, &msg, krflags);
	if (signal_pending(current)) {
		reason_code = -EINTR;
		clc_sk->sk_err = EINTR;
		smc->sk.sk_err = EINTR;
		goto out;
	}
	if (clc_sk->sk_err) {
		reason_code = -clc_sk->sk_err;
		if (clc_sk->sk_err == EAGAIN &&
		    expected_type == SMC_CLC_DECLINE)
			clc_sk->sk_err = 0; /* reset for fallback usage */
		else
			smc->sk.sk_err = clc_sk->sk_err;
		goto out;
	}
	if (!len) { /* peer has performed orderly shutdown */
		smc->sk.sk_err = ECONNRESET;
		reason_code = -ECONNRESET;
		goto out;
	}
	if (len < 0) {
		if (len != -EAGAIN || expected_type != SMC_CLC_DECLINE)
			smc->sk.sk_err = -len;
		reason_code = len;
		goto out;
	}
	datlen = ntohs(clcm->length);
	if ((len < sizeof(struct smc_clc_msg_hdr)) ||
	    (clcm->version < SMC_V1) ||
	    ((clcm->type != SMC_CLC_DECLINE) &&
	     (clcm->type != expected_type))) {
		smc->sk.sk_err = EPROTO;
		reason_code = -EPROTO;
		goto out;
	}

	/* receive the complete CLC message */
	memset(&msg, 0, sizeof(struct msghdr));
	if (datlen > buflen) {
		check_trl = false;
		recvlen = buflen;
	} else {
		recvlen = datlen;
	}
	iov_iter_kvec(&msg.msg_iter, READ, &vec, 1, recvlen);
	krflags = MSG_WAITALL;
	len = sock_recvmsg(smc->clcsock, &msg, krflags);
	if (len < recvlen || !smc_clc_msg_hdr_valid(clcm, check_trl)) {
		smc->sk.sk_err = EPROTO;
		reason_code = -EPROTO;
		goto out;
	}
	datlen -= len;
	while (datlen) {
		u8 tmp[SMC_CLC_RECV_BUF_LEN];

		vec.iov_base = &tmp;
		vec.iov_len = SMC_CLC_RECV_BUF_LEN;
		/* receive remaining proposal message */
		recvlen = datlen > SMC_CLC_RECV_BUF_LEN ?
						SMC_CLC_RECV_BUF_LEN : datlen;
		iov_iter_kvec(&msg.msg_iter, READ, &vec, 1, recvlen);
		len = sock_recvmsg(smc->clcsock, &msg, krflags);
		datlen -= len;
	}
	if (clcm->type == SMC_CLC_DECLINE) {
		struct smc_clc_msg_decline *dclc;

		dclc = (struct smc_clc_msg_decline *)clcm;
		reason_code = SMC_CLC_DECL_PEERDECL;
		smc->peer_diagnosis = ntohl(dclc->peer_diagnosis);
		if (((struct smc_clc_msg_decline *)buf)->hdr.typev2 &
						SMC_FIRST_CONTACT_MASK) {
			smc->conn.lgr->sync_err = 1;
			smc_lgr_terminate_sched(smc->conn.lgr);
		}
	}

out:
	clc_sk->sk_rcvtimeo = rcvtimeo;
	return reason_code;
}

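/* A client side caller typically looks roughly like this (sketch only; the
 * real call sites live in af_smc.c and use the CLC_WAIT_TIME timeout from
 * smc_clc.h):
 *
 *	struct smc_clc_msg_accept_confirm_v2 aclc;
 *	int rc;
 *
 *	rc = smc_clc_wait_msg(smc, &aclc, sizeof(aclc), SMC_CLC_ACCEPT,
 *			      CLC_WAIT_TIME);
 *	if (rc)
 *		... handle peer decline or clcsock error
 */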
/* send CLC DECLINE message across internal TCP socket */
int smc_clc_send_decline(struct smc_sock *smc, u32 peer_diag_info, u8 version)
{
	struct smc_clc_msg_decline dclc;
	struct msghdr msg;
	struct kvec vec;
	int len;

	memset(&dclc, 0, sizeof(dclc));
	memcpy(dclc.hdr.eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER));
	dclc.hdr.type = SMC_CLC_DECLINE;
	dclc.hdr.length = htons(sizeof(struct smc_clc_msg_decline));
	dclc.hdr.version = version;
	dclc.os_type = version == SMC_V1 ? 0 : SMC_CLC_OS_LINUX;
	dclc.hdr.typev2 = (peer_diag_info == SMC_CLC_DECL_SYNCERR) ?
						SMC_FIRST_CONTACT_MASK : 0;
	if ((!smc->conn.lgr || !smc->conn.lgr->is_smcd) &&
	    smc_ib_is_valid_local_systemid())
		memcpy(dclc.id_for_peer, local_systemid,
		       sizeof(local_systemid));
	dclc.peer_diagnosis = htonl(peer_diag_info);
	memcpy(dclc.trl.eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER));

	memset(&msg, 0, sizeof(msg));
	vec.iov_base = &dclc;
	vec.iov_len = sizeof(struct smc_clc_msg_decline);
	len = kernel_sendmsg(smc->clcsock, &msg, &vec, 1,
			     sizeof(struct smc_clc_msg_decline));
	if (len < 0 || len < sizeof(struct smc_clc_msg_decline))
		len = -EPROTO;
	return len > 0 ? 0 : len;
}

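/* Either peer may abort the handshake with a decline; callers pass one of the
 * SMC_CLC_DECL_* diagnosis codes from smc_clc.h, e.g. (sketch):
 *
 *	smc_clc_send_decline(smc, SMC_CLC_DECL_CNFERR, version);
 *
 * A SMC_CLC_DECL_SYNCERR diagnosis additionally sets the first contact flag,
 * so the receiving peer tears down its just created link group (see
 * smc_clc_wait_msg() above).
 */
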
/* send CLC PROPOSAL message across internal TCP socket */
int smc_clc_send_proposal(struct smc_sock *smc, struct smc_init_info *ini)
{
	struct smc_clc_smcd_v2_extension *smcd_v2_ext;
	struct smc_clc_msg_proposal_prefix *pclc_prfx;
	struct smc_clc_msg_proposal *pclc_base;
	struct smc_clc_smcd_gid_chid *gidchids;
	struct smc_clc_msg_proposal_area *pclc;
	struct smc_clc_ipv6_prefix *ipv6_prfx;
	struct smc_clc_v2_extension *v2_ext;
	struct smc_clc_msg_smcd *pclc_smcd;
	struct smc_clc_msg_trail *trl;
	int len, i, plen, rc;
	int reason_code = 0;
	struct kvec vec[8];
	struct msghdr msg;

	pclc = kzalloc(sizeof(*pclc), GFP_KERNEL);
	if (!pclc)
		return -ENOMEM;

	pclc_base = &pclc->pclc_base;
	pclc_smcd = &pclc->pclc_smcd;
	pclc_prfx = &pclc->pclc_prfx;
	ipv6_prfx = pclc->pclc_prfx_ipv6;
	v2_ext = &pclc->pclc_v2_ext;
	smcd_v2_ext = &pclc->pclc_smcd_v2_ext;
	gidchids = pclc->pclc_gidchids;
	trl = &pclc->pclc_trl;

	pclc_base->hdr.version = SMC_V2;
	pclc_base->hdr.typev1 = ini->smc_type_v1;
	pclc_base->hdr.typev2 = ini->smc_type_v2;
	plen = sizeof(*pclc_base) + sizeof(*pclc_smcd) + sizeof(*trl);

	/* retrieve ip prefixes for CLC proposal msg */
	if (ini->smc_type_v1 != SMC_TYPE_N) {
		rc = smc_clc_prfx_set(smc->clcsock, pclc_prfx, ipv6_prfx);
		if (rc) {
			if (ini->smc_type_v2 == SMC_TYPE_N) {
				kfree(pclc);
				return SMC_CLC_DECL_CNFERR;
			}
			pclc_base->hdr.typev1 = SMC_TYPE_N;
		} else {
			pclc_base->iparea_offset = htons(sizeof(*pclc_smcd));
			plen += sizeof(*pclc_prfx) +
					pclc_prfx->ipv6_prefixes_cnt *
					sizeof(ipv6_prfx[0]);
		}
	}

	/* build SMC Proposal CLC message */
	memcpy(pclc_base->hdr.eyecatcher, SMC_EYECATCHER,
	       sizeof(SMC_EYECATCHER));
	pclc_base->hdr.type = SMC_CLC_PROPOSAL;
	if (smcr_indicated(ini->smc_type_v1)) {
		/* add SMC-R specifics */
		memcpy(pclc_base->lcl.id_for_peer, local_systemid,
		       sizeof(local_systemid));
		memcpy(pclc_base->lcl.gid, ini->ib_gid, SMC_GID_SIZE);
		memcpy(pclc_base->lcl.mac, &ini->ib_dev->mac[ini->ib_port - 1],
		       ETH_ALEN);
	}
	if (smcd_indicated(ini->smc_type_v1)) {
		/* add SMC-D specifics */
		if (ini->ism_dev[0]) {
			pclc_smcd->ism.gid = htonll(ini->ism_dev[0]->local_gid);
			pclc_smcd->ism.chid =
				htons(smc_ism_get_chid(ini->ism_dev[0]));
		}
	}
	if (ini->smc_type_v2 == SMC_TYPE_N) {
		pclc_smcd->v2_ext_offset = 0;
	} else {
		u16 v2_ext_offset;
		u8 *eid = NULL;

		v2_ext_offset = sizeof(*pclc_smcd) -
			offsetofend(struct smc_clc_msg_smcd, v2_ext_offset);
		if (ini->smc_type_v1 != SMC_TYPE_N)
			v2_ext_offset += sizeof(*pclc_prfx) +
						pclc_prfx->ipv6_prefixes_cnt *
						sizeof(ipv6_prfx[0]);
		pclc_smcd->v2_ext_offset = htons(v2_ext_offset);
		v2_ext->hdr.eid_cnt = 0;
		v2_ext->hdr.ism_gid_cnt = ini->ism_offered_cnt;
		v2_ext->hdr.flag.release = SMC_RELEASE;
		v2_ext->hdr.flag.seid = 1;
		v2_ext->hdr.smcd_v2_ext_offset = htons(sizeof(*v2_ext) -
				offsetofend(struct smc_clnt_opts_area_hdr,
					    smcd_v2_ext_offset) +
				v2_ext->hdr.eid_cnt * SMC_MAX_EID_LEN);
		if (ini->ism_dev[0])
			smc_ism_get_system_eid(ini->ism_dev[0], &eid);
		else
			smc_ism_get_system_eid(ini->ism_dev[1], &eid);
		if (eid)
			memcpy(smcd_v2_ext->system_eid, eid, SMC_MAX_EID_LEN);
		plen += sizeof(*v2_ext) + sizeof(*smcd_v2_ext);
		if (ini->ism_offered_cnt) {
			for (i = 1; i <= ini->ism_offered_cnt; i++) {
				gidchids[i - 1].gid =
					htonll(ini->ism_dev[i]->local_gid);
				gidchids[i - 1].chid =
					htons(smc_ism_get_chid(ini->ism_dev[i]));
			}
			plen += ini->ism_offered_cnt *
					sizeof(struct smc_clc_smcd_gid_chid);
		}
	}
	pclc_base->hdr.length = htons(plen);
	memcpy(trl->eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER));

	/* send SMC Proposal CLC message */
	memset(&msg, 0, sizeof(msg));
	i = 0;
	vec[i].iov_base = pclc_base;
	vec[i++].iov_len = sizeof(*pclc_base);
	vec[i].iov_base = pclc_smcd;
	vec[i++].iov_len = sizeof(*pclc_smcd);
	if (ini->smc_type_v1 != SMC_TYPE_N) {
		vec[i].iov_base = pclc_prfx;
		vec[i++].iov_len = sizeof(*pclc_prfx);
		if (pclc_prfx->ipv6_prefixes_cnt > 0) {
			vec[i].iov_base = ipv6_prfx;
			vec[i++].iov_len = pclc_prfx->ipv6_prefixes_cnt *
					   sizeof(ipv6_prfx[0]);
		}
	}
	if (ini->smc_type_v2 != SMC_TYPE_N) {
		vec[i].iov_base = v2_ext;
		vec[i++].iov_len = sizeof(*v2_ext);
		vec[i].iov_base = smcd_v2_ext;
		vec[i++].iov_len = sizeof(*smcd_v2_ext);
		if (ini->ism_offered_cnt) {
			vec[i].iov_base = gidchids;
			vec[i++].iov_len = ini->ism_offered_cnt *
					sizeof(struct smc_clc_smcd_gid_chid);
		}
	}
	vec[i].iov_base = trl;
	vec[i++].iov_len = sizeof(*trl);
	/* due to the few bytes needed for clc-handshake this cannot block */
	len = kernel_sendmsg(smc->clcsock, &msg, vec, i, plen);
	if (len < 0) {
		smc->sk.sk_err = smc->clcsock->sk->sk_err;
		reason_code = -smc->sk.sk_err;
	} else if (len < ntohs(pclc_base->hdr.length)) {
		reason_code = -ENETUNREACH;
		smc->sk.sk_err = -reason_code;
	}

	kfree(pclc);
	return reason_code;
}

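/* On the wire a V2 proposal is thus the concatenation of: fixed header
 * (pclc_base), SMC-D info area (pclc_smcd), optional V1 prefix area plus its
 * IPv6 prefixes, optional V2 extension with SMC-D V2 extension and ISM
 * GID/CHID list, and the trailing eyecatcher - matching the length check in
 * smc_clc_msg_prop_valid() above.
 */
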
/* build and send CLC CONFIRM / ACCEPT message */
static int smc_clc_send_confirm_accept(struct smc_sock *smc,
				       struct smc_clc_msg_accept_confirm_v2 *clc_v2,
				       int first_contact, u8 version)
{
	struct smc_connection *conn = &smc->conn;
	struct smc_clc_msg_accept_confirm *clc;
	struct smc_clc_first_contact_ext fce;
	struct smc_clc_msg_trail trl;
	struct kvec vec[3];
	struct msghdr msg;
	int i, len;

	/* send SMC Confirm CLC msg */
	clc = (struct smc_clc_msg_accept_confirm *)clc_v2;
	clc->hdr.version = version;	/* SMC version */
	if (first_contact)
		clc->hdr.typev2 |= SMC_FIRST_CONTACT_MASK;
	if (conn->lgr->is_smcd) {
		/* SMC-D specific settings */
		memcpy(clc->hdr.eyecatcher, SMCD_EYECATCHER,
		       sizeof(SMCD_EYECATCHER));
		clc->hdr.typev1 = SMC_TYPE_D;
		clc->d0.gid = conn->lgr->smcd->local_gid;
		clc->d0.token = conn->rmb_desc->token;
		clc->d0.dmbe_size = conn->rmbe_size_short;
		clc->d0.dmbe_idx = 0;
		memcpy(&clc->d0.linkid, conn->lgr->id, SMC_LGR_ID_SIZE);
		if (version == SMC_V1) {
			clc->hdr.length = htons(SMCD_CLC_ACCEPT_CONFIRM_LEN);
		} else {
			u8 *eid = NULL;

			clc_v2->chid = htons(smc_ism_get_chid(conn->lgr->smcd));
			smc_ism_get_system_eid(conn->lgr->smcd, &eid);
			if (eid)
				memcpy(clc_v2->eid, eid, SMC_MAX_EID_LEN);
			len = SMCD_CLC_ACCEPT_CONFIRM_LEN_V2;
			if (first_contact)
				smc_clc_fill_fce(&fce, &len);
			clc_v2->hdr.length = htons(len);
		}
		memcpy(trl.eyecatcher, SMCD_EYECATCHER,
		       sizeof(SMCD_EYECATCHER));
	} else {
		struct smc_link *link = conn->lnk;

		/* SMC-R specific settings */
		memcpy(clc->hdr.eyecatcher, SMC_EYECATCHER,
		       sizeof(SMC_EYECATCHER));
		clc->hdr.typev1 = SMC_TYPE_R;
		clc->hdr.length = htons(SMCR_CLC_ACCEPT_CONFIRM_LEN);
		memcpy(clc->r0.lcl.id_for_peer, local_systemid,
		       sizeof(local_systemid));
		memcpy(&clc->r0.lcl.gid, link->gid, SMC_GID_SIZE);
		memcpy(&clc->r0.lcl.mac, &link->smcibdev->mac[link->ibport - 1],
		       ETH_ALEN);
		hton24(clc->r0.qpn, link->roce_qp->qp_num);
		clc->r0.rmb_rkey =
			htonl(conn->rmb_desc->mr_rx[link->link_idx]->rkey);
		clc->r0.rmbe_idx = 1; /* for now: 1 RMB = 1 RMBE */
		clc->r0.rmbe_alert_token = htonl(conn->alert_token_local);
		switch (clc->hdr.type) {
		case SMC_CLC_ACCEPT:
			clc->r0.qp_mtu = link->path_mtu;
			break;
		case SMC_CLC_CONFIRM:
			clc->r0.qp_mtu = min(link->path_mtu, link->peer_mtu);
			break;
		}
		clc->r0.rmbe_size = conn->rmbe_size_short;
		clc->r0.rmb_dma_addr = cpu_to_be64((u64)sg_dma_address
				(conn->rmb_desc->sgt[link->link_idx].sgl));
		hton24(clc->r0.psn, link->psn_initial);
		memcpy(trl.eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER));
	}

	memset(&msg, 0, sizeof(msg));
	i = 0;
	vec[i].iov_base = clc_v2;
	if (version > SMC_V1)
		vec[i++].iov_len = SMCD_CLC_ACCEPT_CONFIRM_LEN_V2 - sizeof(trl);
	else
		vec[i++].iov_len = (clc->hdr.typev1 == SMC_TYPE_D ?
						SMCD_CLC_ACCEPT_CONFIRM_LEN :
						SMCR_CLC_ACCEPT_CONFIRM_LEN) -
				   sizeof(trl);
	if (version > SMC_V1 && first_contact) {
		vec[i].iov_base = &fce;
		vec[i++].iov_len = sizeof(fce);
	}
	vec[i].iov_base = &trl;
	vec[i++].iov_len = sizeof(trl);
	/* pass all populated kvec entries; their lengths add up to hdr.length */
	return kernel_sendmsg(smc->clcsock, &msg, vec, i,
			      ntohs(clc->hdr.length));
}

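/* smc_clc_send_confirm() and smc_clc_send_accept() below are thin wrappers
 * around the common builder: the client sends CONFIRM, the server (new_smc)
 * sends ACCEPT, and both merely check that the complete message went out.
 */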
/* send CLC CONFIRM message across internal TCP socket */
int smc_clc_send_confirm(struct smc_sock *smc, bool clnt_first_contact,
			 u8 version)
{
	struct smc_clc_msg_accept_confirm_v2 cclc_v2;
	int reason_code = 0;
	int len;

	/* send SMC Confirm CLC msg */
	memset(&cclc_v2, 0, sizeof(cclc_v2));
	cclc_v2.hdr.type = SMC_CLC_CONFIRM;
	len = smc_clc_send_confirm_accept(smc, &cclc_v2, clnt_first_contact,
					  version);
	if (len < ntohs(cclc_v2.hdr.length)) {
		if (len >= 0) {
			reason_code = -ENETUNREACH;
			smc->sk.sk_err = -reason_code;
		} else {
			smc->sk.sk_err = smc->clcsock->sk->sk_err;
			reason_code = -smc->sk.sk_err;
		}
	}
	return reason_code;
}

/* send CLC ACCEPT message across internal TCP socket */
int smc_clc_send_accept(struct smc_sock *new_smc, bool srv_first_contact,
			u8 version)
{
	struct smc_clc_msg_accept_confirm_v2 aclc_v2;
	int len;

	memset(&aclc_v2, 0, sizeof(aclc_v2));
	aclc_v2.hdr.type = SMC_CLC_ACCEPT;
	len = smc_clc_send_confirm_accept(new_smc, &aclc_v2, srv_first_contact,
					  version);
	if (len < ntohs(aclc_v2.hdr.length))
		len = len >= 0 ? -EPROTO : -new_smc->clcsock->sk->sk_err;

	return len > 0 ? 0 : len;
}

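/* smc_clc_init() presumably runs once at module init time (from smc_init()
 * in af_smc.c) and caches the blank padded local hostname used in the first
 * contact extension.
 */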
void __init smc_clc_init(void)
{
	struct new_utsname *u;

	memset(smc_hostname, _S, sizeof(smc_hostname)); /* ASCII blanks */
	u = utsname();
	memcpy(smc_hostname, u->nodename,
	       min_t(size_t, strlen(u->nodename), sizeof(smc_hostname)));
}