blob: de73432bd72f6d574341f9c9a11c2149a25f0ac8 [file] [log] [blame]
// SPDX-License-Identifier: GPL-2.0
/*
 * Shared Memory Communications over RDMA (SMC-R) and RoCE
 *
 * Link Layer Control (LLC)
 *
 * Copyright IBM Corp. 2016
 *
 * Author(s):  Klaus Wacker <Klaus.Wacker@de.ibm.com>
 *             Ursula Braun <ubraun@linux.vnet.ibm.com>
 */
12
#include <net/tcp.h>
#include <rdma/ib_verbs.h>

#include "smc.h"
#include "smc_core.h"
#include "smc_clc.h"
#include "smc_llc.h"
#include "smc_pnet.h"
Ursula Braun9bf9abe2017-01-09 16:55:21 +010021
Stefan Raspl0f627122018-03-01 13:51:26 +010022#define SMC_LLC_DATA_LEN 40
23
24struct smc_llc_hdr {
25 struct smc_wr_rx_hdr common;
26 u8 length; /* 44 */
Karsten Graul52bedf32018-03-01 13:51:32 +010027#if defined(__BIG_ENDIAN_BITFIELD)
28 u8 reserved:4,
29 add_link_rej_rsn:4;
30#elif defined(__LITTLE_ENDIAN_BITFIELD)
31 u8 add_link_rej_rsn:4,
32 reserved:4;
33#endif
Stefan Raspl0f627122018-03-01 13:51:26 +010034 u8 flags;
35};
36
Karsten Graul75d320d2018-03-01 13:51:31 +010037#define SMC_LLC_FLAG_NO_RMBE_EYEC 0x03
38
Stefan Raspl0f627122018-03-01 13:51:26 +010039struct smc_llc_msg_confirm_link { /* type 0x01 */
40 struct smc_llc_hdr hd;
41 u8 sender_mac[ETH_ALEN];
42 u8 sender_gid[SMC_GID_SIZE];
43 u8 sender_qp_num[3];
44 u8 link_num;
45 u8 link_uid[SMC_LGR_ID_SIZE];
46 u8 max_links;
47 u8 reserved[9];
48};
49
Karsten Graul52bedf32018-03-01 13:51:32 +010050#define SMC_LLC_FLAG_ADD_LNK_REJ 0x40
51#define SMC_LLC_REJ_RSN_NO_ALT_PATH 1
52
53#define SMC_LLC_ADD_LNK_MAX_LINKS 2
54
55struct smc_llc_msg_add_link { /* type 0x02 */
56 struct smc_llc_hdr hd;
57 u8 sender_mac[ETH_ALEN];
58 u8 reserved2[2];
59 u8 sender_gid[SMC_GID_SIZE];
60 u8 sender_qp_num[3];
61 u8 link_num;
Karsten Graulfbed3b32020-05-01 12:48:04 +020062#if defined(__BIG_ENDIAN_BITFIELD)
63 u8 reserved3 : 4,
64 qp_mtu : 4;
65#elif defined(__LITTLE_ENDIAN_BITFIELD)
66 u8 qp_mtu : 4,
67 reserved3 : 4;
68#endif
Karsten Graul52bedf32018-03-01 13:51:32 +010069 u8 initial_psn[3];
70 u8 reserved[8];
71};
72
Karsten Graul87f88cd2020-05-03 14:38:41 +020073struct smc_llc_msg_add_link_cont_rt {
74 __be32 rmb_key;
75 __be32 rmb_key_new;
76 __be64 rmb_vaddr_new;
77};
78
79#define SMC_LLC_RKEYS_PER_CONT_MSG 2
80
81struct smc_llc_msg_add_link_cont { /* type 0x03 */
82 struct smc_llc_hdr hd;
83 u8 link_num;
84 u8 num_rkeys;
85 u8 reserved2[2];
86 struct smc_llc_msg_add_link_cont_rt rt[SMC_LLC_RKEYS_PER_CONT_MSG];
87 u8 reserved[4];
88} __packed; /* format defined in RFC7609 */
89
Karsten Graul52bedf32018-03-01 13:51:32 +010090#define SMC_LLC_FLAG_DEL_LINK_ALL 0x40
91#define SMC_LLC_FLAG_DEL_LINK_ORDERLY 0x20
92
93struct smc_llc_msg_del_link { /* type 0x04 */
94 struct smc_llc_hdr hd;
95 u8 link_num;
96 __be32 reason;
97 u8 reserved[35];
98} __packed; /* format defined in RFC7609 */
99
Karsten Graul313164d2018-03-01 13:51:29 +0100100struct smc_llc_msg_test_link { /* type 0x07 */
101 struct smc_llc_hdr hd;
102 u8 user_data[16];
103 u8 reserved[24];
104};
105
Karsten Graul4ed75de2018-03-01 13:51:30 +0100106struct smc_rmb_rtoken {
107 union {
108 u8 num_rkeys; /* first rtoken byte of CONFIRM LINK msg */
109 /* is actually the num of rtokens, first */
110 /* rtoken is always for the current link */
111 u8 link_id; /* link id of the rtoken */
112 };
113 __be32 rmb_key;
114 __be64 rmb_vaddr;
115} __packed; /* format defined in RFC7609 */
116
117#define SMC_LLC_RKEYS_PER_MSG 3
118
119struct smc_llc_msg_confirm_rkey { /* type 0x06 */
120 struct smc_llc_hdr hd;
121 struct smc_rmb_rtoken rtoken[SMC_LLC_RKEYS_PER_MSG];
122 u8 reserved;
123};
124
Karsten Graul4ed75de2018-03-01 13:51:30 +0100125#define SMC_LLC_DEL_RKEY_MAX 8
Karsten Graul3bc67e02020-04-30 15:55:48 +0200126#define SMC_LLC_FLAG_RKEY_RETRY 0x10
Karsten Graul4ed75de2018-03-01 13:51:30 +0100127#define SMC_LLC_FLAG_RKEY_NEG 0x20
128
129struct smc_llc_msg_delete_rkey { /* type 0x09 */
130 struct smc_llc_hdr hd;
131 u8 num_rkeys;
132 u8 err_mask;
133 u8 reserved[2];
134 __be32 rkey[8];
135 u8 reserved2[4];
136};
137
Stefan Raspl0f627122018-03-01 13:51:26 +0100138union smc_llc_msg {
139 struct smc_llc_msg_confirm_link confirm_link;
Karsten Graul52bedf32018-03-01 13:51:32 +0100140 struct smc_llc_msg_add_link add_link;
Karsten Graul87f88cd2020-05-03 14:38:41 +0200141 struct smc_llc_msg_add_link_cont add_link_cont;
Karsten Graul52bedf32018-03-01 13:51:32 +0100142 struct smc_llc_msg_del_link delete_link;
Karsten Graul4ed75de2018-03-01 13:51:30 +0100143
144 struct smc_llc_msg_confirm_rkey confirm_rkey;
Karsten Graul4ed75de2018-03-01 13:51:30 +0100145 struct smc_llc_msg_delete_rkey delete_rkey;
146
Karsten Graul313164d2018-03-01 13:51:29 +0100147 struct smc_llc_msg_test_link test_link;
Stefan Raspl0f627122018-03-01 13:51:26 +0100148 struct {
149 struct smc_llc_hdr hdr;
150 u8 data[SMC_LLC_DATA_LEN];
151 } raw;
152};
153
154#define SMC_LLC_FLAG_RESP 0x80
155
Karsten Graul6c8968c2020-04-29 17:10:46 +0200156struct smc_llc_qentry {
157 struct list_head list;
158 struct smc_link *link;
159 union smc_llc_msg msg;
160};
161
Karsten Graul555da9a2020-04-30 15:55:38 +0200162struct smc_llc_qentry *smc_llc_flow_qentry_clr(struct smc_llc_flow *flow)
163{
164 struct smc_llc_qentry *qentry = flow->qentry;
165
166 flow->qentry = NULL;
167 return qentry;
168}
169
170void smc_llc_flow_qentry_del(struct smc_llc_flow *flow)
171{
172 struct smc_llc_qentry *qentry;
173
174 if (flow->qentry) {
175 qentry = flow->qentry;
176 flow->qentry = NULL;
177 kfree(qentry);
178 }
179}
180
181static inline void smc_llc_flow_qentry_set(struct smc_llc_flow *flow,
182 struct smc_llc_qentry *qentry)
183{
184 flow->qentry = qentry;
185}
186
187/* try to start a new llc flow, initiated by an incoming llc msg */
188static bool smc_llc_flow_start(struct smc_llc_flow *flow,
189 struct smc_llc_qentry *qentry)
190{
191 struct smc_link_group *lgr = qentry->link->lgr;
192
193 spin_lock_bh(&lgr->llc_flow_lock);
194 if (flow->type) {
195 /* a flow is already active */
196 if ((qentry->msg.raw.hdr.common.type == SMC_LLC_ADD_LINK ||
197 qentry->msg.raw.hdr.common.type == SMC_LLC_DELETE_LINK) &&
198 !lgr->delayed_event) {
199 lgr->delayed_event = qentry;
200 } else {
201 /* forget this llc request */
202 kfree(qentry);
203 }
204 spin_unlock_bh(&lgr->llc_flow_lock);
205 return false;
206 }
207 switch (qentry->msg.raw.hdr.common.type) {
208 case SMC_LLC_ADD_LINK:
209 flow->type = SMC_LLC_FLOW_ADD_LINK;
210 break;
211 case SMC_LLC_DELETE_LINK:
212 flow->type = SMC_LLC_FLOW_DEL_LINK;
213 break;
214 case SMC_LLC_CONFIRM_RKEY:
215 case SMC_LLC_DELETE_RKEY:
216 flow->type = SMC_LLC_FLOW_RKEY;
217 break;
218 default:
219 flow->type = SMC_LLC_FLOW_NONE;
220 }
221 if (qentry == lgr->delayed_event)
222 lgr->delayed_event = NULL;
223 spin_unlock_bh(&lgr->llc_flow_lock);
224 smc_llc_flow_qentry_set(flow, qentry);
225 return true;
226}
227
228/* start a new local llc flow, wait till current flow finished */
229int smc_llc_flow_initiate(struct smc_link_group *lgr,
230 enum smc_llc_flowtype type)
231{
232 enum smc_llc_flowtype allowed_remote = SMC_LLC_FLOW_NONE;
233 int rc;
234
235 /* all flows except confirm_rkey and delete_rkey are exclusive,
236 * confirm/delete rkey flows can run concurrently (local and remote)
237 */
238 if (type == SMC_LLC_FLOW_RKEY)
239 allowed_remote = SMC_LLC_FLOW_RKEY;
240again:
241 if (list_empty(&lgr->list))
242 return -ENODEV;
243 spin_lock_bh(&lgr->llc_flow_lock);
244 if (lgr->llc_flow_lcl.type == SMC_LLC_FLOW_NONE &&
245 (lgr->llc_flow_rmt.type == SMC_LLC_FLOW_NONE ||
246 lgr->llc_flow_rmt.type == allowed_remote)) {
247 lgr->llc_flow_lcl.type = type;
248 spin_unlock_bh(&lgr->llc_flow_lock);
249 return 0;
250 }
251 spin_unlock_bh(&lgr->llc_flow_lock);
252 rc = wait_event_interruptible_timeout(lgr->llc_waiter,
253 (lgr->llc_flow_lcl.type == SMC_LLC_FLOW_NONE &&
254 (lgr->llc_flow_rmt.type == SMC_LLC_FLOW_NONE ||
255 lgr->llc_flow_rmt.type == allowed_remote)),
256 SMC_LLC_WAIT_TIME);
257 if (!rc)
258 return -ETIMEDOUT;
259 goto again;
260}
261
262/* finish the current llc flow */
263void smc_llc_flow_stop(struct smc_link_group *lgr, struct smc_llc_flow *flow)
264{
265 spin_lock_bh(&lgr->llc_flow_lock);
266 memset(flow, 0, sizeof(*flow));
267 flow->type = SMC_LLC_FLOW_NONE;
268 spin_unlock_bh(&lgr->llc_flow_lock);
269 if (!list_empty(&lgr->list) && lgr->delayed_event &&
270 flow == &lgr->llc_flow_lcl)
271 schedule_work(&lgr->llc_event_work);
272 else
273 wake_up_interruptible(&lgr->llc_waiter);
274}
275
276/* lnk is optional and used for early wakeup when link goes down, useful in
277 * cases where we wait for a response on the link after we sent a request
278 */
279struct smc_llc_qentry *smc_llc_wait(struct smc_link_group *lgr,
280 struct smc_link *lnk,
281 int time_out, u8 exp_msg)
282{
283 struct smc_llc_flow *flow = &lgr->llc_flow_lcl;
284
285 wait_event_interruptible_timeout(lgr->llc_waiter,
286 (flow->qentry ||
287 (lnk && !smc_link_usable(lnk)) ||
288 list_empty(&lgr->list)),
289 time_out);
290 if (!flow->qentry ||
291 (lnk && !smc_link_usable(lnk)) || list_empty(&lgr->list)) {
292 smc_llc_flow_qentry_del(flow);
293 goto out;
294 }
295 if (exp_msg && flow->qentry->msg.raw.hdr.common.type != exp_msg) {
296 if (exp_msg == SMC_LLC_ADD_LINK &&
297 flow->qentry->msg.raw.hdr.common.type ==
298 SMC_LLC_DELETE_LINK) {
299 /* flow_start will delay the unexpected msg */
300 smc_llc_flow_start(&lgr->llc_flow_lcl,
301 smc_llc_flow_qentry_clr(flow));
302 return NULL;
303 }
304 smc_llc_flow_qentry_del(flow);
305 }
306out:
307 return flow->qentry;
308}
309
Ursula Braun9bf9abe2017-01-09 16:55:21 +0100310/********************************** send *************************************/
311
/* per-send private context; currently nothing needs to be remembered */
struct smc_llc_tx_pend {
};
314
315/* handler for send/transmission completion of an LLC msg */
316static void smc_llc_tx_handler(struct smc_wr_tx_pend_priv *pend,
317 struct smc_link *link,
318 enum ib_wc_status wc_status)
319{
320 /* future work: handle wc_status error for recovery and failover */
321}
322
323/**
324 * smc_llc_add_pending_send() - add LLC control message to pending WQE transmits
325 * @link: Pointer to SMC link used for sending LLC control message.
326 * @wr_buf: Out variable returning pointer to work request payload buffer.
327 * @pend: Out variable returning pointer to private pending WR tracking.
328 * It's the context the transmit complete handler will get.
329 *
330 * Reserves and pre-fills an entry for a pending work request send/tx.
331 * Used by mid-level smc_llc_send_msg() to prepare for later actual send/tx.
332 * Can sleep due to smc_get_ctrl_buf (if not in softirq context).
333 *
334 * Return: 0 on success, otherwise an error value.
335 */
336static int smc_llc_add_pending_send(struct smc_link *link,
337 struct smc_wr_buf **wr_buf,
338 struct smc_wr_tx_pend_priv **pend)
339{
340 int rc;
341
Ursula Braunad6f3172019-02-04 13:44:44 +0100342 rc = smc_wr_tx_get_free_slot(link, smc_llc_tx_handler, wr_buf, NULL,
343 pend);
Ursula Braun9bf9abe2017-01-09 16:55:21 +0100344 if (rc < 0)
345 return rc;
346 BUILD_BUG_ON_MSG(
347 sizeof(union smc_llc_msg) > SMC_WR_BUF_SIZE,
348 "must increase SMC_WR_BUF_SIZE to at least sizeof(struct smc_llc_msg)");
349 BUILD_BUG_ON_MSG(
350 sizeof(union smc_llc_msg) != SMC_WR_TX_SIZE,
351 "must adapt SMC_WR_TX_SIZE to sizeof(struct smc_llc_msg); if not all smc_wr upper layer protocols use the same message size any more, must start to set link->wr_tx_sges[i].length on each individual smc_wr_tx_send()");
352 BUILD_BUG_ON_MSG(
353 sizeof(struct smc_llc_tx_pend) > SMC_WR_TX_PEND_PRIV_SIZE,
354 "must increase SMC_WR_TX_PEND_PRIV_SIZE to at least sizeof(struct smc_llc_tx_pend)");
355 return 0;
356}
357
358/* high-level API to send LLC confirm link */
Ursula Braun947541f2018-07-25 16:35:30 +0200359int smc_llc_send_confirm_link(struct smc_link *link,
Ursula Braun9bf9abe2017-01-09 16:55:21 +0100360 enum smc_llc_reqresp reqresp)
361{
Stefan Raspl00e5fb22018-07-23 13:53:10 +0200362 struct smc_link_group *lgr = smc_get_lgr(link);
Ursula Braun9bf9abe2017-01-09 16:55:21 +0100363 struct smc_llc_msg_confirm_link *confllc;
364 struct smc_wr_tx_pend_priv *pend;
365 struct smc_wr_buf *wr_buf;
366 int rc;
367
368 rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
369 if (rc)
370 return rc;
371 confllc = (struct smc_llc_msg_confirm_link *)wr_buf;
372 memset(confllc, 0, sizeof(*confllc));
373 confllc->hd.common.type = SMC_LLC_CONFIRM_LINK;
374 confllc->hd.length = sizeof(struct smc_llc_msg_confirm_link);
Karsten Graul75d320d2018-03-01 13:51:31 +0100375 confllc->hd.flags |= SMC_LLC_FLAG_NO_RMBE_EYEC;
Ursula Braun9bf9abe2017-01-09 16:55:21 +0100376 if (reqresp == SMC_LLC_RESP)
377 confllc->hd.flags |= SMC_LLC_FLAG_RESP;
Ursula Braun947541f2018-07-25 16:35:30 +0200378 memcpy(confllc->sender_mac, link->smcibdev->mac[link->ibport - 1],
379 ETH_ALEN);
Ursula Braun7005ada2018-07-25 16:35:31 +0200380 memcpy(confllc->sender_gid, link->gid, SMC_GID_SIZE);
Ursula Braun9bf9abe2017-01-09 16:55:21 +0100381 hton24(confllc->sender_qp_num, link->roce_qp->qp_num);
Karsten Graul2be922f2018-02-28 12:44:08 +0100382 confllc->link_num = link->link_id;
Ursula Braun9bf9abe2017-01-09 16:55:21 +0100383 memcpy(confllc->link_uid, lgr->id, SMC_LGR_ID_SIZE);
Karsten Graulb1570a82020-05-03 14:38:42 +0200384 confllc->max_links = SMC_LLC_ADD_LNK_MAX_LINKS;
Karsten Graul52bedf32018-03-01 13:51:32 +0100385 /* send llc message */
386 rc = smc_wr_tx_send(link, pend);
387 return rc;
388}
389
Karsten Graul44aa81c2018-05-15 17:04:55 +0200390/* send LLC confirm rkey request */
Karsten Graul3d88a212020-04-30 15:55:44 +0200391static int smc_llc_send_confirm_rkey(struct smc_link *send_link,
Karsten Graul44aa81c2018-05-15 17:04:55 +0200392 struct smc_buf_desc *rmb_desc)
393{
394 struct smc_llc_msg_confirm_rkey *rkeyllc;
395 struct smc_wr_tx_pend_priv *pend;
396 struct smc_wr_buf *wr_buf;
Karsten Graul3d88a212020-04-30 15:55:44 +0200397 struct smc_link *link;
398 int i, rc, rtok_ix;
Karsten Graul44aa81c2018-05-15 17:04:55 +0200399
Karsten Graul3d88a212020-04-30 15:55:44 +0200400 rc = smc_llc_add_pending_send(send_link, &wr_buf, &pend);
Karsten Graul44aa81c2018-05-15 17:04:55 +0200401 if (rc)
402 return rc;
403 rkeyllc = (struct smc_llc_msg_confirm_rkey *)wr_buf;
404 memset(rkeyllc, 0, sizeof(*rkeyllc));
405 rkeyllc->hd.common.type = SMC_LLC_CONFIRM_RKEY;
406 rkeyllc->hd.length = sizeof(struct smc_llc_msg_confirm_rkey);
Karsten Graul3d88a212020-04-30 15:55:44 +0200407
408 rtok_ix = 1;
409 for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
410 link = &send_link->lgr->lnk[i];
411 if (link->state == SMC_LNK_ACTIVE && link != send_link) {
412 rkeyllc->rtoken[rtok_ix].link_id = link->link_id;
413 rkeyllc->rtoken[rtok_ix].rmb_key =
414 htonl(rmb_desc->mr_rx[link->link_idx]->rkey);
415 rkeyllc->rtoken[rtok_ix].rmb_vaddr = cpu_to_be64(
416 (u64)sg_dma_address(
417 rmb_desc->sgt[link->link_idx].sgl));
418 rtok_ix++;
419 }
420 }
421 /* rkey of send_link is in rtoken[0] */
422 rkeyllc->rtoken[0].num_rkeys = rtok_ix - 1;
Karsten Graul44aa81c2018-05-15 17:04:55 +0200423 rkeyllc->rtoken[0].rmb_key =
Karsten Graul3d88a212020-04-30 15:55:44 +0200424 htonl(rmb_desc->mr_rx[send_link->link_idx]->rkey);
Karsten Graul44aa81c2018-05-15 17:04:55 +0200425 rkeyllc->rtoken[0].rmb_vaddr = cpu_to_be64(
Karsten Graul3d88a212020-04-30 15:55:44 +0200426 (u64)sg_dma_address(rmb_desc->sgt[send_link->link_idx].sgl));
Karsten Graul44aa81c2018-05-15 17:04:55 +0200427 /* send llc message */
Karsten Graul3d88a212020-04-30 15:55:44 +0200428 rc = smc_wr_tx_send(send_link, pend);
Karsten Graul44aa81c2018-05-15 17:04:55 +0200429 return rc;
430}
431
Karsten Graul60e03c62018-11-22 10:26:42 +0100432/* send LLC delete rkey request */
433static int smc_llc_send_delete_rkey(struct smc_link *link,
434 struct smc_buf_desc *rmb_desc)
435{
436 struct smc_llc_msg_delete_rkey *rkeyllc;
437 struct smc_wr_tx_pend_priv *pend;
438 struct smc_wr_buf *wr_buf;
439 int rc;
440
441 rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
442 if (rc)
443 return rc;
444 rkeyllc = (struct smc_llc_msg_delete_rkey *)wr_buf;
445 memset(rkeyllc, 0, sizeof(*rkeyllc));
446 rkeyllc->hd.common.type = SMC_LLC_DELETE_RKEY;
447 rkeyllc->hd.length = sizeof(struct smc_llc_msg_delete_rkey);
448 rkeyllc->num_rkeys = 1;
Karsten Graul387707f2020-04-29 17:10:40 +0200449 rkeyllc->rkey[0] = htonl(rmb_desc->mr_rx[link->link_idx]->rkey);
Karsten Graul60e03c62018-11-22 10:26:42 +0100450 /* send llc message */
451 rc = smc_wr_tx_send(link, pend);
452 return rc;
453}
454
Karsten Graul52bedf32018-03-01 13:51:32 +0100455/* send ADD LINK request or response */
Ursula Braun7005ada2018-07-25 16:35:31 +0200456int smc_llc_send_add_link(struct smc_link *link, u8 mac[], u8 gid[],
Karsten Graulfbed3b32020-05-01 12:48:04 +0200457 struct smc_link *link_new,
Karsten Graul52bedf32018-03-01 13:51:32 +0100458 enum smc_llc_reqresp reqresp)
459{
460 struct smc_llc_msg_add_link *addllc;
461 struct smc_wr_tx_pend_priv *pend;
462 struct smc_wr_buf *wr_buf;
463 int rc;
464
465 rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
466 if (rc)
467 return rc;
468 addllc = (struct smc_llc_msg_add_link *)wr_buf;
Karsten Graulfbed3b32020-05-01 12:48:04 +0200469
470 memset(addllc, 0, sizeof(*addllc));
471 addllc->hd.common.type = SMC_LLC_ADD_LINK;
472 addllc->hd.length = sizeof(struct smc_llc_msg_add_link);
473 if (reqresp == SMC_LLC_RESP)
474 addllc->hd.flags |= SMC_LLC_FLAG_RESP;
475 memcpy(addllc->sender_mac, mac, ETH_ALEN);
476 memcpy(addllc->sender_gid, gid, SMC_GID_SIZE);
477 if (link_new) {
478 addllc->link_num = link_new->link_id;
479 hton24(addllc->sender_qp_num, link_new->roce_qp->qp_num);
480 hton24(addllc->initial_psn, link_new->psn_initial);
481 if (reqresp == SMC_LLC_REQ)
482 addllc->qp_mtu = link_new->path_mtu;
483 else
484 addllc->qp_mtu = min(link_new->path_mtu,
485 link_new->peer_mtu);
486 }
Karsten Graul52bedf32018-03-01 13:51:32 +0100487 /* send llc message */
488 rc = smc_wr_tx_send(link, pend);
489 return rc;
490}
491
492/* send DELETE LINK request or response */
Karsten Graulfbed3b32020-05-01 12:48:04 +0200493int smc_llc_send_delete_link(struct smc_link *link, u8 link_del_id,
494 enum smc_llc_reqresp reqresp, bool orderly,
495 u32 reason)
Karsten Graul52bedf32018-03-01 13:51:32 +0100496{
497 struct smc_llc_msg_del_link *delllc;
498 struct smc_wr_tx_pend_priv *pend;
499 struct smc_wr_buf *wr_buf;
500 int rc;
501
502 rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
503 if (rc)
504 return rc;
505 delllc = (struct smc_llc_msg_del_link *)wr_buf;
Karsten Graulfbed3b32020-05-01 12:48:04 +0200506
507 memset(delllc, 0, sizeof(*delllc));
508 delllc->hd.common.type = SMC_LLC_DELETE_LINK;
509 delllc->hd.length = sizeof(struct smc_llc_msg_del_link);
510 if (reqresp == SMC_LLC_RESP)
511 delllc->hd.flags |= SMC_LLC_FLAG_RESP;
512 if (orderly)
513 delllc->hd.flags |= SMC_LLC_FLAG_DEL_LINK_ORDERLY;
514 if (link_del_id)
515 delllc->link_num = link_del_id;
516 else
517 delllc->hd.flags |= SMC_LLC_FLAG_DEL_LINK_ALL;
518 delllc->reason = htonl(reason);
Ursula Braun9bf9abe2017-01-09 16:55:21 +0100519 /* send llc message */
520 rc = smc_wr_tx_send(link, pend);
521 return rc;
522}
523
Karsten Grauld97935f2018-05-15 17:04:57 +0200524/* send LLC test link request */
525static int smc_llc_send_test_link(struct smc_link *link, u8 user_data[16])
Karsten Graul313164d2018-03-01 13:51:29 +0100526{
527 struct smc_llc_msg_test_link *testllc;
528 struct smc_wr_tx_pend_priv *pend;
529 struct smc_wr_buf *wr_buf;
530 int rc;
531
532 rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
533 if (rc)
534 return rc;
535 testllc = (struct smc_llc_msg_test_link *)wr_buf;
536 memset(testllc, 0, sizeof(*testllc));
537 testllc->hd.common.type = SMC_LLC_TEST_LINK;
538 testllc->hd.length = sizeof(struct smc_llc_msg_test_link);
Karsten Graul313164d2018-03-01 13:51:29 +0100539 memcpy(testllc->user_data, user_data, sizeof(testllc->user_data));
540 /* send llc message */
541 rc = smc_wr_tx_send(link, pend);
542 return rc;
543}
544
Karsten Graul6c8968c2020-04-29 17:10:46 +0200545/* schedule an llc send on link, may wait for buffers */
546static int smc_llc_send_message(struct smc_link *link, void *llcbuf)
Karsten Graul4ed75de2018-03-01 13:51:30 +0100547{
548 struct smc_wr_tx_pend_priv *pend;
549 struct smc_wr_buf *wr_buf;
550 int rc;
551
Karsten Graul6c8968c2020-04-29 17:10:46 +0200552 if (!smc_link_usable(link))
553 return -ENOLINK;
554 rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
Karsten Graul4ed75de2018-03-01 13:51:30 +0100555 if (rc)
Karsten Graul6c8968c2020-04-29 17:10:46 +0200556 return rc;
557 memcpy(wr_buf, llcbuf, sizeof(union smc_llc_msg));
558 return smc_wr_tx_send(link, pend);
Karsten Graul4ed75de2018-03-01 13:51:30 +0100559}
560
Ursula Braun9bf9abe2017-01-09 16:55:21 +0100561/********************************* receive ***********************************/
562
Karsten Graul336ba092020-05-03 14:38:40 +0200563static int smc_llc_alloc_alt_link(struct smc_link_group *lgr,
564 enum smc_lgr_type lgr_new_t)
565{
566 int i;
567
568 if (lgr->type == SMC_LGR_SYMMETRIC ||
569 (lgr->type != SMC_LGR_SINGLE &&
570 (lgr_new_t == SMC_LGR_ASYMMETRIC_LOCAL ||
571 lgr_new_t == SMC_LGR_ASYMMETRIC_PEER)))
572 return -EMLINK;
573
574 if (lgr_new_t == SMC_LGR_ASYMMETRIC_LOCAL ||
575 lgr_new_t == SMC_LGR_ASYMMETRIC_PEER) {
576 for (i = SMC_LINKS_PER_LGR_MAX - 1; i >= 0; i--)
577 if (lgr->lnk[i].state == SMC_LNK_UNUSED)
578 return i;
579 } else {
580 for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++)
581 if (lgr->lnk[i].state == SMC_LNK_UNUSED)
582 return i;
583 }
584 return -EMLINK;
585}
586
Karsten Graul87f88cd2020-05-03 14:38:41 +0200587/* return first buffer from any of the next buf lists */
588static struct smc_buf_desc *_smc_llc_get_next_rmb(struct smc_link_group *lgr,
589 int *buf_lst)
590{
591 struct smc_buf_desc *buf_pos;
592
593 while (*buf_lst < SMC_RMBE_SIZES) {
594 buf_pos = list_first_entry_or_null(&lgr->rmbs[*buf_lst],
595 struct smc_buf_desc, list);
596 if (buf_pos)
597 return buf_pos;
598 (*buf_lst)++;
599 }
600 return NULL;
601}
602
603/* return next rmb from buffer lists */
604static struct smc_buf_desc *smc_llc_get_next_rmb(struct smc_link_group *lgr,
605 int *buf_lst,
606 struct smc_buf_desc *buf_pos)
607{
608 struct smc_buf_desc *buf_next;
609
610 if (!buf_pos || list_is_last(&buf_pos->list, &lgr->rmbs[*buf_lst])) {
611 (*buf_lst)++;
612 return _smc_llc_get_next_rmb(lgr, buf_lst);
613 }
614 buf_next = list_next_entry(buf_pos, list);
615 return buf_next;
616}
617
618static struct smc_buf_desc *smc_llc_get_first_rmb(struct smc_link_group *lgr,
619 int *buf_lst)
620{
621 *buf_lst = 0;
622 return smc_llc_get_next_rmb(lgr, buf_lst, NULL);
623}
624
625/* send one add_link_continue msg */
626static int smc_llc_add_link_cont(struct smc_link *link,
627 struct smc_link *link_new, u8 *num_rkeys_todo,
628 int *buf_lst, struct smc_buf_desc **buf_pos)
629{
630 struct smc_llc_msg_add_link_cont *addc_llc;
631 struct smc_link_group *lgr = link->lgr;
632 int prim_lnk_idx, lnk_idx, i, rc;
633 struct smc_wr_tx_pend_priv *pend;
634 struct smc_wr_buf *wr_buf;
635 struct smc_buf_desc *rmb;
636 u8 n;
637
638 rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
639 if (rc)
640 return rc;
641 addc_llc = (struct smc_llc_msg_add_link_cont *)wr_buf;
642 memset(addc_llc, 0, sizeof(*addc_llc));
643
644 prim_lnk_idx = link->link_idx;
645 lnk_idx = link_new->link_idx;
646 addc_llc->link_num = link_new->link_id;
647 addc_llc->num_rkeys = *num_rkeys_todo;
648 n = *num_rkeys_todo;
649 for (i = 0; i < min_t(u8, n, SMC_LLC_RKEYS_PER_CONT_MSG); i++) {
650 if (!*buf_pos) {
651 addc_llc->num_rkeys = addc_llc->num_rkeys -
652 *num_rkeys_todo;
653 *num_rkeys_todo = 0;
654 break;
655 }
656 rmb = *buf_pos;
657
658 addc_llc->rt[i].rmb_key = htonl(rmb->mr_rx[prim_lnk_idx]->rkey);
659 addc_llc->rt[i].rmb_key_new = htonl(rmb->mr_rx[lnk_idx]->rkey);
660 addc_llc->rt[i].rmb_vaddr_new =
661 cpu_to_be64((u64)sg_dma_address(rmb->sgt[lnk_idx].sgl));
662
663 (*num_rkeys_todo)--;
664 *buf_pos = smc_llc_get_next_rmb(lgr, buf_lst, *buf_pos);
665 while (*buf_pos && !(*buf_pos)->used)
666 *buf_pos = smc_llc_get_next_rmb(lgr, buf_lst, *buf_pos);
667 }
668 addc_llc->hd.common.type = SMC_LLC_ADD_LINK_CONT;
669 addc_llc->hd.length = sizeof(struct smc_llc_msg_add_link_cont);
670 if (lgr->role == SMC_CLNT)
671 addc_llc->hd.flags |= SMC_LLC_FLAG_RESP;
672 return smc_wr_tx_send(link, pend);
673}
674
675static int smc_llc_cli_rkey_exchange(struct smc_link *link,
676 struct smc_link *link_new)
677{
678 struct smc_llc_msg_add_link_cont *addc_llc;
679 struct smc_link_group *lgr = link->lgr;
680 u8 max, num_rkeys_send, num_rkeys_recv;
681 struct smc_llc_qentry *qentry;
682 struct smc_buf_desc *buf_pos;
683 int buf_lst;
684 int rc = 0;
685 int i;
686
687 mutex_lock(&lgr->rmbs_lock);
688 num_rkeys_send = lgr->conns_num;
689 buf_pos = smc_llc_get_first_rmb(lgr, &buf_lst);
690 do {
691 qentry = smc_llc_wait(lgr, NULL, SMC_LLC_WAIT_TIME,
692 SMC_LLC_ADD_LINK_CONT);
693 if (!qentry) {
694 rc = -ETIMEDOUT;
695 break;
696 }
697 addc_llc = &qentry->msg.add_link_cont;
698 num_rkeys_recv = addc_llc->num_rkeys;
699 max = min_t(u8, num_rkeys_recv, SMC_LLC_RKEYS_PER_CONT_MSG);
700 for (i = 0; i < max; i++) {
701 smc_rtoken_set(lgr, link->link_idx, link_new->link_idx,
702 addc_llc->rt[i].rmb_key,
703 addc_llc->rt[i].rmb_vaddr_new,
704 addc_llc->rt[i].rmb_key_new);
705 num_rkeys_recv--;
706 }
707 smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);
708 rc = smc_llc_add_link_cont(link, link_new, &num_rkeys_send,
709 &buf_lst, &buf_pos);
710 if (rc)
711 break;
712 } while (num_rkeys_send || num_rkeys_recv);
713
714 mutex_unlock(&lgr->rmbs_lock);
715 return rc;
716}
717
Karsten Graul336ba092020-05-03 14:38:40 +0200718/* prepare and send an add link reject response */
719static int smc_llc_cli_add_link_reject(struct smc_llc_qentry *qentry)
720{
721 qentry->msg.raw.hdr.flags |= SMC_LLC_FLAG_RESP;
722 qentry->msg.raw.hdr.flags |= SMC_LLC_FLAG_ADD_LNK_REJ;
723 qentry->msg.raw.hdr.add_link_rej_rsn = SMC_LLC_REJ_RSN_NO_ALT_PATH;
724 return smc_llc_send_message(qentry->link, &qentry->msg);
725}
726
Karsten Graulb1570a82020-05-03 14:38:42 +0200727static int smc_llc_cli_conf_link(struct smc_link *link,
728 struct smc_init_info *ini,
729 struct smc_link *link_new,
730 enum smc_lgr_type lgr_new_t)
731{
732 struct smc_link_group *lgr = link->lgr;
733 struct smc_llc_msg_del_link *del_llc;
734 struct smc_llc_qentry *qentry = NULL;
735 int rc = 0;
736
737 /* receive CONFIRM LINK request over RoCE fabric */
738 qentry = smc_llc_wait(lgr, NULL, SMC_LLC_WAIT_FIRST_TIME, 0);
739 if (!qentry) {
740 rc = smc_llc_send_delete_link(link, link_new->link_id,
741 SMC_LLC_REQ, false,
742 SMC_LLC_DEL_LOST_PATH);
743 return -ENOLINK;
744 }
745 if (qentry->msg.raw.hdr.common.type != SMC_LLC_CONFIRM_LINK) {
746 /* received DELETE_LINK instead */
747 del_llc = &qentry->msg.delete_link;
748 qentry->msg.raw.hdr.flags |= SMC_LLC_FLAG_RESP;
749 smc_llc_send_message(link, &qentry->msg);
750 smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);
751 return -ENOLINK;
752 }
753 smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);
754
755 rc = smc_ib_modify_qp_rts(link_new);
756 if (rc) {
757 smc_llc_send_delete_link(link, link_new->link_id, SMC_LLC_REQ,
758 false, SMC_LLC_DEL_LOST_PATH);
759 return -ENOLINK;
760 }
761 smc_wr_remember_qp_attr(link_new);
762
763 rc = smcr_buf_reg_lgr(link_new);
764 if (rc) {
765 smc_llc_send_delete_link(link, link_new->link_id, SMC_LLC_REQ,
766 false, SMC_LLC_DEL_LOST_PATH);
767 return -ENOLINK;
768 }
769
770 /* send CONFIRM LINK response over RoCE fabric */
771 rc = smc_llc_send_confirm_link(link_new, SMC_LLC_RESP);
772 if (rc) {
773 smc_llc_send_delete_link(link, link_new->link_id, SMC_LLC_REQ,
774 false, SMC_LLC_DEL_LOST_PATH);
775 return -ENOLINK;
776 }
777 smc_llc_link_active(link_new);
778 lgr->type = lgr_new_t;
779 return 0;
780}
781
Karsten Graul336ba092020-05-03 14:38:40 +0200782static void smc_llc_save_add_link_info(struct smc_link *link,
783 struct smc_llc_msg_add_link *add_llc)
784{
785 link->peer_qpn = ntoh24(add_llc->sender_qp_num);
786 memcpy(link->peer_gid, add_llc->sender_gid, SMC_GID_SIZE);
787 memcpy(link->peer_mac, add_llc->sender_mac, ETH_ALEN);
788 link->peer_psn = ntoh24(add_llc->initial_psn);
789 link->peer_mtu = add_llc->qp_mtu;
790}
791
792/* as an SMC client, process an add link request */
793int smc_llc_cli_add_link(struct smc_link *link, struct smc_llc_qentry *qentry)
794{
795 struct smc_llc_msg_add_link *llc = &qentry->msg.add_link;
796 enum smc_lgr_type lgr_new_t = SMC_LGR_SYMMETRIC;
797 struct smc_link_group *lgr = smc_get_lgr(link);
798 struct smc_link *lnk_new = NULL;
799 struct smc_init_info ini;
800 int lnk_idx, rc = 0;
801
802 ini.vlan_id = lgr->vlan_id;
803 smc_pnet_find_alt_roce(lgr, &ini, link->smcibdev);
804 if (!memcmp(llc->sender_gid, link->peer_gid, SMC_GID_SIZE) &&
805 !memcmp(llc->sender_mac, link->peer_mac, ETH_ALEN)) {
806 if (!ini.ib_dev)
807 goto out_reject;
808 lgr_new_t = SMC_LGR_ASYMMETRIC_PEER;
809 }
810 if (!ini.ib_dev) {
811 lgr_new_t = SMC_LGR_ASYMMETRIC_LOCAL;
812 ini.ib_dev = link->smcibdev;
813 ini.ib_port = link->ibport;
814 }
815 lnk_idx = smc_llc_alloc_alt_link(lgr, lgr_new_t);
816 if (lnk_idx < 0)
817 goto out_reject;
818 lnk_new = &lgr->lnk[lnk_idx];
819 rc = smcr_link_init(lgr, lnk_new, lnk_idx, &ini);
820 if (rc)
821 goto out_reject;
822 smc_llc_save_add_link_info(lnk_new, llc);
823 lnk_new->link_id = llc->link_num;
824
825 rc = smc_ib_ready_link(lnk_new);
826 if (rc)
827 goto out_clear_lnk;
828
829 rc = smcr_buf_map_lgr(lnk_new);
830 if (rc)
831 goto out_clear_lnk;
832
833 rc = smc_llc_send_add_link(link,
834 lnk_new->smcibdev->mac[ini.ib_port - 1],
835 lnk_new->gid, lnk_new, SMC_LLC_RESP);
836 if (rc)
837 goto out_clear_lnk;
Karsten Graul87f88cd2020-05-03 14:38:41 +0200838 rc = smc_llc_cli_rkey_exchange(link, lnk_new);
Karsten Graul336ba092020-05-03 14:38:40 +0200839 if (rc) {
840 rc = 0;
841 goto out_clear_lnk;
842 }
Karsten Graulb1570a82020-05-03 14:38:42 +0200843 rc = smc_llc_cli_conf_link(link, &ini, lnk_new, lgr_new_t);
Karsten Graul336ba092020-05-03 14:38:40 +0200844 if (!rc)
845 goto out;
846out_clear_lnk:
847 smcr_link_clear(lnk_new);
848out_reject:
849 smc_llc_cli_add_link_reject(qentry);
850out:
851 kfree(qentry);
852 return rc;
853}
854
Karsten Graulb1570a82020-05-03 14:38:42 +0200855static void smc_llc_process_cli_add_link(struct smc_link_group *lgr)
856{
857 struct smc_llc_qentry *qentry;
858
859 qentry = smc_llc_flow_qentry_clr(&lgr->llc_flow_lcl);
860
861 mutex_lock(&lgr->llc_conf_mutex);
862 smc_llc_cli_add_link(qentry->link, qentry);
863 mutex_unlock(&lgr->llc_conf_mutex);
864}
865
/* as SMC server, exchange all rkeys of the link group with the peer for the
 * new link: alternate sending ADD_LINK_CONT messages with the local rkeys
 * and receiving the peer's ADD_LINK_CONT replies, until both directions
 * report no remaining rkeys.
 */
static int smc_llc_srv_rkey_exchange(struct smc_link *link,
				     struct smc_link *link_new)
{
	struct smc_llc_msg_add_link_cont *addc_llc;
	struct smc_link_group *lgr = link->lgr;
	u8 max, num_rkeys_send, num_rkeys_recv;
	struct smc_llc_qentry *qentry = NULL;
	struct smc_buf_desc *buf_pos;
	int buf_lst;
	int rc = 0;
	int i;

	mutex_lock(&lgr->rmbs_lock);
	num_rkeys_send = lgr->conns_num;
	buf_pos = smc_llc_get_first_rmb(lgr, &buf_lst);
	do {
		/* send next batch of local rkeys for link_new */
		smc_llc_add_link_cont(link, link_new, &num_rkeys_send,
				      &buf_lst, &buf_pos);
		/* wait for the peer's next batch over the RoCE fabric */
		qentry = smc_llc_wait(lgr, link, SMC_LLC_WAIT_TIME,
				      SMC_LLC_ADD_LINK_CONT);
		if (!qentry) {
			rc = -ETIMEDOUT;
			goto out;
		}
		addc_llc = &qentry->msg.add_link_cont;
		num_rkeys_recv = addc_llc->num_rkeys;
		max = min_t(u8, num_rkeys_recv, SMC_LLC_RKEYS_PER_CONT_MSG);
		for (i = 0; i < max; i++) {
			/* record the peer's new rkey/vaddr for link_new */
			smc_rtoken_set(lgr, link->link_idx, link_new->link_idx,
				       addc_llc->rt[i].rmb_key,
				       addc_llc->rt[i].rmb_vaddr_new,
				       addc_llc->rt[i].rmb_key_new);
			num_rkeys_recv--;
		}
		smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);
	} while (num_rkeys_send || num_rkeys_recv);
out:
	mutex_unlock(&lgr->rmbs_lock);
	return rc;
}
906
/* as SMC server, drive the add-link flow: pick an alternate RoCE device,
 * initialize the new link, negotiate it with the client via ADD_LINK
 * request/response and exchange the rkeys for the new link.
 * Returns 0 on success (or when no link slot is free), negative otherwise.
 */
int smc_llc_srv_add_link(struct smc_link *link)
{
	enum smc_lgr_type lgr_new_t = SMC_LGR_SYMMETRIC;
	struct smc_link_group *lgr = link->lgr;
	struct smc_llc_msg_add_link *add_llc;
	struct smc_llc_qentry *qentry = NULL;
	struct smc_link *link_new;
	struct smc_init_info ini;
	int lnk_idx, rc = 0;

	/* ignore client add link recommendation, start new flow */
	/* NOTE(review): ini is only partially initialized here; presumably
	 * smc_pnet_find_alt_roce() sets ib_dev/ib_port - confirm
	 */
	ini.vlan_id = lgr->vlan_id;
	smc_pnet_find_alt_roce(lgr, &ini, link->smcibdev);
	if (!ini.ib_dev) {
		/* no alternate device found: reuse the current one; the
		 * link group becomes asymmetric on the local side
		 */
		lgr_new_t = SMC_LGR_ASYMMETRIC_LOCAL;
		ini.ib_dev = link->smcibdev;
		ini.ib_port = link->ibport;
	}
	lnk_idx = smc_llc_alloc_alt_link(lgr, lgr_new_t);
	if (lnk_idx < 0)
		return 0;	/* no free link slot, nothing to do */

	rc = smcr_link_init(lgr, &lgr->lnk[lnk_idx], lnk_idx, &ini);
	if (rc)
		return rc;
	link_new = &lgr->lnk[lnk_idx];
	rc = smc_llc_send_add_link(link,
				   link_new->smcibdev->mac[ini.ib_port - 1],
				   link_new->gid, link_new, SMC_LLC_REQ);
	if (rc)
		goto out_err;
	/* receive ADD LINK response over the RoCE fabric */
	qentry = smc_llc_wait(lgr, link, SMC_LLC_WAIT_TIME, SMC_LLC_ADD_LINK);
	if (!qentry) {
		rc = -ETIMEDOUT;
		goto out_err;
	}
	add_llc = &qentry->msg.add_link;
	if (add_llc->hd.flags & SMC_LLC_FLAG_ADD_LNK_REJ) {
		/* client rejected the new link */
		smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);
		rc = -ENOLINK;
		goto out_err;
	}
	/* peer answered from the same gid/mac pair: asymmetric on peer side */
	if (lgr->type == SMC_LGR_SINGLE &&
	    (!memcmp(add_llc->sender_gid, link->peer_gid, SMC_GID_SIZE) &&
	     !memcmp(add_llc->sender_mac, link->peer_mac, ETH_ALEN))) {
		lgr_new_t = SMC_LGR_ASYMMETRIC_PEER;
	}
	smc_llc_save_add_link_info(link_new, add_llc);
	smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);

	rc = smc_ib_ready_link(link_new);
	if (rc)
		goto out_err;
	rc = smcr_buf_map_lgr(link_new);
	if (rc)
		goto out_err;
	rc = smcr_buf_reg_lgr(link_new);
	if (rc)
		goto out_err;
	rc = smc_llc_srv_rkey_exchange(link, link_new);
	if (rc)
		goto out_err;
	/* tbd: rc = smc_llc_srv_conf_link(link, link_new, lgr_new_t); */
	if (rc)	/* dead until the tbd call above is implemented */
		goto out_err;
	return 0;
out_err:
	smcr_link_clear(link_new);
	return rc;
}
978
/* worker-context entry for the server-side add-link flow: consume the
 * triggering qentry and run the add-link negotiation under the link group
 * configuration mutex.
 */
static void smc_llc_process_srv_add_link(struct smc_link_group *lgr)
{
	struct smc_link *link = lgr->llc_flow_lcl.qentry->link;
	int rc;

	/* qentry only triggered the flow; the server starts its own */
	smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);

	mutex_lock(&lgr->llc_conf_mutex);
	rc = smc_llc_srv_add_link(link);
	if (!rc && lgr->type == SMC_LGR_SYMMETRIC) {
		/* delete any asymmetric link */
		/* tbd: smc_llc_delete_asym_link(lgr); */
	}
	mutex_unlock(&lgr->llc_conf_mutex);
}
994
/* worker to process an add link message; dispatches to the client or
 * server side flow and stops the local flow when done
 */
static void smc_llc_add_link_work(struct work_struct *work)
{
	struct smc_link_group *lgr = container_of(work, struct smc_link_group,
						  llc_add_link_work);

	if (list_empty(&lgr->list)) {
		/* link group is terminating */
		smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);
		goto out;
	}

	if (lgr->role == SMC_CLNT)
		smc_llc_process_cli_add_link(lgr);
	else
		smc_llc_process_srv_add_link(lgr);
out:
	smc_llc_flow_stop(lgr, &lgr->llc_flow_lcl);
}
1014
/* handle a received DELETE LINK message: answer according to our role,
 * forget the link group and take the link down.
 * Note: the llc message parameter is currently unused; kept for symmetry
 * with the other rx handlers.
 */
static void smc_llc_rx_delete_link(struct smc_link *link,
				   struct smc_llc_msg_del_link *llc)
{
	struct smc_link_group *lgr = smc_get_lgr(link);

	smc_lgr_forget(lgr);
	if (lgr->role == SMC_SERV) {
		/* client asks to delete this link, send request */
		smc_llc_send_delete_link(link, 0, SMC_LLC_REQ, true,
					 SMC_LLC_DEL_PROG_INIT_TERM);
	} else {
		/* server requests to delete this link, send response */
		smc_llc_send_delete_link(link, 0, SMC_LLC_RESP, true,
					 SMC_LLC_DEL_PROG_INIT_TERM);
	}
	smcr_link_down_cond(link);
}
1032
/* process a confirm_rkey request from peer, remote flow: add the peer's
 * rtokens, send the (possibly negative) response and release the qentry
 * held by the remote flow.
 */
static void smc_llc_rmt_conf_rkey(struct smc_link_group *lgr)
{
	struct smc_llc_msg_confirm_rkey *llc;
	struct smc_llc_qentry *qentry;
	struct smc_link *link;
	int num_entries;
	int rk_idx;
	int i;

	qentry = lgr->llc_flow_rmt.qentry;
	llc = &qentry->msg.confirm_rkey;
	link = qentry->link;

	num_entries = llc->rtoken[0].num_rkeys;
	/* first rkey entry is for receiving link */
	rk_idx = smc_rtoken_add(link,
				llc->rtoken[0].rmb_vaddr,
				llc->rtoken[0].rmb_key);
	if (rk_idx < 0)
		goto out_err;

	/* remaining entries carry the same rtoken for the other links */
	for (i = 1; i <= min_t(u8, num_entries, SMC_LLC_RKEYS_PER_MSG - 1); i++)
		smc_rtoken_set2(lgr, rk_idx, llc->rtoken[i].link_id,
				llc->rtoken[i].rmb_vaddr,
				llc->rtoken[i].rmb_key);
	/* max links is 3 so there is no need to support conf_rkey_cont msgs */
	goto out;
out_err:
	/* report failure and ask the peer to retry */
	llc->hd.flags |= SMC_LLC_FLAG_RKEY_NEG;
	llc->hd.flags |= SMC_LLC_FLAG_RKEY_RETRY;
out:
	/* turn the request buffer into the response and send it back */
	llc->hd.flags |= SMC_LLC_FLAG_RESP;
	smc_llc_send_message(link, &qentry->msg);
	smc_llc_flow_qentry_del(&lgr->llc_flow_rmt);
}
1069
/* process a delete_rkey request from peer, remote flow: delete the listed
 * rtokens, report per-entry failures via err_mask and send the response.
 */
static void smc_llc_rmt_delete_rkey(struct smc_link_group *lgr)
{
	struct smc_llc_msg_delete_rkey *llc;
	struct smc_llc_qentry *qentry;
	struct smc_link *link;
	u8 err_mask = 0;
	int i, max;

	qentry = lgr->llc_flow_rmt.qentry;
	llc = &qentry->msg.delete_rkey;
	link = qentry->link;

	max = min_t(u8, llc->num_rkeys, SMC_LLC_DEL_RKEY_MAX);
	for (i = 0; i < max; i++) {
		/* bit i (from the left) marks failure of rkey[i] */
		if (smc_rtoken_delete(link, llc->rkey[i]))
			err_mask |= 1 << (SMC_LLC_DEL_RKEY_MAX - 1 - i);
	}
	if (err_mask) {
		llc->hd.flags |= SMC_LLC_FLAG_RKEY_NEG;
		llc->err_mask = err_mask;
	}
	/* turn the request buffer into the response and send it back */
	llc->hd.flags |= SMC_LLC_FLAG_RESP;
	smc_llc_send_message(link, &qentry->msg);
	smc_llc_flow_qentry_del(&lgr->llc_flow_rmt);
}
Karsten Graulef79d432020-04-29 17:10:47 +02001096
Karsten Graul6c8968c2020-04-29 17:10:46 +02001097/* flush the llc event queue */
Karsten Graul00a049c2020-04-29 17:10:49 +02001098static void smc_llc_event_flush(struct smc_link_group *lgr)
Ursula Braun9bf9abe2017-01-09 16:55:21 +01001099{
Karsten Graul6c8968c2020-04-29 17:10:46 +02001100 struct smc_llc_qentry *qentry, *q;
Ursula Braun9bf9abe2017-01-09 16:55:21 +01001101
Karsten Graul6c8968c2020-04-29 17:10:46 +02001102 spin_lock_bh(&lgr->llc_event_q_lock);
1103 list_for_each_entry_safe(qentry, q, &lgr->llc_event_q, list) {
1104 list_del_init(&qentry->list);
1105 kfree(qentry);
1106 }
1107 spin_unlock_bh(&lgr->llc_event_q_lock);
1108}
1109
/* handle one LLC request taken from the event queue.
 * qentry ownership: the cases that "return" have handed qentry over to a
 * flow (which frees it later); every path reaching "out" frees it here.
 */
static void smc_llc_event_handler(struct smc_llc_qentry *qentry)
{
	union smc_llc_msg *llc = &qentry->msg;
	struct smc_link *link = qentry->link;
	struct smc_link_group *lgr = link->lgr;

	if (!smc_link_usable(link))
		goto out;

	switch (llc->raw.hdr.common.type) {
	case SMC_LLC_TEST_LINK:
		/* echo the request back as the response */
		llc->test_link.hd.flags |= SMC_LLC_FLAG_RESP;
		smc_llc_send_message(link, llc);
		break;
	case SMC_LLC_ADD_LINK:
		if (list_empty(&lgr->list))
			goto out;	/* lgr is terminating */
		if (lgr->role == SMC_CLNT) {
			if (lgr->llc_flow_lcl.type == SMC_LLC_FLOW_ADD_LINK) {
				/* a flow is waiting for this message */
				smc_llc_flow_qentry_set(&lgr->llc_flow_lcl,
							qentry);
				wake_up_interruptible(&lgr->llc_waiter);
			} else if (smc_llc_flow_start(&lgr->llc_flow_lcl,
						      qentry)) {
				/* new local flow: process in worker */
				schedule_work(&lgr->llc_add_link_work);
			}
		} else if (smc_llc_flow_start(&lgr->llc_flow_lcl, qentry)) {
			/* as smc server, handle client suggestion */
			schedule_work(&lgr->llc_add_link_work);
		}
		return;
	case SMC_LLC_CONFIRM_LINK:
	case SMC_LLC_ADD_LINK_CONT:
		if (lgr->llc_flow_lcl.type != SMC_LLC_FLOW_NONE) {
			/* a flow is waiting for this message */
			smc_llc_flow_qentry_set(&lgr->llc_flow_lcl, qentry);
			wake_up_interruptible(&lgr->llc_waiter);
			return;
		}
		break;	/* unexpected without an active flow; drop it */
	case SMC_LLC_DELETE_LINK:
		smc_llc_rx_delete_link(link, &llc->delete_link);
		break;
	case SMC_LLC_CONFIRM_RKEY:
		/* new request from remote, assign to remote flow */
		if (smc_llc_flow_start(&lgr->llc_flow_rmt, qentry)) {
			/* process here, does not wait for more llc msgs */
			smc_llc_rmt_conf_rkey(lgr);
			smc_llc_flow_stop(lgr, &lgr->llc_flow_rmt);
		}
		return;
	case SMC_LLC_CONFIRM_RKEY_CONT:
		/* not used because max links is 3, and 3 rkeys fit into
		 * one CONFIRM_RKEY message
		 */
		break;
	case SMC_LLC_DELETE_RKEY:
		/* new request from remote, assign to remote flow */
		if (smc_llc_flow_start(&lgr->llc_flow_rmt, qentry)) {
			/* process here, does not wait for more llc msgs */
			smc_llc_rmt_delete_rkey(lgr);
			smc_llc_flow_stop(lgr, &lgr->llc_flow_rmt);
		}
		return;
	}
out:
	kfree(qentry);
}
1179
/* worker to process llc messages on the event queue; first replays a
 * pending delayed event (if no local flow is active), then drains the
 * queue one entry at a time, reacquiring the lock between entries.
 */
static void smc_llc_event_work(struct work_struct *work)
{
	struct smc_link_group *lgr = container_of(work, struct smc_link_group,
						  llc_event_work);
	struct smc_llc_qentry *qentry;

	if (!lgr->llc_flow_lcl.type && lgr->delayed_event) {
		if (smc_link_usable(lgr->delayed_event->link)) {
			/* NOTE(review): delayed_event is not reset to NULL
			 * on this path - presumably the handler/flow takes
			 * ownership; confirm no double processing can occur
			 */
			smc_llc_event_handler(lgr->delayed_event);
		} else {
			qentry = lgr->delayed_event;
			lgr->delayed_event = NULL;
			kfree(qentry);
		}
	}

again:
	spin_lock_bh(&lgr->llc_event_q_lock);
	if (!list_empty(&lgr->llc_event_q)) {
		qentry = list_first_entry(&lgr->llc_event_q,
					  struct smc_llc_qentry, list);
		list_del_init(&qentry->list);
		/* drop the lock while handling the entry */
		spin_unlock_bh(&lgr->llc_event_q_lock);
		smc_llc_event_handler(qentry);
		goto again;
	}
	spin_unlock_bh(&lgr->llc_event_q_lock);
}
1209
/* process llc responses in tasklet context; responses that a local flow
 * is waiting for are handed to the flow (which then owns and frees the
 * qentry), all others are consumed and freed here.
 */
static void smc_llc_rx_response(struct smc_link *link,
				struct smc_llc_qentry *qentry)
{
	u8 llc_type = qentry->msg.raw.hdr.common.type;

	switch (llc_type) {
	case SMC_LLC_TEST_LINK:
		if (link->state == SMC_LNK_ACTIVE)
			complete(&link->llc_testlink_resp);
		break;
	case SMC_LLC_ADD_LINK:
	case SMC_LLC_CONFIRM_LINK:
	case SMC_LLC_ADD_LINK_CONT:
	case SMC_LLC_CONFIRM_RKEY:
	case SMC_LLC_DELETE_RKEY:
		/* assign responses to the local flow, we requested them */
		smc_llc_flow_qentry_set(&link->lgr->llc_flow_lcl, qentry);
		wake_up_interruptible(&link->lgr->llc_waiter);
		return;
	case SMC_LLC_DELETE_LINK:
		if (link->lgr->role == SMC_SERV)
			smc_lgr_schedule_free_work_fast(link->lgr);
		break;
	case SMC_LLC_CONFIRM_RKEY_CONT:
		/* not used because max links is 3 */
		break;
	}
	kfree(qentry);
}
1240
/* copy an LLC message into a queue entry; responses are processed
 * immediately (tasklet context), requests are queued for the event worker.
 * On allocation failure the message is silently dropped - the peer will
 * retry or time out.
 */
static void smc_llc_enqueue(struct smc_link *link, union smc_llc_msg *llc)
{
	struct smc_link_group *lgr = link->lgr;
	struct smc_llc_qentry *qentry;
	unsigned long flags;

	qentry = kmalloc(sizeof(*qentry), GFP_ATOMIC);
	if (!qentry)
		return;
	qentry->link = link;
	INIT_LIST_HEAD(&qentry->list);
	memcpy(&qentry->msg, llc, sizeof(union smc_llc_msg));

	/* process responses immediately */
	if (llc->raw.hdr.flags & SMC_LLC_FLAG_RESP) {
		smc_llc_rx_response(link, qentry);
		return;
	}

	/* add requests to event queue */
	spin_lock_irqsave(&lgr->llc_event_q_lock, flags);
	list_add_tail(&qentry->list, &lgr->llc_event_q);
	spin_unlock_irqrestore(&lgr->llc_event_q_lock, flags);
	schedule_work(&link->lgr->llc_event_work);
}
1266
Karsten Graula6688d92020-04-30 15:55:39 +02001267/* copy received msg and add it to the event queue */
1268static void smc_llc_rx_handler(struct ib_wc *wc, void *buf)
1269{
1270 struct smc_link *link = (struct smc_link *)wc->qp->qp_context;
1271 union smc_llc_msg *llc = buf;
1272
1273 if (wc->byte_len < sizeof(*llc))
1274 return; /* short message */
1275 if (llc->raw.hdr.length != sizeof(*llc))
1276 return; /* invalid message */
1277
1278 smc_llc_enqueue(link, llc);
1279}
1280
Karsten Graul44aa81c2018-05-15 17:04:55 +02001281/***************************** worker, utils *********************************/
Karsten Graul877ae5b2018-05-02 16:56:44 +02001282
/* periodic worker that probes link liveness: if the link has been idle
 * past its testlink interval, send a TEST LINK and wait for the response;
 * take the link down on timeout, otherwise reschedule itself.
 */
static void smc_llc_testlink_work(struct work_struct *work)
{
	struct smc_link *link = container_of(to_delayed_work(work),
					     struct smc_link, llc_testlink_wrk);
	unsigned long next_interval;
	unsigned long expire_time;
	u8 user_data[16] = { 0 };
	int rc;

	if (link->state != SMC_LNK_ACTIVE)
		return;	/* don't reschedule worker */
	expire_time = link->wr_rx_tstamp + link->llc_testlink_time;
	if (time_is_after_jiffies(expire_time)) {
		/* link saw traffic recently; just wait out the remainder */
		next_interval = expire_time - jiffies;
		goto out;
	}
	reinit_completion(&link->llc_testlink_resp);
	smc_llc_send_test_link(link, user_data);
	/* receive TEST LINK response over RoCE fabric */
	rc = wait_for_completion_interruptible_timeout(&link->llc_testlink_resp,
						       SMC_LLC_WAIT_TIME);
	if (link->state != SMC_LNK_ACTIVE)
		return;	/* link state changed */
	if (rc <= 0) {
		/* no response in time: schedule taking the link down */
		smcr_link_down_cond_sched(link);
		return;
	}
	next_interval = link->llc_testlink_time;
out:
	schedule_delayed_work(&link->llc_testlink_wrk, next_interval);
}
1314
/* initialize the LLC state of a new link group: workers, event queue,
 * flow locks and the testlink interval taken from the net namespace's
 * TCP keepalive time.
 */
void smc_llc_lgr_init(struct smc_link_group *lgr, struct smc_sock *smc)
{
	struct net *net = sock_net(smc->clcsock->sk);

	INIT_WORK(&lgr->llc_event_work, smc_llc_event_work);
	INIT_WORK(&lgr->llc_add_link_work, smc_llc_add_link_work);
	INIT_LIST_HEAD(&lgr->llc_event_q);
	spin_lock_init(&lgr->llc_event_q_lock);
	spin_lock_init(&lgr->llc_flow_lock);
	init_waitqueue_head(&lgr->llc_waiter);
	mutex_init(&lgr->llc_conf_mutex);
	lgr->llc_testlink_time = net->ipv4.sysctl_tcp_keepalive_time;
}
1328
1329/* called after lgr was removed from lgr_list */
1330void smc_llc_lgr_clear(struct smc_link_group *lgr)
1331{
1332 smc_llc_event_flush(lgr);
Karsten Graul555da9a2020-04-30 15:55:38 +02001333 wake_up_interruptible_all(&lgr->llc_waiter);
Karsten Graul00a049c2020-04-29 17:10:49 +02001334 cancel_work_sync(&lgr->llc_event_work);
Karsten Graulb45e7f92020-05-01 12:48:13 +02001335 cancel_work_sync(&lgr->llc_add_link_work);
Karsten Graul555da9a2020-04-30 15:55:38 +02001336 if (lgr->delayed_event) {
1337 kfree(lgr->delayed_event);
1338 lgr->delayed_event = NULL;
1339 }
Karsten Graul00a049c2020-04-29 17:10:49 +02001340}
1341
/* initialize the per-link LLC state; always succeeds (returns 0) */
int smc_llc_link_init(struct smc_link *link)
{
	init_completion(&link->llc_testlink_resp);
	INIT_DELAYED_WORK(&link->llc_testlink_wrk, smc_llc_testlink_work);
	return 0;
}
1348
/* mark the link active and kick off the periodic testlink worker,
 * unless testlink probing is disabled (interval 0)
 */
void smc_llc_link_active(struct smc_link *link)
{
	link->state = SMC_LNK_ACTIVE;
	if (link->lgr->llc_testlink_time) {
		/* convert the seconds-based interval to jiffies */
		link->llc_testlink_time = link->lgr->llc_testlink_time * HZ;
		schedule_delayed_work(&link->llc_testlink_wrk,
				      link->llc_testlink_time);
	}
}
1358
/* called in worker context: stop the testlink worker and release anybody
 * blocked on this link's work request resources
 */
void smc_llc_link_clear(struct smc_link *link)
{
	/* release a possible testlink waiter before cancelling the worker */
	complete(&link->llc_testlink_resp);
	cancel_delayed_work_sync(&link->llc_testlink_wrk);
	smc_wr_wakeup_reg_wait(link);
	smc_wr_wakeup_tx_wait(link);
}
1367
/* register a new rtoken at the remote peer (for all links); sends a
 * CONFIRM_RKEY request and waits for the response within the local flow.
 * Returns 0 on success, -EFAULT on timeout or negative response.
 */
int smc_llc_do_confirm_rkey(struct smc_link *send_link,
			    struct smc_buf_desc *rmb_desc)
{
	struct smc_link_group *lgr = send_link->lgr;
	struct smc_llc_qentry *qentry = NULL;
	int rc = 0;

	rc = smc_llc_send_confirm_rkey(send_link, rmb_desc);
	if (rc)
		goto out;
	/* receive CONFIRM RKEY response from server over RoCE fabric */
	qentry = smc_llc_wait(lgr, send_link, SMC_LLC_WAIT_TIME,
			      SMC_LLC_CONFIRM_RKEY);
	if (!qentry || (qentry->msg.raw.hdr.flags & SMC_LLC_FLAG_RKEY_NEG))
		rc = -EFAULT;
out:
	if (qentry)
		smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);
	return rc;
}
1389
/* unregister an rtoken at the remote peer; picks any usable link to send
 * the DELETE_RKEY request and waits for the response within the local
 * flow. Returns 0 on success, -ENOLINK without a usable link, -EFAULT on
 * timeout or negative response.
 */
int smc_llc_do_delete_rkey(struct smc_link_group *lgr,
			   struct smc_buf_desc *rmb_desc)
{
	struct smc_llc_qentry *qentry = NULL;
	struct smc_link *send_link;
	int rc = 0;

	send_link = smc_llc_usable_link(lgr);
	if (!send_link)
		return -ENOLINK;

	/* protected by llc_flow control */
	rc = smc_llc_send_delete_rkey(send_link, rmb_desc);
	if (rc)
		goto out;
	/* receive DELETE RKEY response from server over RoCE fabric */
	qentry = smc_llc_wait(lgr, send_link, SMC_LLC_WAIT_TIME,
			      SMC_LLC_DELETE_RKEY);
	if (!qentry || (qentry->msg.raw.hdr.flags & SMC_LLC_FLAG_RKEY_NEG))
		rc = -EFAULT;
out:
	if (qentry)
		smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);
	return rc;
}
1416
Karsten Graul92334cf2020-04-30 15:55:41 +02001417/* evaluate confirm link request or response */
1418int smc_llc_eval_conf_link(struct smc_llc_qentry *qentry,
1419 enum smc_llc_reqresp type)
1420{
1421 if (type == SMC_LLC_REQ) /* SMC server assigns link_id */
1422 qentry->link->link_id = qentry->msg.confirm_link.link_num;
1423 if (!(qentry->msg.raw.hdr.flags & SMC_LLC_FLAG_NO_RMBE_EYEC))
1424 return -ENOTSUPP;
1425 return 0;
1426}
1427
Ursula Braun9bf9abe2017-01-09 16:55:21 +01001428/***************************** init, exit, misc ******************************/
1429
/* dispatch table registered with the WR receive layer: every LLC message
 * type funnels into the common smc_llc_rx_handler(); terminated by a
 * NULL handler entry
 */
static struct smc_wr_rx_handler smc_llc_rx_handlers[] = {
	{
		.handler	= smc_llc_rx_handler,
		.type		= SMC_LLC_CONFIRM_LINK
	},
	{
		.handler	= smc_llc_rx_handler,
		.type		= SMC_LLC_TEST_LINK
	},
	{
		.handler	= smc_llc_rx_handler,
		.type		= SMC_LLC_ADD_LINK
	},
	{
		.handler	= smc_llc_rx_handler,
		.type		= SMC_LLC_ADD_LINK_CONT
	},
	{
		.handler	= smc_llc_rx_handler,
		.type		= SMC_LLC_DELETE_LINK
	},
	{
		.handler	= smc_llc_rx_handler,
		.type		= SMC_LLC_CONFIRM_RKEY
	},
	{
		.handler	= smc_llc_rx_handler,
		.type		= SMC_LLC_CONFIRM_RKEY_CONT
	},
	{
		.handler	= smc_llc_rx_handler,
		.type		= SMC_LLC_DELETE_RKEY
	},
	{
		.handler	= NULL,
	}
};
1467
1468int __init smc_llc_init(void)
1469{
1470 struct smc_wr_rx_handler *handler;
1471 int rc = 0;
1472
1473 for (handler = smc_llc_rx_handlers; handler->handler; handler++) {
1474 INIT_HLIST_NODE(&handler->list);
1475 rc = smc_wr_rx_register_handler(handler);
1476 if (rc)
1477 break;
1478 }
1479 return rc;
1480}