/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Shared Memory Communications over RDMA (SMC-R) and RoCE
 *
 * Work Requests exploiting Infiniband API
 *
 * Copyright IBM Corp. 2016
 *
 * Author(s): Steffen Maier <maier@linux.vnet.ibm.com>
 */

#ifndef SMC_WR_H
#define SMC_WR_H

#include <linux/atomic.h>
#include <rdma/ib_verbs.h>
#include <asm/div64.h>

#include "smc.h"
#include "smc_core.h"

#define SMC_WR_BUF_CNT 16	/* # of ctrl buffers per link */

#define SMC_WR_TX_WAIT_FREE_SLOT_TIME	(10 * HZ)

#define SMC_WR_TX_SIZE 44 /* actual size of wr_send data (<=SMC_WR_BUF_SIZE) */

#define SMC_WR_TX_PEND_PRIV_SIZE 32

struct smc_wr_tx_pend_priv {
	u8		priv[SMC_WR_TX_PEND_PRIV_SIZE];
};

typedef void (*smc_wr_tx_handler)(struct smc_wr_tx_pend_priv *,
				  struct smc_link *,
				  enum ib_wc_status);

typedef bool (*smc_wr_tx_filter)(struct smc_wr_tx_pend_priv *,
				 unsigned long);

typedef void (*smc_wr_tx_dismisser)(struct smc_wr_tx_pend_priv *);

struct smc_wr_rx_handler {
	struct hlist_node	list;	/* hash table collision resolution */
	void			(*handler)(struct ib_wc *, void *);
	u8			type;
};

/* Only used by RDMA write WRs.
 * All other WRs (CDC/LLC) use smc_wr_tx_send(), which handles the WR_ID
 * implicitly.
 */
static inline long smc_wr_tx_get_next_wr_id(struct smc_link *link)
{
	return atomic_long_inc_return(&link->wr_tx_id);
}

static inline void smc_wr_tx_set_wr_id(atomic_long_t *wr_tx_id, long val)
{
	atomic_long_set(wr_tx_id, val);
}

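/* Hold off link teardown while a send is being prepared: take a reference
 * on the link's wr_tx_refcnt, but only if the link is still able to send.
 * Callers must pair this with smc_wr_tx_link_put().
 */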
static inline bool smc_wr_tx_link_hold(struct smc_link *link)
{
	if (!smc_link_sendable(link))
		return false;
	atomic_inc(&link->wr_tx_refcnt);
	return true;
}

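/* Drop the reference taken by smc_wr_tx_link_hold(); the last holder wakes
 * up waiters on wr_tx_wait that wait for all pending senders to be gone.
 */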
static inline void smc_wr_tx_link_put(struct smc_link *link)
{
	if (atomic_dec_and_test(&link->wr_tx_refcnt))
		wake_up_all(&link->wr_tx_wait);
}

static inline void smc_wr_wakeup_tx_wait(struct smc_link *lnk)
{
	wake_up_all(&lnk->wr_tx_wait);
}

static inline void smc_wr_wakeup_reg_wait(struct smc_link *lnk)
{
	wake_up(&lnk->wr_reg_wait);
}

/* post a new receive work request to fill a completed old work request entry */
static inline int smc_wr_rx_post(struct smc_link *link)
{
	int rc;
	u64 wr_id, temp_wr_id;
	u32 index;

	wr_id = ++link->wr_rx_id; /* tasklet context, thus not atomic */
	temp_wr_id = wr_id;
	index = do_div(temp_wr_id, link->wr_rx_cnt); /* ring index = wr_id % wr_rx_cnt */
	link->wr_rx_ibs[index].wr_id = wr_id;
	rc = ib_post_recv(link->roce_qp, &link->wr_rx_ibs[index], NULL);
	return rc;
}

int smc_wr_create_link(struct smc_link *lnk);
int smc_wr_alloc_link_mem(struct smc_link *lnk);
int smc_wr_alloc_lgr_mem(struct smc_link_group *lgr);
void smc_wr_free_link(struct smc_link *lnk);
void smc_wr_free_link_mem(struct smc_link *lnk);
void smc_wr_free_lgr_mem(struct smc_link_group *lgr);
void smc_wr_remember_qp_attr(struct smc_link *lnk);
void smc_wr_remove_dev(struct smc_ib_device *smcibdev);
void smc_wr_add_dev(struct smc_ib_device *smcibdev);

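/* TX path (sketch): a sender obtains a send buffer and pending context with
 * smc_wr_tx_get_free_slot(), fills the buffer, and posts it with
 * smc_wr_tx_send() or smc_wr_tx_send_wait(); an unused slot is returned
 * with smc_wr_tx_put_slot().
 */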
int smc_wr_tx_get_free_slot(struct smc_link *link, smc_wr_tx_handler handler,
			    struct smc_wr_buf **wr_buf,
			    struct smc_rdma_wr **wrs,
			    struct smc_wr_tx_pend_priv **wr_pend_priv);
int smc_wr_tx_get_v2_slot(struct smc_link *link,
			  smc_wr_tx_handler handler,
			  struct smc_wr_v2_buf **wr_buf,
			  struct smc_wr_tx_pend_priv **wr_pend_priv);
int smc_wr_tx_put_slot(struct smc_link *link,
		       struct smc_wr_tx_pend_priv *wr_pend_priv);
int smc_wr_tx_send(struct smc_link *link,
		   struct smc_wr_tx_pend_priv *wr_pend_priv);
int smc_wr_tx_v2_send(struct smc_link *link,
		      struct smc_wr_tx_pend_priv *priv, int len);
int smc_wr_tx_send_wait(struct smc_link *link, struct smc_wr_tx_pend_priv *priv,
			unsigned long timeout);
void smc_wr_tx_cq_handler(struct ib_cq *ib_cq, void *cq_context);
void smc_wr_tx_wait_no_pending_sends(struct smc_link *link);

int smc_wr_rx_register_handler(struct smc_wr_rx_handler *handler);
int smc_wr_rx_post_init(struct smc_link *link);
void smc_wr_rx_cq_handler(struct ib_cq *ib_cq, void *cq_context);
int smc_wr_reg_send(struct smc_link *link, struct ib_mr *mr);

#endif /* SMC_WR_H */