blob: f15fca59b4b2654251a375bc522d88a12b450ccc [file] [log] [blame]
Thomas Gleixner09c434b2019-05-19 13:08:20 +01001// SPDX-License-Identifier: GPL-2.0-only
Ursula Braunf16a7dd2017-01-09 16:55:26 +01002/*
3 * Shared Memory Communications over RDMA (SMC-R) and RoCE
4 *
5 * Monitoring SMC transport protocol sockets
6 *
7 * Copyright IBM Corp. 2016
8 *
9 * Author(s): Ursula Braun <ubraun@linux.vnet.ibm.com>
10 */
11
12#include <linux/kernel.h>
13#include <linux/module.h>
14#include <linux/types.h>
15#include <linux/init.h>
16#include <linux/sock_diag.h>
17#include <linux/inet_diag.h>
18#include <linux/smc_diag.h>
19#include <net/netlink.h>
20#include <net/smc.h>
21
22#include "smc.h"
23#include "smc_core.h"
24
/* Per-dump cursor persisted across netlink dump passes.  It lives in the
 * scratch area of struct netlink_callback (cb->ctx); pos[] holds the index
 * of the next socket to dump, one slot per protocol
 * (SMCPROTO_SMC / SMCPROTO_SMC6), so an skb-full dump can resume where it
 * stopped.
 */
struct smc_diag_dump_ctx {
	int pos[2];
};

/* Reinterpret the netlink callback scratch space as our dump cursor. */
static struct smc_diag_dump_ctx *smc_dump_context(struct netlink_callback *cb)
{
	return (struct smc_diag_dump_ctx *)cb->ctx;
}
33
Ursula Braunf16a7dd2017-01-09 16:55:26 +010034static void smc_gid_be16_convert(__u8 *buf, u8 *gid_raw)
35{
36 sprintf(buf, "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x",
37 be16_to_cpu(((__be16 *)gid_raw)[0]),
38 be16_to_cpu(((__be16 *)gid_raw)[1]),
39 be16_to_cpu(((__be16 *)gid_raw)[2]),
40 be16_to_cpu(((__be16 *)gid_raw)[3]),
41 be16_to_cpu(((__be16 *)gid_raw)[4]),
42 be16_to_cpu(((__be16 *)gid_raw)[5]),
43 be16_to_cpu(((__be16 *)gid_raw)[6]),
44 be16_to_cpu(((__be16 *)gid_raw)[7]));
45}
46
/* Fill the address/identity part of a diag response for one SMC socket:
 * family, socket cookie, and - when an internal TCP (CLC) socket exists -
 * its ports, bound interface and addresses.
 */
static void smc_diag_msg_common_fill(struct smc_diag_msg *r, struct sock *sk)
{
	struct smc_sock *smc = smc_sk(sk);

	memset(r, 0, sizeof(*r));	/* don't leak uninitialized bytes to user space */
	r->diag_family = sk->sk_family;
	sock_diag_save_cookie(sk, r->id.idiag_cookie);
	if (!smc->clcsock)
		return;		/* no CLC socket yet; address fields stay zeroed */
	r->id.idiag_sport = htons(smc->clcsock->sk->sk_num);
	r->id.idiag_dport = smc->clcsock->sk->sk_dport;
	r->id.idiag_if = smc->clcsock->sk->sk_bound_dev_if;
	if (sk->sk_protocol == SMCPROTO_SMC) {
		/* IPv4: single 32-bit address in slot 0 */
		r->id.idiag_src[0] = smc->clcsock->sk->sk_rcv_saddr;
		r->id.idiag_dst[0] = smc->clcsock->sk->sk_daddr;
#if IS_ENABLED(CONFIG_IPV6)
	} else if (sk->sk_protocol == SMCPROTO_SMC6) {
		/* IPv6: copy the full 128-bit addresses */
		memcpy(&r->id.idiag_src, &smc->clcsock->sk->sk_v6_rcv_saddr,
		       sizeof(smc->clcsock->sk->sk_v6_rcv_saddr));
		memcpy(&r->id.idiag_dst, &smc->clcsock->sk->sk_v6_daddr,
		       sizeof(smc->clcsock->sk->sk_v6_daddr));
#endif
	}
}
71
72static int smc_diag_msg_attrs_fill(struct sock *sk, struct sk_buff *skb,
73 struct smc_diag_msg *r,
74 struct user_namespace *user_ns)
75{
76 if (nla_put_u8(skb, SMC_DIAG_SHUTDOWN, sk->sk_shutdown))
77 return 1;
78
79 r->diag_uid = from_kuid_munged(user_ns, sock_i_uid(sk));
80 r->diag_inode = sock_i_ino(sk);
81 return 0;
82}
83
84static int __smc_diag_dump(struct sock *sk, struct sk_buff *skb,
85 struct netlink_callback *cb,
86 const struct smc_diag_req *req,
87 struct nlattr *bc)
88{
89 struct smc_sock *smc = smc_sk(sk);
Karsten Graul603cc142018-07-25 16:35:32 +020090 struct smc_diag_fallback fallback;
Ursula Braunf16a7dd2017-01-09 16:55:26 +010091 struct user_namespace *user_ns;
92 struct smc_diag_msg *r;
93 struct nlmsghdr *nlh;
94
95 nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
96 cb->nlh->nlmsg_type, sizeof(*r), NLM_F_MULTI);
97 if (!nlh)
98 return -EMSGSIZE;
99
100 r = nlmsg_data(nlh);
101 smc_diag_msg_common_fill(r, sk);
102 r->diag_state = sk->sk_state;
Karsten Graulc6011712018-07-23 13:53:08 +0200103 if (smc->use_fallback)
104 r->diag_mode = SMC_DIAG_MODE_FALLBACK_TCP;
105 else if (smc->conn.lgr && smc->conn.lgr->is_smcd)
106 r->diag_mode = SMC_DIAG_MODE_SMCD;
107 else
108 r->diag_mode = SMC_DIAG_MODE_SMCR;
Ursula Braunf16a7dd2017-01-09 16:55:26 +0100109 user_ns = sk_user_ns(NETLINK_CB(cb->skb).sk);
110 if (smc_diag_msg_attrs_fill(sk, skb, r, user_ns))
111 goto errout;
112
Karsten Graul603cc142018-07-25 16:35:32 +0200113 fallback.reason = smc->fallback_rsn;
114 fallback.peer_diagnosis = smc->peer_diagnosis;
115 if (nla_put(skb, SMC_DIAG_FALLBACK, sizeof(fallback), &fallback) < 0)
116 goto errout;
117
Ursula Braun1a0a04c2018-01-25 11:15:36 +0100118 if ((req->diag_ext & (1 << (SMC_DIAG_CONNINFO - 1))) &&
119 smc->conn.alert_token_local) {
Ursula Braunf16a7dd2017-01-09 16:55:26 +0100120 struct smc_connection *conn = &smc->conn;
121 struct smc_diag_conninfo cinfo = {
122 .token = conn->alert_token_local,
Hans Wippel69cb7dc2018-05-18 09:34:10 +0200123 .sndbuf_size = conn->sndbuf_desc ?
124 conn->sndbuf_desc->len : 0,
125 .rmbe_size = conn->rmb_desc ? conn->rmb_desc->len : 0,
Ursula Braunf16a7dd2017-01-09 16:55:26 +0100126 .peer_rmbe_size = conn->peer_rmbe_size,
127
128 .rx_prod.wrap = conn->local_rx_ctrl.prod.wrap,
129 .rx_prod.count = conn->local_rx_ctrl.prod.count,
130 .rx_cons.wrap = conn->local_rx_ctrl.cons.wrap,
131 .rx_cons.count = conn->local_rx_ctrl.cons.count,
132
133 .tx_prod.wrap = conn->local_tx_ctrl.prod.wrap,
134 .tx_prod.count = conn->local_tx_ctrl.prod.count,
135 .tx_cons.wrap = conn->local_tx_ctrl.cons.wrap,
136 .tx_cons.count = conn->local_tx_ctrl.cons.count,
137
138 .tx_prod_flags =
139 *(u8 *)&conn->local_tx_ctrl.prod_flags,
140 .tx_conn_state_flags =
141 *(u8 *)&conn->local_tx_ctrl.conn_state_flags,
142 .rx_prod_flags = *(u8 *)&conn->local_rx_ctrl.prod_flags,
143 .rx_conn_state_flags =
144 *(u8 *)&conn->local_rx_ctrl.conn_state_flags,
145
146 .tx_prep.wrap = conn->tx_curs_prep.wrap,
147 .tx_prep.count = conn->tx_curs_prep.count,
148 .tx_sent.wrap = conn->tx_curs_sent.wrap,
149 .tx_sent.count = conn->tx_curs_sent.count,
150 .tx_fin.wrap = conn->tx_curs_fin.wrap,
151 .tx_fin.count = conn->tx_curs_fin.count,
152 };
153
154 if (nla_put(skb, SMC_DIAG_CONNINFO, sizeof(cinfo), &cinfo) < 0)
155 goto errout;
156 }
157
Hans Wippelc6ba7c92018-06-28 19:05:07 +0200158 if (smc->conn.lgr && !smc->conn.lgr->is_smcd &&
159 (req->diag_ext & (1 << (SMC_DIAG_LGRINFO - 1))) &&
Ursula Braun1a0a04c2018-01-25 11:15:36 +0100160 !list_empty(&smc->conn.lgr->list)) {
Ursula Braunf16a7dd2017-01-09 16:55:26 +0100161 struct smc_diag_lgrinfo linfo = {
162 .role = smc->conn.lgr->role,
163 .lnk[0].ibport = smc->conn.lgr->lnk[0].ibport,
164 .lnk[0].link_id = smc->conn.lgr->lnk[0].link_id,
165 };
166
167 memcpy(linfo.lnk[0].ibname,
168 smc->conn.lgr->lnk[0].smcibdev->ibdev->name,
169 sizeof(smc->conn.lgr->lnk[0].smcibdev->ibdev->name));
170 smc_gid_be16_convert(linfo.lnk[0].gid,
Ursula Braun7005ada2018-07-25 16:35:31 +0200171 smc->conn.lgr->lnk[0].gid);
Ursula Braunf16a7dd2017-01-09 16:55:26 +0100172 smc_gid_be16_convert(linfo.lnk[0].peer_gid,
173 smc->conn.lgr->lnk[0].peer_gid);
174
175 if (nla_put(skb, SMC_DIAG_LGRINFO, sizeof(linfo), &linfo) < 0)
176 goto errout;
177 }
Hans Wippel4b1b7d3b2018-06-28 19:05:12 +0200178 if (smc->conn.lgr && smc->conn.lgr->is_smcd &&
179 (req->diag_ext & (1 << (SMC_DIAG_DMBINFO - 1))) &&
180 !list_empty(&smc->conn.lgr->list)) {
181 struct smc_connection *conn = &smc->conn;
Peilin Yece51f632020-08-20 16:30:52 +0200182 struct smcd_diag_dmbinfo dinfo;
183
184 memset(&dinfo, 0, sizeof(dinfo));
185
186 dinfo.linkid = *((u32 *)conn->lgr->id);
187 dinfo.peer_gid = conn->lgr->peer_gid;
188 dinfo.my_gid = conn->lgr->smcd->local_gid;
189 dinfo.token = conn->rmb_desc->token;
190 dinfo.peer_token = conn->peer_token;
Hans Wippel4b1b7d3b2018-06-28 19:05:12 +0200191
192 if (nla_put(skb, SMC_DIAG_DMBINFO, sizeof(dinfo), &dinfo) < 0)
193 goto errout;
194 }
Ursula Braunf16a7dd2017-01-09 16:55:26 +0100195
196 nlmsg_end(skb, nlh);
197 return 0;
198
199errout:
200 nlmsg_cancel(skb, nlh);
201 return -EMSGSIZE;
202}
203
Karsten Grauled759862018-05-02 16:56:45 +0200204static int smc_diag_dump_proto(struct proto *prot, struct sk_buff *skb,
Guvenc Gulce8418cb42020-09-10 18:48:28 +0200205 struct netlink_callback *cb, int p_type)
Ursula Braunf16a7dd2017-01-09 16:55:26 +0100206{
Guvenc Gulce8418cb42020-09-10 18:48:28 +0200207 struct smc_diag_dump_ctx *cb_ctx = smc_dump_context(cb);
Ursula Braunf16a7dd2017-01-09 16:55:26 +0100208 struct net *net = sock_net(skb->sk);
Guvenc Gulce8418cb42020-09-10 18:48:28 +0200209 int snum = cb_ctx->pos[p_type];
Ursula Braunf16a7dd2017-01-09 16:55:26 +0100210 struct nlattr *bc = NULL;
211 struct hlist_head *head;
Guvenc Gulce8418cb42020-09-10 18:48:28 +0200212 int rc = 0, num = 0;
Ursula Braunf16a7dd2017-01-09 16:55:26 +0100213 struct sock *sk;
Ursula Braunf16a7dd2017-01-09 16:55:26 +0100214
Karsten Grauled759862018-05-02 16:56:45 +0200215 read_lock(&prot->h.smc_hash->lock);
216 head = &prot->h.smc_hash->ht;
Ursula Braunf16a7dd2017-01-09 16:55:26 +0100217 if (hlist_empty(head))
218 goto out;
219
220 sk_for_each(sk, head) {
221 if (!net_eq(sock_net(sk), net))
222 continue;
Guvenc Gulce8418cb42020-09-10 18:48:28 +0200223 if (num < snum)
224 goto next;
Ursula Braunf16a7dd2017-01-09 16:55:26 +0100225 rc = __smc_diag_dump(sk, skb, cb, nlmsg_data(cb->nlh), bc);
Guvenc Gulce8418cb42020-09-10 18:48:28 +0200226 if (rc < 0)
227 goto out;
228next:
229 num++;
Ursula Braunf16a7dd2017-01-09 16:55:26 +0100230 }
231
232out:
Karsten Grauled759862018-05-02 16:56:45 +0200233 read_unlock(&prot->h.smc_hash->lock);
Guvenc Gulce8418cb42020-09-10 18:48:28 +0200234 cb_ctx->pos[p_type] = num;
Karsten Grauled759862018-05-02 16:56:45 +0200235 return rc;
236}
237
238static int smc_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
239{
240 int rc = 0;
241
Guvenc Gulce8418cb42020-09-10 18:48:28 +0200242 rc = smc_diag_dump_proto(&smc_proto, skb, cb, SMCPROTO_SMC);
Karsten Grauled759862018-05-02 16:56:45 +0200243 if (!rc)
Guvenc Gulce8418cb42020-09-10 18:48:28 +0200244 smc_diag_dump_proto(&smc_proto6, skb, cb, SMCPROTO_SMC6);
245 return skb->len;
Ursula Braunf16a7dd2017-01-09 16:55:26 +0100246}
247
248static int smc_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h)
249{
250 struct net *net = sock_net(skb->sk);
251
252 if (h->nlmsg_type == SOCK_DIAG_BY_FAMILY &&
253 h->nlmsg_flags & NLM_F_DUMP) {
254 {
255 struct netlink_dump_control c = {
256 .dump = smc_diag_dump,
257 .min_dump_alloc = SKB_WITH_OVERHEAD(32768),
258 };
259 return netlink_dump_start(net->diag_nlsk, skb, h, &c);
260 }
261 }
262 return 0;
263}
264
/* Registration record hooking AF_SMC into the generic sock_diag core. */
static const struct sock_diag_handler smc_diag_handler = {
	.family = AF_SMC,
	.dump = smc_diag_handler_dump,
};
269
/* Module init: register the AF_SMC handler with the sock_diag core. */
static int __init smc_diag_init(void)
{
	return sock_diag_register(&smc_diag_handler);
}
274
/* Module exit: undo smc_diag_init()'s registration. */
static void __exit smc_diag_exit(void)
{
	sock_diag_unregister(&smc_diag_handler);
}
279
module_init(smc_diag_init);
module_exit(smc_diag_exit);
MODULE_LICENSE("GPL");
/* 43 is AF_SMC (see inline comment); the alias lets this module be loaded
 * on demand for NETLINK_SOCK_DIAG requests targeting the SMC family.
 */
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 43 /* AF_SMC */);