// SPDX-License-Identifier: GPL-2.0-only
/*
 * Shared Memory Communications over RDMA (SMC-R) and RoCE
 *
 * SMC statistics netlink routines
 *
 * Copyright IBM Corp. 2021
 *
 * Author(s): Guvenc Gulce
 */
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/ctype.h>
#include <linux/smc.h>
#include <net/genetlink.h>
#include <net/sock.h>
#include "smc_netlink.h"
#include "smc_stats.h"

/* serialize fallback reason statistic gathering */
DEFINE_MUTEX(smc_stat_fback_rsn);
struct smc_stats __percpu *smc_stats;   /* per cpu counters for SMC */
struct smc_stats_reason fback_rsn;

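/* Allocate the per-CPU SMC statistics counters and reset the fallback
 * reason statistics.
 */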
int __init smc_stats_init(void)
{
        memset(&fback_rsn, 0, sizeof(fback_rsn));
        smc_stats = alloc_percpu(struct smc_stats);
        if (!smc_stats)
                return -ENOMEM;

        return 0;
}

void smc_stats_exit(void)
{
        free_percpu(smc_stats);
}

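/* Put the TX or RX RMB buffer counters of one SMC technology (SMC-D or
 * SMC-R) into a nested netlink attribute of the given type.
 */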
static int smc_nl_fill_stats_rmb_data(struct sk_buff *skb,
                                      struct smc_stats *stats, int tech,
                                      int type)
{
        struct smc_stats_rmbcnt *stats_rmb_cnt;
        struct nlattr *attrs;

        if (type == SMC_NLA_STATS_T_TX_RMB_STATS)
                stats_rmb_cnt = &stats->smc[tech].rmb_tx;
        else
                stats_rmb_cnt = &stats->smc[tech].rmb_rx;

        attrs = nla_nest_start(skb, type);
        if (!attrs)
                goto errout;
        if (nla_put_u64_64bit(skb, SMC_NLA_STATS_RMB_REUSE_CNT,
                              stats_rmb_cnt->reuse_cnt,
                              SMC_NLA_STATS_RMB_PAD))
                goto errattr;
        if (nla_put_u64_64bit(skb, SMC_NLA_STATS_RMB_SIZE_SM_PEER_CNT,
                              stats_rmb_cnt->buf_size_small_peer_cnt,
                              SMC_NLA_STATS_RMB_PAD))
                goto errattr;
        if (nla_put_u64_64bit(skb, SMC_NLA_STATS_RMB_SIZE_SM_CNT,
                              stats_rmb_cnt->buf_size_small_cnt,
                              SMC_NLA_STATS_RMB_PAD))
                goto errattr;
        if (nla_put_u64_64bit(skb, SMC_NLA_STATS_RMB_FULL_PEER_CNT,
                              stats_rmb_cnt->buf_full_peer_cnt,
                              SMC_NLA_STATS_RMB_PAD))
                goto errattr;
        if (nla_put_u64_64bit(skb, SMC_NLA_STATS_RMB_FULL_CNT,
                              stats_rmb_cnt->buf_full_cnt,
                              SMC_NLA_STATS_RMB_PAD))
                goto errattr;
        if (nla_put_u64_64bit(skb, SMC_NLA_STATS_RMB_ALLOC_CNT,
                              stats_rmb_cnt->alloc_cnt,
                              SMC_NLA_STATS_RMB_PAD))
                goto errattr;
        if (nla_put_u64_64bit(skb, SMC_NLA_STATS_RMB_DGRADE_CNT,
                              stats_rmb_cnt->dgrade_cnt,
                              SMC_NLA_STATS_RMB_PAD))
                goto errattr;

        nla_nest_end(skb, attrs);
        return 0;

errattr:
        nla_nest_cancel(skb, attrs);
errout:
        return -EMSGSIZE;
}

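/* Put one payload or RMB size histogram (TX or RX) of one SMC technology
 * into a nested netlink attribute; each bucket counts buffers of that
 * size class (8K up to more than 1024K).
 */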
static int smc_nl_fill_stats_bufsize_data(struct sk_buff *skb,
                                          struct smc_stats *stats, int tech,
                                          int type)
{
        struct smc_stats_memsize *stats_pload;
        struct nlattr *attrs;

        if (type == SMC_NLA_STATS_T_TXPLOAD_SIZE)
                stats_pload = &stats->smc[tech].tx_pd;
        else if (type == SMC_NLA_STATS_T_RXPLOAD_SIZE)
                stats_pload = &stats->smc[tech].rx_pd;
        else if (type == SMC_NLA_STATS_T_TX_RMB_SIZE)
                stats_pload = &stats->smc[tech].tx_rmbsize;
        else if (type == SMC_NLA_STATS_T_RX_RMB_SIZE)
                stats_pload = &stats->smc[tech].rx_rmbsize;
        else
                goto errout;

        attrs = nla_nest_start(skb, type);
        if (!attrs)
                goto errout;
        if (nla_put_u64_64bit(skb, SMC_NLA_STATS_PLOAD_8K,
                              stats_pload->buf[SMC_BUF_8K],
                              SMC_NLA_STATS_PLOAD_PAD))
                goto errattr;
        if (nla_put_u64_64bit(skb, SMC_NLA_STATS_PLOAD_16K,
                              stats_pload->buf[SMC_BUF_16K],
                              SMC_NLA_STATS_PLOAD_PAD))
                goto errattr;
        if (nla_put_u64_64bit(skb, SMC_NLA_STATS_PLOAD_32K,
                              stats_pload->buf[SMC_BUF_32K],
                              SMC_NLA_STATS_PLOAD_PAD))
                goto errattr;
        if (nla_put_u64_64bit(skb, SMC_NLA_STATS_PLOAD_64K,
                              stats_pload->buf[SMC_BUF_64K],
                              SMC_NLA_STATS_PLOAD_PAD))
                goto errattr;
        if (nla_put_u64_64bit(skb, SMC_NLA_STATS_PLOAD_128K,
                              stats_pload->buf[SMC_BUF_128K],
                              SMC_NLA_STATS_PLOAD_PAD))
                goto errattr;
        if (nla_put_u64_64bit(skb, SMC_NLA_STATS_PLOAD_256K,
                              stats_pload->buf[SMC_BUF_256K],
                              SMC_NLA_STATS_PLOAD_PAD))
                goto errattr;
        if (nla_put_u64_64bit(skb, SMC_NLA_STATS_PLOAD_512K,
                              stats_pload->buf[SMC_BUF_512K],
                              SMC_NLA_STATS_PLOAD_PAD))
                goto errattr;
        if (nla_put_u64_64bit(skb, SMC_NLA_STATS_PLOAD_1024K,
                              stats_pload->buf[SMC_BUF_1024K],
                              SMC_NLA_STATS_PLOAD_PAD))
                goto errattr;
        if (nla_put_u64_64bit(skb, SMC_NLA_STATS_PLOAD_G_1024K,
                              stats_pload->buf[SMC_BUF_G_1024K],
                              SMC_NLA_STATS_PLOAD_PAD))
                goto errattr;

        nla_nest_end(skb, attrs);
        return 0;

errattr:
        nla_nest_cancel(skb, attrs);
errout:
        return -EMSGSIZE;
}

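/* Collect all counters of one SMC technology into a nested
 * SMC_NLA_STATS_SMCD_TECH or SMC_NLA_STATS_SMCR_TECH attribute:
 * RMB counters, size histograms and per-technology traffic counters.
 */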
static int smc_nl_fill_stats_tech_data(struct sk_buff *skb,
                                       struct smc_stats *stats, int tech)
{
        struct smc_stats_tech *smc_tech;
        struct nlattr *attrs;

        smc_tech = &stats->smc[tech];
        if (tech == SMC_TYPE_D)
                attrs = nla_nest_start(skb, SMC_NLA_STATS_SMCD_TECH);
        else
                attrs = nla_nest_start(skb, SMC_NLA_STATS_SMCR_TECH);

        if (!attrs)
                goto errout;
        if (smc_nl_fill_stats_rmb_data(skb, stats, tech,
                                       SMC_NLA_STATS_T_TX_RMB_STATS))
                goto errattr;
        if (smc_nl_fill_stats_rmb_data(skb, stats, tech,
                                       SMC_NLA_STATS_T_RX_RMB_STATS))
                goto errattr;
        if (smc_nl_fill_stats_bufsize_data(skb, stats, tech,
                                           SMC_NLA_STATS_T_TXPLOAD_SIZE))
                goto errattr;
        if (smc_nl_fill_stats_bufsize_data(skb, stats, tech,
                                           SMC_NLA_STATS_T_RXPLOAD_SIZE))
                goto errattr;
        if (smc_nl_fill_stats_bufsize_data(skb, stats, tech,
                                           SMC_NLA_STATS_T_TX_RMB_SIZE))
                goto errattr;
        if (smc_nl_fill_stats_bufsize_data(skb, stats, tech,
                                           SMC_NLA_STATS_T_RX_RMB_SIZE))
                goto errattr;
        if (nla_put_u64_64bit(skb, SMC_NLA_STATS_T_CLNT_V1_SUCC,
                              smc_tech->clnt_v1_succ_cnt,
                              SMC_NLA_STATS_PAD))
                goto errattr;
        if (nla_put_u64_64bit(skb, SMC_NLA_STATS_T_CLNT_V2_SUCC,
                              smc_tech->clnt_v2_succ_cnt,
                              SMC_NLA_STATS_PAD))
                goto errattr;
        if (nla_put_u64_64bit(skb, SMC_NLA_STATS_T_SRV_V1_SUCC,
                              smc_tech->srv_v1_succ_cnt,
                              SMC_NLA_STATS_PAD))
                goto errattr;
        if (nla_put_u64_64bit(skb, SMC_NLA_STATS_T_SRV_V2_SUCC,
                              smc_tech->srv_v2_succ_cnt,
                              SMC_NLA_STATS_PAD))
                goto errattr;
        if (nla_put_u64_64bit(skb, SMC_NLA_STATS_T_RX_BYTES,
                              smc_tech->rx_bytes,
                              SMC_NLA_STATS_PAD))
                goto errattr;
        if (nla_put_u64_64bit(skb, SMC_NLA_STATS_T_TX_BYTES,
                              smc_tech->tx_bytes,
                              SMC_NLA_STATS_PAD))
                goto errattr;
        if (nla_put_u64_64bit(skb, SMC_NLA_STATS_T_RX_CNT,
                              smc_tech->rx_cnt,
                              SMC_NLA_STATS_PAD))
                goto errattr;
        if (nla_put_u64_64bit(skb, SMC_NLA_STATS_T_TX_CNT,
                              smc_tech->tx_cnt,
                              SMC_NLA_STATS_PAD))
                goto errattr;
        if (nla_put_u64_64bit(skb, SMC_NLA_STATS_T_SENDPAGE_CNT,
                              smc_tech->sendpage_cnt,
                              SMC_NLA_STATS_PAD))
                goto errattr;
        if (nla_put_u64_64bit(skb, SMC_NLA_STATS_T_CORK_CNT,
                              smc_tech->cork_cnt,
                              SMC_NLA_STATS_PAD))
                goto errattr;
        if (nla_put_u64_64bit(skb, SMC_NLA_STATS_T_NDLY_CNT,
                              smc_tech->ndly_cnt,
                              SMC_NLA_STATS_PAD))
                goto errattr;
        if (nla_put_u64_64bit(skb, SMC_NLA_STATS_T_SPLICE_CNT,
                              smc_tech->splice_cnt,
                              SMC_NLA_STATS_PAD))
                goto errattr;
        if (nla_put_u64_64bit(skb, SMC_NLA_STATS_T_URG_DATA_CNT,
                              smc_tech->urg_data_cnt,
                              SMC_NLA_STATS_PAD))
                goto errattr;

        nla_nest_end(skb, attrs);
        return 0;

errattr:
        nla_nest_cancel(skb, attrs);
errout:
        return -EMSGSIZE;
}

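/* Netlink dump callback for SMC_NETLINK_GET_STATS: sum the per-CPU counters
 * into a temporary buffer and emit them as a single netlink message
 * containing the SMC-D and SMC-R technology nests plus the handshake error
 * counters.
 */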
int smc_nl_get_stats(struct sk_buff *skb,
                     struct netlink_callback *cb)
{
        struct smc_nl_dmp_ctx *cb_ctx = smc_nl_dmp_ctx(cb);
        struct smc_stats *stats;
        struct nlattr *attrs;
        int cpu, i, size;
        void *nlh;
        u64 *src;
        u64 *sum;

        if (cb_ctx->pos[0])
                goto errmsg;
        nlh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
                          &smc_gen_nl_family, NLM_F_MULTI,
                          SMC_NETLINK_GET_STATS);
        if (!nlh)
                goto errmsg;

        attrs = nla_nest_start(skb, SMC_GEN_STATS);
        if (!attrs)
                goto errnest;
        stats = kzalloc(sizeof(*stats), GFP_KERNEL);
        if (!stats)
                goto erralloc;
        size = sizeof(*stats) / sizeof(u64);
        for_each_possible_cpu(cpu) {
                src = (u64 *)per_cpu_ptr(smc_stats, cpu);
                sum = (u64 *)stats;
                for (i = 0; i < size; i++)
                        *(sum++) += *(src++);
        }
        if (smc_nl_fill_stats_tech_data(skb, stats, SMC_TYPE_D))
                goto errattr;
        if (smc_nl_fill_stats_tech_data(skb, stats, SMC_TYPE_R))
                goto errattr;
        if (nla_put_u64_64bit(skb, SMC_NLA_STATS_CLNT_HS_ERR_CNT,
                              stats->clnt_hshake_err_cnt,
                              SMC_NLA_STATS_PAD))
                goto errattr;
        if (nla_put_u64_64bit(skb, SMC_NLA_STATS_SRV_HS_ERR_CNT,
                              stats->srv_hshake_err_cnt,
                              SMC_NLA_STATS_PAD))
                goto errattr;

        nla_nest_end(skb, attrs);
        genlmsg_end(skb, nlh);
        cb_ctx->pos[0] = 1;
        kfree(stats);
        return skb->len;

errattr:
        kfree(stats);
erralloc:
        nla_nest_cancel(skb, attrs);
errnest:
        genlmsg_cancel(skb, nlh);
errmsg:
        return skb->len;
}

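/* Emit one fallback reason entry (server or client side) as its own netlink
 * message; the overall server and client fallback counts are only included
 * in the first message of a dump. Returns -ENODATA for an unused array slot.
 */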
static int smc_nl_get_fback_details(struct sk_buff *skb,
                                    struct netlink_callback *cb, int pos,
                                    bool is_srv)
{
        struct smc_nl_dmp_ctx *cb_ctx = smc_nl_dmp_ctx(cb);
        int cnt_reported = cb_ctx->pos[2];
        struct smc_stats_fback *trgt_arr;
        struct nlattr *attrs;
        int rc = 0;
        void *nlh;

        if (is_srv)
                trgt_arr = &fback_rsn.srv[0];
        else
                trgt_arr = &fback_rsn.clnt[0];
        if (!trgt_arr[pos].fback_code)
                return -ENODATA;
        nlh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
                          &smc_gen_nl_family, NLM_F_MULTI,
                          SMC_NETLINK_GET_FBACK_STATS);
        if (!nlh)
                goto errmsg;
        attrs = nla_nest_start(skb, SMC_GEN_FBACK_STATS);
        if (!attrs)
                goto errout;
        if (nla_put_u8(skb, SMC_NLA_FBACK_STATS_TYPE, is_srv))
                goto errattr;
        if (!cnt_reported) {
                if (nla_put_u64_64bit(skb, SMC_NLA_FBACK_STATS_SRV_CNT,
                                      fback_rsn.srv_fback_cnt,
                                      SMC_NLA_FBACK_STATS_PAD))
                        goto errattr;
                if (nla_put_u64_64bit(skb, SMC_NLA_FBACK_STATS_CLNT_CNT,
                                      fback_rsn.clnt_fback_cnt,
                                      SMC_NLA_FBACK_STATS_PAD))
                        goto errattr;
                cnt_reported = 1;
        }

        if (nla_put_u32(skb, SMC_NLA_FBACK_STATS_RSN_CODE,
                        trgt_arr[pos].fback_code))
                goto errattr;
        if (nla_put_u16(skb, SMC_NLA_FBACK_STATS_RSN_CNT,
                        trgt_arr[pos].count))
                goto errattr;

        cb_ctx->pos[2] = cnt_reported;
        nla_nest_end(skb, attrs);
        genlmsg_end(skb, nlh);
        return rc;

errattr:
        nla_nest_cancel(skb, attrs);
errout:
        genlmsg_cancel(skb, nlh);
errmsg:
        return -EMSGSIZE;
}

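/* Netlink dump callback for SMC_NETLINK_GET_FBACK_STATS: walk the server and
 * client fallback reason arrays under smc_stat_fback_rsn and save the current
 * position in the dump context so a later dump call can resume where the
 * message buffer ran out of space.
 */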
int smc_nl_get_fback_stats(struct sk_buff *skb, struct netlink_callback *cb)
{
        struct smc_nl_dmp_ctx *cb_ctx = smc_nl_dmp_ctx(cb);
        int rc_srv = 0, rc_clnt = 0, k;
        int skip_serv = cb_ctx->pos[1];
        int snum = cb_ctx->pos[0];
        bool is_srv = true;

        mutex_lock(&smc_stat_fback_rsn);
        for (k = 0; k < SMC_MAX_FBACK_RSN_CNT; k++) {
                if (k < snum)
                        continue;
                if (!skip_serv) {
                        rc_srv = smc_nl_get_fback_details(skb, cb, k, is_srv);
                        if (rc_srv && rc_srv != -ENODATA)
                                break;
                } else {
                        skip_serv = 0;
                }
                rc_clnt = smc_nl_get_fback_details(skb, cb, k, !is_srv);
                if (rc_clnt && rc_clnt != -ENODATA) {
                        skip_serv = 1;
                        break;
                }
                if (rc_clnt == -ENODATA && rc_srv == -ENODATA)
                        break;
        }
        mutex_unlock(&smc_stat_fback_rsn);
        cb_ctx->pos[1] = skip_serv;
        cb_ctx->pos[0] = k;
        return skb->len;
}