blob: 84b7ecd8c05ca8cc22b6777c9c15f45b49129a47 [file] [log] [blame]
Guvenc Gulcee0e4b8f2021-06-16 16:52:55 +02001/* SPDX-License-Identifier: GPL-2.0 */
2/*
3 * Shared Memory Communications over RDMA (SMC-R) and RoCE
4 *
5 * Macros for SMC statistics
6 *
7 * Copyright IBM Corp. 2021
8 *
9 * Author(s): Guvenc Gulce
10 */
11
12#ifndef NET_SMC_SMC_STATS_H_
13#define NET_SMC_SMC_STATS_H_
14#include <linux/init.h>
15#include <linux/mutex.h>
16#include <linux/percpu.h>
17#include <linux/ctype.h>
18#include <linux/smc.h>
19
20#include "smc_clc.h"
21
#define SMC_MAX_FBACK_RSN_CNT 30	/* max distinct fallback reasons tracked per role */
23
/* Histogram bucket indices for buffer/payload sizes.
 * Bucket SMC_BUF_<N>K covers sizes up to N KiB (see the _pos computation
 * in SMC_STAT_PAYLOAD_SUB/SMC_STAT_RMB_SIZE_SUB); SMC_BUF_G_1024K counts
 * everything greater than 1024 KiB.  Do not reorder - the values are used
 * as array indices into struct smc_stats_memsize.
 */
enum {
	SMC_BUF_8K,
	SMC_BUF_16K,
	SMC_BUF_32K,
	SMC_BUF_64K,
	SMC_BUF_128K,
	SMC_BUF_256K,
	SMC_BUF_512K,
	SMC_BUF_1024K,
	SMC_BUF_G_1024K,	/* sizes > 1024K */
	SMC_BUF_MAX,		/* number of buckets */
};
36
/* One fallback-to-TCP reason entry */
struct smc_stats_fback {
	int fback_code;	/* reason/diagnosis code for the fallback */
	u16 count;	/* number of fallbacks seen with this code */
};
41
/* Fallback reason statistics, kept separately for server and client role */
struct smc_stats_rsn {
	struct smc_stats_fback srv[SMC_MAX_FBACK_RSN_CNT];	/* server-side reasons */
	struct smc_stats_fback clnt[SMC_MAX_FBACK_RSN_CNT];	/* client-side reasons */
	u64			srv_fback_cnt;	/* total server fallbacks */
	u64			clnt_fback_cnt;	/* total client fallbacks */
};
48
/* Event counters for one buffer direction (tx or rx) */
struct smc_stats_rmbcnt {
	u64 buf_size_small_peer_cnt;	/* peer's buffer too small */
	u64 buf_size_small_cnt;		/* own buffer too small */
	u64 buf_full_peer_cnt;		/* peer's buffer ran full */
	u64 buf_full_cnt;		/* own buffer ran full */
	u64 reuse_cnt;			/* buffer reused from pool */
	u64 alloc_cnt;			/* buffer newly allocated */
	u64 dgrade_cnt;			/* allocation downgraded to smaller size */
};
58
/* Size histogram: one counter per SMC_BUF_* bucket */
struct smc_stats_memsize {
	u64 buf[SMC_BUF_MAX];
};
62
/* Per-technology statistics (one instance each for SMC-D and SMC-R) */
struct smc_stats_tech {
	struct smc_stats_memsize tx_rmbsize;	/* tx buffer size histogram */
	struct smc_stats_memsize rx_rmbsize;	/* rx buffer size histogram */
	struct smc_stats_memsize tx_pd;		/* tx payload size histogram */
	struct smc_stats_memsize rx_pd;		/* rx payload size histogram */
	struct smc_stats_rmbcnt rmb_tx;		/* tx buffer event counters */
	struct smc_stats_rmbcnt rmb_rx;		/* rx buffer event counters */
	u64			clnt_v1_succ_cnt;	/* successful SMCv1 client conns */
	u64			clnt_v2_succ_cnt;	/* successful SMCv2 client conns */
	u64			srv_v1_succ_cnt;	/* successful SMCv1 server conns */
	u64			srv_v2_succ_cnt;	/* successful SMCv2 server conns */
	u64			sendpage_cnt;
	u64			urg_data_cnt;	/* urgent data occurrences */
	u64			splice_cnt;
	u64			cork_cnt;
	u64			ndly_cnt;	/* TCP_NODELAY settings */
	u64			rx_bytes;	/* total bytes received */
	u64			tx_bytes;	/* total bytes sent */
	u64			rx_cnt;		/* number of rx operations */
	u64			tx_cnt;		/* number of tx operations */
};
84
/* Top-level per-cpu statistics; smc[] is indexed by SMC_TYPE_D/SMC_TYPE_R
 * (see the accounting macros below).
 */
struct smc_stats {
	struct smc_stats_tech	smc[2];
	u64			clnt_hshake_err_cnt;	/* client handshake failures */
	u64			srv_hshake_err_cnt;	/* server handshake failures */
};
90
/* Account one tx/rx operation of _len bytes with result _rc in the per-cpu
 * stats of technology _tech (SMC_TYPE_D/SMC_TYPE_R): always bump the
 * operation counter (key ## _cnt); on success (_rc > 0) also bump the
 * payload-size histogram bucket and add _rc to the byte total.
 * Bucket selection: fls64(l >> 13) maps the length to an SMC_BUF_* index;
 * an exact power of two (l == 1 << (_pos + 12)) belongs to the lower
 * bucket (e.g. exactly 8K -> SMC_BUF_8K), and anything beyond the largest
 * bucket is clamped to SMC_BUF_G_1024K.
 * Note: "break" exits the enclosing do/while(0), skipping the histogram
 * update for failed operations.
 */
#define SMC_STAT_PAYLOAD_SUB(_smc_stats, _tech, key, _len, _rc) \
do { \
	typeof(_smc_stats) stats = (_smc_stats); \
	typeof(_tech) t = (_tech); \
	typeof(_len) l = (_len); \
	int _pos = fls64((l) >> 13); \
	typeof(_rc) r = (_rc); \
	int m = SMC_BUF_MAX - 1; \
	this_cpu_inc((*stats).smc[t].key ## _cnt); \
	if (r <= 0) \
		break; \
	_pos = (_pos < m) ? ((l == 1 << (_pos + 12)) ? _pos - 1 : _pos) : m; \
	this_cpu_inc((*stats).smc[t].key ## _pd.buf[_pos]); \
	this_cpu_add((*stats).smc[t].key ## _bytes, r); \
} \
while (0)
107
/* Account a transmit of 'length' bytes with result 'rcode' on socket _smc,
 * dispatching to the SMC-D or SMC-R stats of the socket's net namespace.
 * A connection without a link (conn.lnk == NULL) is an SMC-D connection.
 */
#define SMC_STAT_TX_PAYLOAD(_smc, length, rcode) \
do { \
	typeof(_smc) __sk_smc = (_smc); \
	struct net *__net = sock_net(&__sk_smc->sk); \
	struct smc_stats __percpu *__stats = __net->smc.smc_stats; \
	typeof(length) __len = (length); \
	typeof(rcode) __rc = (rcode); \
	bool __is_smcr = !!__sk_smc->conn.lnk; \
	if (__is_smcr) \
		SMC_STAT_PAYLOAD_SUB(__stats, SMC_TYPE_R, tx, __len, __rc); \
	else \
		SMC_STAT_PAYLOAD_SUB(__stats, SMC_TYPE_D, tx, __len, __rc); \
} \
while (0)
122
/* Account a receive of 'length' bytes with result 'rcode' on socket _smc,
 * dispatching to the SMC-D or SMC-R stats of the socket's net namespace.
 * A connection without a link (conn.lnk == NULL) is an SMC-D connection.
 */
#define SMC_STAT_RX_PAYLOAD(_smc, length, rcode) \
do { \
	typeof(_smc) __sk_smc = (_smc); \
	struct net *__net = sock_net(&__sk_smc->sk); \
	struct smc_stats __percpu *__stats = __net->smc.smc_stats; \
	typeof(length) __len = (length); \
	typeof(rcode) __rc = (rcode); \
	bool __is_smcr = !!__sk_smc->conn.lnk; \
	if (__is_smcr) \
		SMC_STAT_PAYLOAD_SUB(__stats, SMC_TYPE_R, rx, __len, __rc); \
	else \
		SMC_STAT_PAYLOAD_SUB(__stats, SMC_TYPE_D, rx, __len, __rc); \
} \
while (0)
137
/* Account a buffer of size _len in the tx/rx (k) buffer-size histogram of
 * technology _tech.  Same bucket mapping as SMC_STAT_PAYLOAD_SUB: exact
 * powers of two fall into the lower bucket, oversized lengths are clamped
 * to the last bucket (SMC_BUF_G_1024K).
 */
#define SMC_STAT_RMB_SIZE_SUB(_smc_stats, _tech, k, _len) \
do { \
	typeof(_len) _l = (_len); \
	typeof(_tech) t = (_tech); \
	int _pos = fls((_l) >> 13); \
	int m = SMC_BUF_MAX - 1; \
	_pos = (_pos < m) ? ((_l == 1 << (_pos + 12)) ? _pos - 1 : _pos) : m; \
	this_cpu_inc((*(_smc_stats)).smc[t].k ## _rmbsize.buf[_pos]); \
} \
while (0)
148
/* Bump one per-cpu buffer event counter: field <type>_cnt in rmb_<key>
 * (key is tx or rx) of technology t.  The counter name is built by
 * token pasting, e.g. (alloc, rx) -> rmb_rx.alloc_cnt.
 */
#define SMC_STAT_RMB_SUB(_smc_stats, type, t, key) \
	this_cpu_inc((*(_smc_stats)).smc[t].rmb ## _ ## key.type ## _cnt)
Guvenc Gulcee0e4b8f2021-06-16 16:52:55 +0200151
/* Account a buffer of size _len in the matching size histogram, selected
 * by technology (_is_smcd) and direction (_is_rx), for the stats of
 * _smc's net namespace.
 */
#define SMC_STAT_RMB_SIZE(_smc, _is_smcd, _is_rx, _len) \
do { \
	struct net *__net = sock_net(&(_smc)->sk); \
	struct smc_stats __percpu *__stats = __net->smc.smc_stats; \
	typeof(_is_smcd) __d = (_is_smcd); \
	typeof(_is_rx) __r = (_is_rx); \
	typeof(_len) __l = (_len); \
	if (__d) { \
		if (__r) \
			SMC_STAT_RMB_SIZE_SUB(__stats, SMC_TYPE_D, rx, __l); \
		else \
			SMC_STAT_RMB_SIZE_SUB(__stats, SMC_TYPE_D, tx, __l); \
	} else { \
		if (__r) \
			SMC_STAT_RMB_SIZE_SUB(__stats, SMC_TYPE_R, rx, __l); \
		else \
			SMC_STAT_RMB_SIZE_SUB(__stats, SMC_TYPE_R, tx, __l); \
	} \
} \
while (0)
169
/* Bump the buffer event counter 'type' (e.g. alloc, reuse, buf_full),
 * selected by technology (_is_smcd) and direction (_is_rx), in the stats
 * of _smc's net namespace.
 */
#define SMC_STAT_RMB(_smc, type, _is_smcd, _is_rx) \
do { \
	struct net *__net = sock_net(&(_smc)->sk); \
	struct smc_stats __percpu *__stats = __net->smc.smc_stats; \
	typeof(_is_smcd) __d = (_is_smcd); \
	typeof(_is_rx) __r = (_is_rx); \
	if (__d) { \
		if (__r) \
			SMC_STAT_RMB_SUB(__stats, type, SMC_TYPE_D, rx); \
		else \
			SMC_STAT_RMB_SUB(__stats, type, SMC_TYPE_D, tx); \
	} else { \
		if (__r) \
			SMC_STAT_RMB_SUB(__stats, type, SMC_TYPE_R, rx); \
		else \
			SMC_STAT_RMB_SUB(__stats, type, SMC_TYPE_R, tx); \
	} \
} \
while (0)
186
/* Convenience wrappers around SMC_STAT_RMB() for the individual counters
 * in struct smc_stats_rmbcnt.  The TX_* variants always account on the
 * send path (is_rx == false), the RX_* variants on the receive path.
 */
#define SMC_STAT_BUF_REUSE(smc, is_smcd, is_rx) \
	SMC_STAT_RMB(smc, reuse, is_smcd, is_rx)

#define SMC_STAT_RMB_ALLOC(smc, is_smcd, is_rx) \
	SMC_STAT_RMB(smc, alloc, is_smcd, is_rx)

#define SMC_STAT_RMB_DOWNGRADED(smc, is_smcd, is_rx) \
	SMC_STAT_RMB(smc, dgrade, is_smcd, is_rx)

#define SMC_STAT_RMB_TX_PEER_FULL(smc, is_smcd) \
	SMC_STAT_RMB(smc, buf_full_peer, is_smcd, false)

#define SMC_STAT_RMB_TX_FULL(smc, is_smcd) \
	SMC_STAT_RMB(smc, buf_full, is_smcd, false)

#define SMC_STAT_RMB_TX_PEER_SIZE_SMALL(smc, is_smcd) \
	SMC_STAT_RMB(smc, buf_size_small_peer, is_smcd, false)

#define SMC_STAT_RMB_TX_SIZE_SMALL(smc, is_smcd) \
	SMC_STAT_RMB(smc, buf_size_small, is_smcd, false)

#define SMC_STAT_RMB_RX_SIZE_SMALL(smc, is_smcd) \
	SMC_STAT_RMB(smc, buf_size_small, is_smcd, true)

#define SMC_STAT_RMB_RX_FULL(smc, is_smcd) \
	SMC_STAT_RMB(smc, buf_full, is_smcd, true)
Guvenc Gulcee0e4b8f2021-06-16 16:52:55 +0200213
/* Bump the per-technology counter field 'type' (a member name of
 * struct smc_stats_tech, e.g. cork_cnt) for socket _smc.  A connection
 * without a link (conn.lnk == NULL) is an SMC-D connection.
 */
#define SMC_STAT_INC(_smc, type) \
do { \
	typeof(_smc) __sk_smc = (_smc); \
	bool __is_smcr = !!(__sk_smc)->conn.lnk; \
	struct net *__net = sock_net(&(__sk_smc)->sk); \
	struct smc_stats __percpu *__stats = __net->smc.smc_stats; \
	if (__is_smcr) \
		this_cpu_inc(__stats->smc[SMC_TYPE_R].type); \
	else \
		this_cpu_inc(__stats->smc[SMC_TYPE_D].type); \
} \
while (0)
226
/* Count a successful client connection, classified by SMC version
 * (hdr.version == SMC_V2) and technology (hdr.typev1 == SMC_TYPE_D)
 * taken from the accepted CLC message _aclc.
 */
#define SMC_STAT_CLNT_SUCC_INC(net, _aclc) \
do { \
	typeof(_aclc) __aclc = (_aclc); \
	bool __v2 = (__aclc->hdr.version == SMC_V2); \
	bool __smcd = (__aclc->hdr.typev1 == SMC_TYPE_D); \
	struct smc_stats __percpu *__stats = (net)->smc.smc_stats; \
	if (__smcd) { \
		if (__v2) \
			this_cpu_inc(__stats->smc[SMC_TYPE_D].clnt_v2_succ_cnt); \
		else \
			this_cpu_inc(__stats->smc[SMC_TYPE_D].clnt_v1_succ_cnt); \
	} else { \
		if (__v2) \
			this_cpu_inc(__stats->smc[SMC_TYPE_R].clnt_v2_succ_cnt); \
		else \
			this_cpu_inc(__stats->smc[SMC_TYPE_R].clnt_v1_succ_cnt); \
	} \
} \
while (0)
243
/* Count a successful server connection, classified by SMC version
 * (smcd_version has the SMC_V2 bit set) and technology (_ini->is_smcd)
 * taken from the connection init info _ini.
 */
#define SMC_STAT_SERV_SUCC_INC(net, _ini) \
do { \
	typeof(_ini) __ini = (_ini); \
	bool __v2 = (__ini->smcd_version & SMC_V2); \
	bool __smcd = (__ini->is_smcd); \
	struct smc_stats __percpu *__stats = (net)->smc.smc_stats; \
	if (__smcd) { \
		if (__v2) \
			this_cpu_inc(__stats->smc[SMC_TYPE_D].srv_v2_succ_cnt); \
		else \
			this_cpu_inc(__stats->smc[SMC_TYPE_D].srv_v1_succ_cnt); \
	} else { \
		if (__v2) \
			this_cpu_inc(__stats->smc[SMC_TYPE_R].srv_v2_succ_cnt); \
		else \
			this_cpu_inc(__stats->smc[SMC_TYPE_R].srv_v1_succ_cnt); \
	} \
} \
while (0)
260
/* Netlink dump callbacks for the statistics and fallback-reason tables,
 * and per-netns allocation/teardown of the per-cpu stats.
 */
int smc_nl_get_stats(struct sk_buff *skb, struct netlink_callback *cb);
int smc_nl_get_fback_stats(struct sk_buff *skb, struct netlink_callback *cb);
int smc_stats_init(struct net *net);
void smc_stats_exit(struct net *net);
Guvenc Gulcee0e4b8f2021-06-16 16:52:55 +0200265
266#endif /* NET_SMC_SMC_STATS_H_ */