/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NET_GEN_STATS_H
#define __NET_GEN_STATS_H

#include <linux/gen_stats.h>
#include <linux/socket.h>
#include <linux/rtnetlink.h>
#include <linux/pkt_sched.h>

/* Throughput stats.
 * Must be initialized beforehand with gnet_stats_basic_sync_init().
 *
 * If no reads can ever occur parallel to writes (e.g. stack-allocated
 * bstats), then the internal stat values can be written to and read
 * from directly. Otherwise, use _bstats_set/update() for writes and
 * gnet_stats_add_basic() for reads.
 */
struct gnet_stats_basic_sync {
	u64_stats_t bytes;
	u64_stats_t packets;
	struct u64_stats_sync syncp;
} __aligned(2 * sizeof(u64));
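
/*
 * Example (illustrative sketch, not part of this header's API): filling
 * a counter from a transmit path. _bstats_update() is assumed to be the
 * helper declared in net/sch_generic.h, which wraps both writes in the
 * u64_stats_sync write-side section; "skb" is a hypothetical packet.
 *
 *	struct gnet_stats_basic_sync bstats;
 *
 *	gnet_stats_basic_sync_init(&bstats);
 *	...
 *	// one packet of skb->len bytes transmitted
 *	_bstats_update(&bstats, skb->len, 1);
 */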

struct net_rate_estimator;

struct gnet_dump {
	spinlock_t *      lock;
	struct sk_buff *  skb;
	struct nlattr *   tail;

	/* Backward compatibility */
	int               compat_tc_stats;
	int               compat_xstats;
	int               padattr;
	void *            xstats;
	int               xstats_len;
	struct tc_stats   tc_stats;
};

void gnet_stats_basic_sync_init(struct gnet_stats_basic_sync *b);
int gnet_stats_start_copy(struct sk_buff *skb, int type, spinlock_t *lock,
			  struct gnet_dump *d, int padattr);

int gnet_stats_start_copy_compat(struct sk_buff *skb, int type,
				 int tc_stats_type, int xstats_type,
				 spinlock_t *lock, struct gnet_dump *d,
				 int padattr);

int gnet_stats_copy_basic(struct gnet_dump *d,
			  struct gnet_stats_basic_sync __percpu *cpu,
			  struct gnet_stats_basic_sync *b, bool running);
void gnet_stats_add_basic(struct gnet_stats_basic_sync *bstats,
			  struct gnet_stats_basic_sync __percpu *cpu,
			  struct gnet_stats_basic_sync *b, bool running);
int gnet_stats_copy_basic_hw(struct gnet_dump *d,
			     struct gnet_stats_basic_sync __percpu *cpu,
			     struct gnet_stats_basic_sync *b, bool running);
int gnet_stats_copy_rate_est(struct gnet_dump *d,
			     struct net_rate_estimator __rcu **ptr);
int gnet_stats_copy_queue(struct gnet_dump *d,
			  struct gnet_stats_queue __percpu *cpu_q,
			  struct gnet_stats_queue *q, __u32 qlen);
void gnet_stats_add_queue(struct gnet_stats_queue *qstats,
			  const struct gnet_stats_queue __percpu *cpu_q,
			  const struct gnet_stats_queue *q);
int gnet_stats_copy_app(struct gnet_dump *d, void *st, int len);

int gnet_stats_finish_copy(struct gnet_dump *d);
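
/*
 * Example (illustrative sketch): the usual dump sequence from a qdisc
 * dump callback. "sch" is a hypothetical qdisc providing the stats, and
 * passing true for @running assumes a lockless (per-CPU) fast path;
 * TCA_STATS2 and TCA_PAD are the standard rtnetlink attribute types.
 *
 *	struct gnet_dump d;
 *
 *	if (gnet_stats_start_copy(skb, TCA_STATS2, NULL, &d, TCA_PAD) < 0)
 *		goto nla_put_failure;
 *	if (gnet_stats_copy_basic(&d, sch->cpu_bstats, &sch->bstats,
 *				  true) < 0 ||
 *	    gnet_stats_copy_queue(&d, sch->cpu_qstats, &sch->qstats,
 *				  sch->q.qlen) < 0)
 *		goto nla_put_failure;
 *	if (gnet_stats_finish_copy(&d) < 0)
 *		goto nla_put_failure;
 */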

int gen_new_estimator(struct gnet_stats_basic_sync *bstats,
		      struct gnet_stats_basic_sync __percpu *cpu_bstats,
		      struct net_rate_estimator __rcu **rate_est,
		      spinlock_t *lock,
		      bool running, struct nlattr *opt);
void gen_kill_estimator(struct net_rate_estimator __rcu **ptr);
int gen_replace_estimator(struct gnet_stats_basic_sync *bstats,
			  struct gnet_stats_basic_sync __percpu *cpu_bstats,
			  struct net_rate_estimator __rcu **ptr,
			  spinlock_t *lock,
			  bool running, struct nlattr *opt);
bool gen_estimator_active(struct net_rate_estimator __rcu **ptr);
bool gen_estimator_read(struct net_rate_estimator __rcu **ptr,
			struct gnet_stats_rate_est64 *sample);
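
/*
 * Example (illustrative sketch): rate-estimator lifecycle. "q" is a
 * hypothetical object embedding the counters and a spinlock, and "opt"
 * is the TCA_RATE netlink attribute supplied by user space.
 *
 *	struct net_rate_estimator __rcu *rate_est = NULL;
 *	struct gnet_stats_rate_est64 sample;
 *	int err;
 *
 *	err = gen_new_estimator(&q->bstats, NULL, &rate_est,
 *				&q->lock, false, opt);
 *	if (err)
 *		return err;
 *	...
 *	// snapshot the current estimate, if an estimator is installed
 *	if (gen_estimator_read(&rate_est, &sample))
 *		pr_debug("rate: %llu Bps\n", sample.bps);
 *	...
 *	gen_kill_estimator(&rate_est);
 */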
#endif