blob: 1424e02cef90c0139a175933577f1b8537bce51a [file] [log] [blame]
Greg Kroah-Hartmanb2441312017-11-01 15:07:57 +01001/* SPDX-License-Identifier: GPL-2.0 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002#ifndef __NET_GEN_STATS_H
3#define __NET_GEN_STATS_H
4
5#include <linux/gen_stats.h>
6#include <linux/socket.h>
7#include <linux/rtnetlink.h>
8#include <linux/pkt_sched.h>
9
/* Note: this used to be in include/uapi/linux/gen_stats.h */
/*
 * Basic traffic counters: total bytes and total packets seen.
 * No alignment attribute here; when embedded per-CPU the 64-bit
 * alignment is provided by struct gnet_stats_basic_cpu below.
 */
struct gnet_stats_basic_packed {
	__u64	bytes;		/* total byte count */
	__u64	packets;	/* total packet count */
};
Eric Dumazet4d390c22019-11-04 19:13:13 -080015
/*
 * Per-CPU copy of the basic counters, paired with a u64_stats_sync
 * sequence so the two 64-bit counters can be read consistently
 * (u64_stats_sync matters on 32-bit hosts where a u64 store is not
 * atomic).  The alignment keeps both counters naturally 64-bit
 * aligned within each per-CPU slot.
 */
struct gnet_stats_basic_cpu {
	struct gnet_stats_basic_packed bstats;	/* counters for this CPU */
	struct u64_stats_sync syncp;		/* guards reads of bstats */
} __aligned(2 * sizeof(u64));
/* Opaque rate-estimator state; only ever handled through the
 * gen_*_estimator() helpers declared below, so no definition is
 * needed in this header. */
struct net_rate_estimator;

/*
 * Context threaded through a statistics dump: filled in by
 * gnet_stats_start_copy*(), passed to the gnet_stats_copy_*()
 * helpers, and finalized by gnet_stats_finish_copy().
 */
struct gnet_dump {
	spinlock_t *		lock;	/* lock taken while copying stats; see start_copy() */
	struct sk_buff *	skb;	/* netlink message being built */
	struct nlattr *		tail;	/* nesting position for emitted attributes */

	/* Backward compatibility */
	int			compat_tc_stats;	/* attr type for legacy struct tc_stats dump (0 = off) */
	int			compat_xstats;		/* attr type for legacy xstats dump (0 = off) */
	int			padattr;		/* pad attribute type for 64-bit alignment */
	void *			xstats;			/* accumulated app-specific stats blob */
	int			xstats_len;		/* length of xstats in bytes */
	struct tc_stats		tc_stats;		/* legacy stats accumulated during the dump */
};
36
/* Begin a statistics dump into @skb under nested attribute @type;
 * initializes @d for the gnet_stats_copy_*() calls that follow.
 * Returns 0 on success or a negative errno (kernel convention). */
int gnet_stats_start_copy(struct sk_buff *skb, int type, spinlock_t *lock,
			  struct gnet_dump *d, int padattr);

/* As gnet_stats_start_copy(), but also arranges emission of the
 * legacy (compat) tc_stats/xstats attributes under @tc_stats_type
 * and @xstats_type for old userspace. */
int gnet_stats_start_copy_compat(struct sk_buff *skb, int type,
				 int tc_stats_type, int xstats_type,
				 spinlock_t *lock, struct gnet_dump *d,
				 int padattr);

/* Append basic byte/packet counters to the dump.  Sources are either
 * per-CPU counters (@cpu, read under @running via u64_stats_sync) or a
 * single shared counter set (@b) — presumably one of the two is used;
 * exact precedence lives in net/core/gen_stats.c. */
int gnet_stats_copy_basic(const seqcount_t *running,
			  struct gnet_dump *d,
			  struct gnet_stats_basic_cpu __percpu *cpu,
			  struct gnet_stats_basic_packed *b);
/* Internal variant: accumulate the basic counters into @bstats instead
 * of emitting a netlink attribute (double-underscore = low-level helper
 * by kernel convention). */
void __gnet_stats_copy_basic(const seqcount_t *running,
			     struct gnet_stats_basic_packed *bstats,
			     struct gnet_stats_basic_cpu __percpu *cpu,
			     struct gnet_stats_basic_packed *b);
/* Same sources as gnet_stats_copy_basic() but emits the hardware-offload
 * ("_hw") counter attribute. */
int gnet_stats_copy_basic_hw(const seqcount_t *running,
			     struct gnet_dump *d,
			     struct gnet_stats_basic_cpu __percpu *cpu,
			     struct gnet_stats_basic_packed *b);

/* Append the rate-estimator sample (if an estimator is attached at
 * @ptr, an RCU-protected handle) to the dump. */
int gnet_stats_copy_rate_est(struct gnet_dump *d,
			     struct net_rate_estimator __rcu **ptr);

/* Append queue statistics; @cpu_q (per-CPU) or @q (shared) supplies the
 * counters and @qlen the current queue length. */
int gnet_stats_copy_queue(struct gnet_dump *d,
			  struct gnet_stats_queue __percpu *cpu_q,
			  struct gnet_stats_queue *q, __u32 qlen);
/* Internal variant: accumulate queue stats into @qstats without
 * touching the netlink message. */
void __gnet_stats_copy_queue(struct gnet_stats_queue *qstats,
			     const struct gnet_stats_queue __percpu *cpu_q,
			     const struct gnet_stats_queue *q, __u32 qlen);

/* Append an application/qdisc-specific stats blob of @len bytes. */
int gnet_stats_copy_app(struct gnet_dump *d, void *st, int len);

/* Close the dump started by gnet_stats_start_copy*(): terminates the
 * nested attribute and flushes any pending compat attributes. */
int gnet_stats_finish_copy(struct gnet_dump *d);
Linus Torvalds1da177e2005-04-16 15:20:36 -070068
/* Create a rate estimator fed by @bstats (or per-CPU @cpu_bstats) and
 * publish it at the RCU handle @rate_est.  @lock/@running protect the
 * counter reads; @opt is the netlink attribute carrying the estimator
 * parameters (presumably TCA_RATE-style config — see
 * net/core/gen_estimator.c).  Returns 0 or a negative errno. */
int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
		      struct gnet_stats_basic_cpu __percpu *cpu_bstats,
		      struct net_rate_estimator __rcu **rate_est,
		      spinlock_t *lock,
		      seqcount_t *running, struct nlattr *opt);
/* Detach and dispose of the estimator published at @ptr, if any. */
void gen_kill_estimator(struct net_rate_estimator __rcu **ptr);
/* Atomically swap the estimator at @ptr for one built from the new
 * parameters; same arguments and return convention as
 * gen_new_estimator(). */
int gen_replace_estimator(struct gnet_stats_basic_packed *bstats,
			  struct gnet_stats_basic_cpu __percpu *cpu_bstats,
			  struct net_rate_estimator __rcu **ptr,
			  spinlock_t *lock,
			  seqcount_t *running, struct nlattr *opt);
/* True if an estimator is currently attached at @ptr. */
bool gen_estimator_active(struct net_rate_estimator __rcu **ptr);
/* Read the current rate estimate into @sample; returns true if an
 * estimator was attached and a sample was produced. */
bool gen_estimator_read(struct net_rate_estimator __rcu **ptr,
			struct gnet_stats_rate_est64 *sample);
Linus Torvalds1da177e2005-04-16 15:20:36 -070083#endif