/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NET_GEN_STATS_H
#define __NET_GEN_STATS_H

#include <linux/gen_stats.h>
#include <linux/socket.h>
#include <linux/rtnetlink.h>
#include <linux/pkt_sched.h>

/* Throughput stats.
 * Must be initialized beforehand with gnet_stats_basic_sync_init().
 *
 * If no reads can ever occur parallel to writes (e.g. stack-allocated
 * bstats), then the internal stat values can be written to and read
 * from directly. Otherwise, use _bstats_set/update() for writes and
 * gnet_stats_add_basic() for reads.
 */
struct gnet_stats_basic_sync {
	u64_stats_t bytes;
	u64_stats_t packets;
	struct u64_stats_sync syncp;
} __aligned(2 * sizeof(u64));
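
/* A minimal usage sketch (illustrative, not part of this header): a
 * writer that owns @bstats serializes its own updates, e.g. via the
 * _bstats_update() helper from <net/sch_generic.h>, while readers that
 * may run concurrently fold the counters with gnet_stats_add_basic():
 *
 *	struct gnet_stats_basic_sync bstats;
 *
 *	gnet_stats_basic_sync_init(&bstats);
 *	_bstats_update(&bstats, skb->len, 1);	(writer side)
 *
 * Reading the u64_stats_t fields directly is only safe when no writer
 * can run in parallel, as described above.
 */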

struct net_rate_estimator;

struct gnet_dump {
	spinlock_t *      lock;
	struct sk_buff *  skb;
	struct nlattr *   tail;

	/* Backward compatibility */
	int               compat_tc_stats;
	int               compat_xstats;
	int               padattr;
	void *            xstats;
	int               xstats_len;
	struct tc_stats   tc_stats;
};

void gnet_stats_basic_sync_init(struct gnet_stats_basic_sync *b);
int gnet_stats_start_copy(struct sk_buff *skb, int type, spinlock_t *lock,
			  struct gnet_dump *d, int padattr);

int gnet_stats_start_copy_compat(struct sk_buff *skb, int type,
				 int tc_stats_type, int xstats_type,
				 spinlock_t *lock, struct gnet_dump *d,
				 int padattr);

int gnet_stats_copy_basic(struct gnet_dump *d,
			  struct gnet_stats_basic_sync __percpu *cpu,
			  struct gnet_stats_basic_sync *b, bool running);
void gnet_stats_add_basic(struct gnet_stats_basic_sync *bstats,
			  struct gnet_stats_basic_sync __percpu *cpu,
			  struct gnet_stats_basic_sync *b, bool running);
int gnet_stats_copy_basic_hw(struct gnet_dump *d,
			     struct gnet_stats_basic_sync __percpu *cpu,
			     struct gnet_stats_basic_sync *b, bool running);
int gnet_stats_copy_rate_est(struct gnet_dump *d,
			     struct net_rate_estimator __rcu **ptr);
int gnet_stats_copy_queue(struct gnet_dump *d,
			  struct gnet_stats_queue __percpu *cpu_q,
			  struct gnet_stats_queue *q, __u32 qlen);
void gnet_stats_add_queue(struct gnet_stats_queue *qstats,
			  const struct gnet_stats_queue __percpu *cpu_q,
			  const struct gnet_stats_queue *q);
int gnet_stats_copy_app(struct gnet_dump *d, void *st, int len);

int gnet_stats_finish_copy(struct gnet_dump *d);
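
/* Sketch of the typical dump sequence (assumptions: a qdisc-style
 * caller holding cpu_bstats/bstats, rate_est, cpu_qstats/qstats and
 * qlen; TCA_STATS2/TCA_STATS/TCA_XSTATS/TCA_PAD come from
 * <linux/rtnetlink.h>; error handling elided):
 *
 *	struct gnet_dump d;
 *
 *	if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS,
 *					 TCA_XSTATS, lock, &d,
 *					 TCA_PAD) < 0)
 *		goto nla_put_failure;
 *	if (gnet_stats_copy_basic(&d, cpu_bstats, &bstats, running) < 0 ||
 *	    gnet_stats_copy_rate_est(&d, &rate_est) < 0 ||
 *	    gnet_stats_copy_queue(&d, cpu_qstats, &qstats, qlen) < 0)
 *		goto nla_put_failure;
 *	if (gnet_stats_finish_copy(&d) < 0)
 *		goto nla_put_failure;
 */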

int gen_new_estimator(struct gnet_stats_basic_sync *bstats,
		      struct gnet_stats_basic_sync __percpu *cpu_bstats,
		      struct net_rate_estimator __rcu **rate_est,
		      spinlock_t *lock,
		      bool running, struct nlattr *opt);
void gen_kill_estimator(struct net_rate_estimator __rcu **ptr);
int gen_replace_estimator(struct gnet_stats_basic_sync *bstats,
			  struct gnet_stats_basic_sync __percpu *cpu_bstats,
			  struct net_rate_estimator __rcu **ptr,
			  spinlock_t *lock,
			  bool running, struct nlattr *opt);
bool gen_estimator_active(struct net_rate_estimator __rcu **ptr);
bool gen_estimator_read(struct net_rate_estimator __rcu **ptr,
			struct gnet_stats_rate_est64 *sample);
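
/* Estimator lifecycle, sketched (names such as @bstats, @stats_lock
 * and @opt are caller-supplied, not defined here):
 *
 *	struct net_rate_estimator __rcu *rate_est = NULL;
 *	struct gnet_stats_rate_est64 sample;
 *	int err;
 *
 *	err = gen_new_estimator(&bstats, NULL, &rate_est, &stats_lock,
 *				true, opt);
 *	...
 *	if (gen_estimator_read(&rate_est, &sample))
 *		pr_debug("%llu bps, %llu pps\n", sample.bps, sample.pps);
 *	gen_kill_estimator(&rate_est);
 */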
#endif