/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_U64_STATS_SYNC_H
#define _LINUX_U64_STATS_SYNC_H

/*
 * To properly implement 64-bit network statistics on 32-bit and 64-bit hosts,
 * we provide a synchronization point that is a no-op on 64-bit or UP kernels.
 *
 * Key points :
 * 1) Use a seqcount on 32-bit SMP, with low overhead.
 * 2) The whole thing is a no-op on 64-bit arches or UP kernels.
 * 3) Write side must ensure mutual exclusion, or one seqcount update could
 *    be lost, thus blocking readers forever.
 *    If this synchronization point is not a mutex, but a spinlock or
 *    spinlock_bh() or disable_bh() :
 *    3.1) Write side should not sleep.
 *    3.2) Write side should not allow preemption.
 *    3.3) If applicable, interrupts should be disabled.
 *
 * 4) If a reader fetches several counters, there is no guarantee the whole
 *    values are consistent (remember point 1); this is a no-op on 64-bit
 *    arches anyway.
 *
 * 5) Readers are allowed to sleep or be preempted/interrupted : they perform
 *    pure reads. But if they have to fetch many values, it's better to not
 *    allow preemption/interruption, to avoid many retries.
 *
 * 6) If a counter might be written by an interrupt, readers should block
 *    interrupts. (On UP, there is no seqcount_t protection; a reader allowing
 *    interrupts could read partial values.)
 *
 * 7) For irq and softirq uses, readers can use the u64_stats_fetch_begin_irq()
 *    and u64_stats_fetch_retry_irq() helpers.
 *
 * Usage :
 *
 * A stats producer (writer) should use the following template, provided it
 * already has exclusive access to the counters (a lock is already taken, or
 * per-cpu data is used [in a non-preemptible context]) :
 *
 *	spin_lock_bh(...) or other synchronization to get exclusive access
 *	...
 *	u64_stats_update_begin(&stats->syncp);
 *	u64_stats_add(&stats->bytes64, len);	// non atomic operation
 *	u64_stats_inc(&stats->packets64);	// non atomic operation
 *	u64_stats_update_end(&stats->syncp);
 *
 * A consumer (reader) should use the following template to get a consistent
 * snapshot of each variable (but no guarantee across several of them) :
 *
 *	u64 tbytes, tpackets;
 *	unsigned int start;
 *
 *	do {
 *		start = u64_stats_fetch_begin(&stats->syncp);
 *		tbytes = u64_stats_read(&stats->bytes64);	// non atomic operation
 *		tpackets = u64_stats_read(&stats->packets64);	// non atomic operation
 *	} while (u64_stats_fetch_retry(&stats->syncp, start));
 *
 * See drivers/net/loopback.c for an example of use, with per_cpu containers,
 * in a BH-disabled context.
 */
#include <linux/seqlock.h>

struct u64_stats_sync {
#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
	seqcount_t	seq;
#endif
};

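/*
 * A minimal sketch of a per-cpu stats container embedding the sync point
 * (hypothetical field names, loosely modeled on drivers/net/loopback.c):
 *
 *	struct pcpu_lstats {
 *		u64_stats_t		packets;
 *		u64_stats_t		bytes;
 *		struct u64_stats_sync	syncp;
 *	};
 */
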
#if BITS_PER_LONG == 64
#include <asm/local64.h>

typedef struct {
	local64_t	v;
} u64_stats_t;

static inline u64 u64_stats_read(const u64_stats_t *p)
{
	return local64_read(&p->v);
}

static inline void u64_stats_add(u64_stats_t *p, unsigned long val)
{
	local64_add(val, &p->v);
}

static inline void u64_stats_inc(u64_stats_t *p)
{
	local64_inc(&p->v);
}

#else

typedef struct {
	u64		v;
} u64_stats_t;

static inline u64 u64_stats_read(const u64_stats_t *p)
{
	return p->v;
}

static inline void u64_stats_add(u64_stats_t *p, unsigned long val)
{
	p->v += val;
}

static inline void u64_stats_inc(u64_stats_t *p)
{
	p->v++;
}
#endif
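
/*
 * A hedged sketch of the accessors above, assuming the writer already has
 * exclusive access (the "dropped" counter is hypothetical):
 *
 *	u64_stats_t dropped;
 *
 *	u64_stats_inc(&dropped);	// plain ++ on 32-bit, local64_inc() on 64-bit
 *	u64_stats_add(&dropped, 16);
 *	pr_info("dropped=%llu\n", u64_stats_read(&dropped));
 */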

static inline void u64_stats_init(struct u64_stats_sync *syncp)
{
#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
	seqcount_init(&syncp->seq);
#endif
}

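/*
 * A sketch of typical setup-time initialization of per-cpu instances
 * (the "stats" per-cpu pointer is hypothetical):
 *
 *	int cpu;
 *
 *	for_each_possible_cpu(cpu)
 *		u64_stats_init(&per_cpu_ptr(stats, cpu)->syncp);
 */
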
static inline void u64_stats_update_begin(struct u64_stats_sync *syncp)
{
#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
	write_seqcount_begin(&syncp->seq);
#endif
}

static inline void u64_stats_update_end(struct u64_stats_sync *syncp)
{
#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
	write_seqcount_end(&syncp->seq);
#endif
}

static inline unsigned long
u64_stats_update_begin_irqsave(struct u64_stats_sync *syncp)
{
	unsigned long flags = 0;

#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
	local_irq_save(flags);
	write_seqcount_begin(&syncp->seq);
#endif
	return flags;
}

static inline void
u64_stats_update_end_irqrestore(struct u64_stats_sync *syncp,
				unsigned long flags)
{
#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
	write_seqcount_end(&syncp->seq);
	local_irq_restore(flags);
#endif
}

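/*
 * A writer sketch for a context where hard irqs must also be masked on
 * 32-bit SMP (the "tx_errors" counter is hypothetical):
 *
 *	unsigned long flags;
 *
 *	flags = u64_stats_update_begin_irqsave(&stats->syncp);
 *	u64_stats_inc(&stats->tx_errors);
 *	u64_stats_update_end_irqrestore(&stats->syncp, flags);
 */
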
static inline unsigned int __u64_stats_fetch_begin(const struct u64_stats_sync *syncp)
{
#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
	return read_seqcount_begin(&syncp->seq);
#else
	return 0;
#endif
}

static inline unsigned int u64_stats_fetch_begin(const struct u64_stats_sync *syncp)
{
#if BITS_PER_LONG == 32 && !defined(CONFIG_SMP)
	preempt_disable();
#endif
	return __u64_stats_fetch_begin(syncp);
}

static inline bool __u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
					   unsigned int start)
{
#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
	return read_seqcount_retry(&syncp->seq, start);
#else
	return false;
#endif
}

static inline bool u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
					 unsigned int start)
{
#if BITS_PER_LONG == 32 && !defined(CONFIG_SMP)
	preempt_enable();
#endif
	return __u64_stats_fetch_retry(syncp, start);
}

/*
 * In case irq handlers can update u64 counters, readers can use the following
 * helpers :
 * - SMP 32-bit arches use seqcount protection, irq safe.
 * - UP 32-bit must disable irqs.
 * - 64-bit arches can atomically read u64 values, irq safe.
 */
static inline unsigned int u64_stats_fetch_begin_irq(const struct u64_stats_sync *syncp)
{
#if BITS_PER_LONG == 32 && !defined(CONFIG_SMP)
	local_irq_disable();
#endif
	return __u64_stats_fetch_begin(syncp);
}

static inline bool u64_stats_fetch_retry_irq(const struct u64_stats_sync *syncp,
					     unsigned int start)
{
#if BITS_PER_LONG == 32 && !defined(CONFIG_SMP)
	local_irq_enable();
#endif
	return __u64_stats_fetch_retry(syncp, start);
}
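
/*
 * A reader sketch for counters that an irq handler may update
 * (field names are hypothetical):
 *
 *	unsigned int start;
 *	u64 packets;
 *
 *	do {
 *		start = u64_stats_fetch_begin_irq(&stats->syncp);
 *		packets = u64_stats_read(&stats->packets64);
 *	} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
 */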

#endif /* _LINUX_U64_STATS_SYNC_H */