blob: 1a40277b512c959813f87e000810d40d719b0071 [file] [log] [blame]
Greg Kroah-Hartmanb2441312017-11-01 15:07:57 +01001/* SPDX-License-Identifier: GPL-2.0 */
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002#ifndef _LINUX_RING_BUFFER_H
3#define _LINUX_RING_BUFFER_H
4
5#include <linux/mm.h>
6#include <linux/seq_file.h>
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05007#include <linux/poll.h>
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04008
/* Opaque types; users of this API only ever hold pointers to them. */
struct ring_buffer;
struct ring_buffer_iter;

12/*
Wenji Huangc3706f02009-02-10 01:03:18 -050013 * Don't refer to this struct directly, use functions below.
Steven Rostedt7a8e76a2008-09-29 23:02:38 -040014 */
15struct ring_buffer_event {
Lai Jiangshan334d4162009-04-24 11:27:05 +080016 u32 type_len:5, time_delta:27;
Vegard Nossum1744a212009-02-28 08:29:44 +010017
Steven Rostedt7a8e76a2008-09-29 23:02:38 -040018 u32 array[];
19};
20
/**
 * enum ring_buffer_type - internal ring buffer types
 *
 * @RINGBUF_TYPE_PADDING:	Left over page padding or discarded event
 *				 If time_delta is 0:
 *				  array is ignored
 *				  size is variable depending on how much
 *				  padding is needed
 *				 If time_delta is non zero:
 *				  array[0] holds the actual length
 *				  size = 4 + length (bytes)
 *
 * @RINGBUF_TYPE_TIME_EXTEND:	Extend the time delta
 *				 array[0] = time delta (28 .. 59)
 *				 size = 8 bytes
 *
 * @RINGBUF_TYPE_TIME_STAMP:	Absolute timestamp
 *				 Same format as TIME_EXTEND except that the
 *				 value is an absolute timestamp, not a delta
 *				 event.time_delta contains bottom 27 bits
 *				 array[0] = top (28 .. 59) bits
 *				 size = 8 bytes
 *
 * <= @RINGBUF_TYPE_DATA_TYPE_LEN_MAX:
 *				 Data record
 *				 If type_len is zero:
 *				  array[0] holds the actual length
 *				  array[1..(length+3)/4] holds data
 *				  size = 4 + length (bytes)
 *				 else
 *				  length = type_len << 2
 *				  array[0..(length+3)/4-1] holds data
 *				  size = 4 + length (bytes)
 */
enum ring_buffer_type {
	/* Values 0..28 are data records; 29..31 are the control types below. */
	RINGBUF_TYPE_DATA_TYPE_LEN_MAX = 28,
	RINGBUF_TYPE_PADDING,
	RINGBUF_TYPE_TIME_EXTEND,
	RINGBUF_TYPE_TIME_STAMP,
};

62unsigned ring_buffer_event_length(struct ring_buffer_event *event);
63void *ring_buffer_event_data(struct ring_buffer_event *event);
Tom Zanussidc4e2802018-01-15 20:51:40 -060064u64 ring_buffer_event_time_stamp(struct ring_buffer_event *event);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -040065
/*
 * ring_buffer_discard_commit will remove an event that has not
 * been committed yet. If this is used, then ring_buffer_unlock_commit
 * must not be called on the discarded event. This function
 * will try to remove the event from the ring buffer completely
 * if another event has not been written after it.
 *
 * Example use:
 *
 *  if (some_condition)
 *    ring_buffer_discard_commit(buffer, event);
 *  else
 *    ring_buffer_unlock_commit(buffer, event);
 */
void ring_buffer_discard_commit(struct ring_buffer *buffer,
				struct ring_buffer_event *event);

/*
 * size is in bytes for each per CPU buffer.
 */
struct ring_buffer *
__ring_buffer_alloc(unsigned long size, unsigned flags, struct lock_class_key *key);

/*
 * Because the ring buffer is generic, if other users of the ring buffer get
 * traced by ftrace, it can produce lockdep warnings. We need to keep each
 * ring buffer's lock class separate.
 */
#define ring_buffer_alloc(size, flags)			\
({							\
	static struct lock_class_key __key;		\
	__ring_buffer_alloc((size), (flags), &__key);	\
})

Steven Rostedt (VMware)2c2b0a72018-11-29 20:32:26 -0500100int ring_buffer_wait(struct ring_buffer *buffer, int cpu, int full);
Al Viroecf92702017-07-16 22:11:54 -0400101__poll_t ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu,
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -0500102 struct file *filp, poll_table *poll_table);
103
104
/* Special cpu value: operate on every per-CPU buffer at once. */
#define RING_BUFFER_ALL_CPUS -1

void ring_buffer_free(struct ring_buffer *buffer);

int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size, int cpu);

void ring_buffer_change_overwrite(struct ring_buffer *buffer, int val);

/* Two-step write: reserve space, fill the event, then commit (or discard). */
struct ring_buffer_event *ring_buffer_lock_reserve(struct ring_buffer *buffer,
						   unsigned long length);
int ring_buffer_unlock_commit(struct ring_buffer *buffer,
			      struct ring_buffer_event *event);
/* One-step write of an already-built data blob. */
int ring_buffer_write(struct ring_buffer *buffer,
		      unsigned long length, void *data);

void ring_buffer_nest_start(struct ring_buffer *buffer);
void ring_buffer_nest_end(struct ring_buffer *buffer);

Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400123struct ring_buffer_event *
Steven Rostedt66a8cb92010-03-31 13:21:56 -0400124ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts,
125 unsigned long *lost_events);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400126struct ring_buffer_event *
Steven Rostedt66a8cb92010-03-31 13:21:56 -0400127ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts,
128 unsigned long *lost_events);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400129
130struct ring_buffer_iter *
Douglas Anderson31b265b2019-03-08 11:32:04 -0800131ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu, gfp_t flags);
David Miller72c9ddf2010-04-20 15:47:11 -0700132void ring_buffer_read_prepare_sync(void);
133void ring_buffer_read_start(struct ring_buffer_iter *iter);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400134void ring_buffer_read_finish(struct ring_buffer_iter *iter);
135
136struct ring_buffer_event *
137ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts);
138struct ring_buffer_event *
139ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts);
140void ring_buffer_iter_reset(struct ring_buffer_iter *iter);
141int ring_buffer_iter_empty(struct ring_buffer_iter *iter);
142
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -0800143unsigned long ring_buffer_size(struct ring_buffer *buffer, int cpu);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400144
145void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu);
146void ring_buffer_reset(struct ring_buffer *buffer);
147
#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
			 struct ring_buffer *buffer_b, int cpu);
#else
/*
 * Swapping is compiled out: report "no such device" so callers can
 * detect the missing capability at run time.
 */
static inline int
ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
		     struct ring_buffer *buffer_b, int cpu)
{
	return -ENODEV;
}
#endif

Yaowei Bai3d4e2042015-09-29 22:43:32 +0800160bool ring_buffer_empty(struct ring_buffer *buffer);
161bool ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400162
163void ring_buffer_record_disable(struct ring_buffer *buffer);
164void ring_buffer_record_enable(struct ring_buffer *buffer);
Steven Rostedt499e5472012-02-22 15:50:28 -0500165void ring_buffer_record_off(struct ring_buffer *buffer);
166void ring_buffer_record_on(struct ring_buffer *buffer);
Steven Rostedt (VMware)3ebea2802018-08-01 21:08:30 -0400167bool ring_buffer_record_is_on(struct ring_buffer *buffer);
Steven Rostedt (VMware)d7224c02018-08-01 21:09:50 -0400168bool ring_buffer_record_is_set_on(struct ring_buffer *buffer);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400169void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu);
170void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu);
171
Yoshihiro YUNOMAE50ecf2c2012-10-11 16:27:54 -0700172u64 ring_buffer_oldest_event_ts(struct ring_buffer *buffer, int cpu);
Vaibhav Nagarnaikc64e1482011-08-16 14:46:16 -0700173unsigned long ring_buffer_bytes_cpu(struct ring_buffer *buffer, int cpu);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400174unsigned long ring_buffer_entries(struct ring_buffer *buffer);
175unsigned long ring_buffer_overruns(struct ring_buffer *buffer);
Robert Richtere09373f2008-11-26 14:04:19 +0100176unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu);
177unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu);
Steven Rostedtf0d2c682009-04-29 13:43:37 -0400178unsigned long ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu);
Slava Pestov884bfe82011-07-15 14:23:58 -0700179unsigned long ring_buffer_dropped_events_cpu(struct ring_buffer *buffer, int cpu);
Steven Rostedt (Red Hat)ad964702013-01-29 17:45:49 -0500180unsigned long ring_buffer_read_events_cpu(struct ring_buffer *buffer, int cpu);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400181
Steven Rostedt37886f62009-03-17 17:22:06 -0400182u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu);
183void ring_buffer_normalize_time_stamp(struct ring_buffer *buffer,
184 int cpu, u64 *ts);
185void ring_buffer_set_clock(struct ring_buffer *buffer,
186 u64 (*clock)(void));
Tom Zanussi00b41452018-01-15 20:51:39 -0600187void ring_buffer_set_time_stamp_abs(struct ring_buffer *buffer, bool abs);
188bool ring_buffer_time_stamp_abs(struct ring_buffer *buffer);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400189
size_t ring_buffer_nr_pages(struct ring_buffer *buffer, int cpu);
size_t ring_buffer_nr_dirty_pages(struct ring_buffer *buffer, int cpu);

/* Page-level reads: caller allocates a page, reads into it, then frees it. */
void *ring_buffer_alloc_read_page(struct ring_buffer *buffer, int cpu);
void ring_buffer_free_read_page(struct ring_buffer *buffer, int cpu, void *data);
int ring_buffer_read_page(struct ring_buffer *buffer, void **data_page,
			  size_t len, int cpu, int full);

struct trace_seq;

/* Dump the binary format of the ring buffer headers into a trace_seq. */
int ring_buffer_print_entry_header(struct trace_seq *s);
int ring_buffer_print_page_header(struct trace_seq *s);

/* Flags for __ring_buffer_alloc()/ring_buffer_alloc(). */
enum ring_buffer_flags {
	RB_FL_OVERWRITE		= 1 << 0,
};

/* CPU-hotplug "prepare" callback; NULL when the ring buffer is not built. */
#ifdef CONFIG_RING_BUFFER
int trace_rb_cpu_prepare(unsigned int cpu, struct hlist_node *node);
#else
#define trace_rb_cpu_prepare	NULL
#endif

Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400213#endif /* _LINUX_RING_BUFFER_H */