blob: 136ea0997e6df6c6d7e7910d02d2c54799ca85d8 [file] [log] [blame]
Greg Kroah-Hartmanb2441312017-11-01 15:07:57 +01001/* SPDX-License-Identifier: GPL-2.0 */
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002#ifndef _LINUX_RING_BUFFER_H
3#define _LINUX_RING_BUFFER_H
4
5#include <linux/mm.h>
6#include <linux/seq_file.h>
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05007#include <linux/poll.h>
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04008
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05009struct trace_buffer;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -040010struct ring_buffer_iter;
11
12/*
Wenji Huangc3706f02009-02-10 01:03:18 -050013 * Don't refer to this struct directly, use functions below.
Steven Rostedt7a8e76a2008-09-29 23:02:38 -040014 */
15struct ring_buffer_event {
Lai Jiangshan334d4162009-04-24 11:27:05 +080016 u32 type_len:5, time_delta:27;
Vegard Nossum1744a212009-02-28 08:29:44 +010017
Steven Rostedt7a8e76a2008-09-29 23:02:38 -040018 u32 array[];
19};
20
21/**
22 * enum ring_buffer_type - internal ring buffer types
23 *
Tom Zanussi2d622712009-03-22 03:30:49 -050024 * @RINGBUF_TYPE_PADDING: Left over page padding or discarded event
25 * If time_delta is 0:
26 * array is ignored
27 * size is variable depending on how much
Steven Rostedt7a8e76a2008-09-29 23:02:38 -040028 * padding is needed
Tom Zanussi2d622712009-03-22 03:30:49 -050029 * If time_delta is non zero:
Lai Jiangshan334d4162009-04-24 11:27:05 +080030 * array[0] holds the actual length
31 * size = 4 + length (bytes)
Steven Rostedt7a8e76a2008-09-29 23:02:38 -040032 *
33 * @RINGBUF_TYPE_TIME_EXTEND: Extend the time delta
34 * array[0] = time delta (28 .. 59)
35 * size = 8 bytes
36 *
Tom Zanussidc4e2802018-01-15 20:51:40 -060037 * @RINGBUF_TYPE_TIME_STAMP: Absolute timestamp
38 * Same format as TIME_EXTEND except that the
39 * value is an absolute timestamp, not a delta
40 * event.time_delta contains bottom 27 bits
41 * array[0] = top (28 .. 59) bits
42 * size = 8 bytes
Steven Rostedt7a8e76a2008-09-29 23:02:38 -040043 *
Lai Jiangshan334d4162009-04-24 11:27:05 +080044 * <= @RINGBUF_TYPE_DATA_TYPE_LEN_MAX:
45 * Data record
46 * If type_len is zero:
Steven Rostedt7a8e76a2008-09-29 23:02:38 -040047 * array[0] holds the actual length
Lai Jiangshan361b73d2008-12-08 10:58:08 +080048 * array[1..(length+3)/4] holds data
Lai Jiangshan334d4162009-04-24 11:27:05 +080049 * size = 4 + length (bytes)
Steven Rostedt7a8e76a2008-09-29 23:02:38 -040050 * else
Lai Jiangshan334d4162009-04-24 11:27:05 +080051 * length = type_len << 2
Lai Jiangshan361b73d2008-12-08 10:58:08 +080052 * array[0..(length+3)/4-1] holds data
53 * size = 4 + length (bytes)
Steven Rostedt7a8e76a2008-09-29 23:02:38 -040054 */
enum ring_buffer_type {
	/*
	 * type_len values 1..28 encode a data event's length in-line;
	 * the three values above 28 are reserved for the control types
	 * below (type_len is a 5-bit field, so 31 is the maximum).
	 */
	RINGBUF_TYPE_DATA_TYPE_LEN_MAX = 28,
	RINGBUF_TYPE_PADDING,		/* 29: page padding / discarded event */
	RINGBUF_TYPE_TIME_EXTEND,	/* 30: extends time_delta beyond 27 bits */
	RINGBUF_TYPE_TIME_STAMP,	/* 31: absolute timestamp, not a delta */
};
61
62unsigned ring_buffer_event_length(struct ring_buffer_event *event);
63void *ring_buffer_event_data(struct ring_buffer_event *event);
Tom Zanussidc4e2802018-01-15 20:51:40 -060064u64 ring_buffer_event_time_stamp(struct ring_buffer_event *event);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -040065
Steven Rostedtfa1b47d2009-04-02 00:09:41 -040066/*
Steven Rostedtfa1b47d2009-04-02 00:09:41 -040067 * ring_buffer_discard_commit will remove an event that has not
Vasyl Gomonovycha9235b52018-05-18 22:31:28 +020068 * been committed yet. If this is used, then ring_buffer_unlock_commit
Steven Rostedtfa1b47d2009-04-02 00:09:41 -040069 * must not be called on the discarded event. This function
70 * will try to remove the event from the ring buffer completely
71 * if another event has not been written after it.
72 *
73 * Example use:
74 *
75 * if (some_condition)
76 * ring_buffer_discard_commit(buffer, event);
77 * else
78 * ring_buffer_unlock_commit(buffer, event);
79 */
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -050080void ring_buffer_discard_commit(struct trace_buffer *buffer,
Steven Rostedtfa1b47d2009-04-02 00:09:41 -040081 struct ring_buffer_event *event);
82
83/*
Steven Rostedt7a8e76a2008-09-29 23:02:38 -040084 * size is in bytes for each per CPU buffer.
85 */
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -050086struct trace_buffer *
Peter Zijlstra1f8a6a12009-06-08 18:18:39 +020087__ring_buffer_alloc(unsigned long size, unsigned flags, struct lock_class_key *key);
88
89/*
90 * Because the ring buffer is generic, if other users of the ring buffer get
91 * traced by ftrace, it can produce lockdep warnings. We need to keep each
92 * ring buffer's lock class separate.
93 */
/*
 * ring_buffer_alloc - allocate a ring buffer with a per-call-site lock class
 * @size:  size in bytes for each per-CPU buffer
 * @flags: enum ring_buffer_flags bits (e.g. RB_FL_OVERWRITE)
 *
 * The static __key gives every call site its own lock_class_key, keeping
 * each ring buffer's lock class separate so lockdep does not emit false
 * warnings when the ring buffer code is itself traced.
 */
#define ring_buffer_alloc(size, flags)			\
({							\
	static struct lock_class_key __key;		\
	__ring_buffer_alloc((size), (flags), &__key);	\
})
99
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -0500100int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full);
101__poll_t ring_buffer_poll_wait(struct trace_buffer *buffer, int cpu,
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -0500102 struct file *filp, poll_table *poll_table);
103
104
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -0800105#define RING_BUFFER_ALL_CPUS -1
106
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -0500107void ring_buffer_free(struct trace_buffer *buffer);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400108
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -0500109int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size, int cpu);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400110
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -0500111void ring_buffer_change_overwrite(struct trace_buffer *buffer, int val);
David Sharp750912f2010-12-08 13:46:47 -0800112
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -0500113struct ring_buffer_event *ring_buffer_lock_reserve(struct trace_buffer *buffer,
Arnaldo Carvalho de Melo0a987752009-02-05 16:12:56 -0200114 unsigned long length);
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -0500115int ring_buffer_unlock_commit(struct trace_buffer *buffer,
Arnaldo Carvalho de Melo0a987752009-02-05 16:12:56 -0200116 struct ring_buffer_event *event);
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -0500117int ring_buffer_write(struct trace_buffer *buffer,
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400118 unsigned long length, void *data);
119
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -0500120void ring_buffer_nest_start(struct trace_buffer *buffer);
121void ring_buffer_nest_end(struct trace_buffer *buffer);
Steven Rostedt (VMware)8e012062018-02-07 17:26:32 -0500122
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400123struct ring_buffer_event *
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -0500124ring_buffer_peek(struct trace_buffer *buffer, int cpu, u64 *ts,
Steven Rostedt66a8cb92010-03-31 13:21:56 -0400125 unsigned long *lost_events);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400126struct ring_buffer_event *
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -0500127ring_buffer_consume(struct trace_buffer *buffer, int cpu, u64 *ts,
Steven Rostedt66a8cb92010-03-31 13:21:56 -0400128 unsigned long *lost_events);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400129
130struct ring_buffer_iter *
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -0500131ring_buffer_read_prepare(struct trace_buffer *buffer, int cpu, gfp_t flags);
David Miller72c9ddf2010-04-20 15:47:11 -0700132void ring_buffer_read_prepare_sync(void);
133void ring_buffer_read_start(struct ring_buffer_iter *iter);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400134void ring_buffer_read_finish(struct ring_buffer_iter *iter);
135
136struct ring_buffer_event *
137ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts);
Steven Rostedt (VMware)bc1a72a2020-03-17 17:32:25 -0400138void ring_buffer_iter_advance(struct ring_buffer_iter *iter);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400139void ring_buffer_iter_reset(struct ring_buffer_iter *iter);
140int ring_buffer_iter_empty(struct ring_buffer_iter *iter);
Steven Rostedt (VMware)c9b7a4a2020-03-17 17:32:32 -0400141bool ring_buffer_iter_dropped(struct ring_buffer_iter *iter);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400142
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -0500143unsigned long ring_buffer_size(struct trace_buffer *buffer, int cpu);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400144
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -0500145void ring_buffer_reset_cpu(struct trace_buffer *buffer, int cpu);
Nicholas Pigginb23d7a5f2020-06-25 15:34:03 +1000146void ring_buffer_reset_online_cpus(struct trace_buffer *buffer);
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -0500147void ring_buffer_reset(struct trace_buffer *buffer);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400148
#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
int ring_buffer_swap_cpu(struct trace_buffer *buffer_a,
			 struct trace_buffer *buffer_b, int cpu);
#else
/*
 * Swapping per-CPU buffers between two ring buffers is only available
 * when CONFIG_RING_BUFFER_ALLOW_SWAP is enabled; the stub reports the
 * operation as unsupported so callers can fail gracefully.
 */
static inline int
ring_buffer_swap_cpu(struct trace_buffer *buffer_a,
		     struct trace_buffer *buffer_b, int cpu)
{
	return -ENODEV;
}
#endif
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400160
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -0500161bool ring_buffer_empty(struct trace_buffer *buffer);
162bool ring_buffer_empty_cpu(struct trace_buffer *buffer, int cpu);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400163
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -0500164void ring_buffer_record_disable(struct trace_buffer *buffer);
165void ring_buffer_record_enable(struct trace_buffer *buffer);
166void ring_buffer_record_off(struct trace_buffer *buffer);
167void ring_buffer_record_on(struct trace_buffer *buffer);
168bool ring_buffer_record_is_on(struct trace_buffer *buffer);
169bool ring_buffer_record_is_set_on(struct trace_buffer *buffer);
170void ring_buffer_record_disable_cpu(struct trace_buffer *buffer, int cpu);
171void ring_buffer_record_enable_cpu(struct trace_buffer *buffer, int cpu);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400172
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -0500173u64 ring_buffer_oldest_event_ts(struct trace_buffer *buffer, int cpu);
174unsigned long ring_buffer_bytes_cpu(struct trace_buffer *buffer, int cpu);
175unsigned long ring_buffer_entries(struct trace_buffer *buffer);
176unsigned long ring_buffer_overruns(struct trace_buffer *buffer);
177unsigned long ring_buffer_entries_cpu(struct trace_buffer *buffer, int cpu);
178unsigned long ring_buffer_overrun_cpu(struct trace_buffer *buffer, int cpu);
179unsigned long ring_buffer_commit_overrun_cpu(struct trace_buffer *buffer, int cpu);
180unsigned long ring_buffer_dropped_events_cpu(struct trace_buffer *buffer, int cpu);
181unsigned long ring_buffer_read_events_cpu(struct trace_buffer *buffer, int cpu);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400182
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -0500183u64 ring_buffer_time_stamp(struct trace_buffer *buffer, int cpu);
184void ring_buffer_normalize_time_stamp(struct trace_buffer *buffer,
Steven Rostedt37886f62009-03-17 17:22:06 -0400185 int cpu, u64 *ts);
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -0500186void ring_buffer_set_clock(struct trace_buffer *buffer,
Steven Rostedt37886f62009-03-17 17:22:06 -0400187 u64 (*clock)(void));
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -0500188void ring_buffer_set_time_stamp_abs(struct trace_buffer *buffer, bool abs);
189bool ring_buffer_time_stamp_abs(struct trace_buffer *buffer);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400190
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -0500191size_t ring_buffer_nr_pages(struct trace_buffer *buffer, int cpu);
192size_t ring_buffer_nr_dirty_pages(struct trace_buffer *buffer, int cpu);
Steven Rostedtef7a4a12009-03-03 00:27:49 -0500193
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -0500194void *ring_buffer_alloc_read_page(struct trace_buffer *buffer, int cpu);
195void ring_buffer_free_read_page(struct trace_buffer *buffer, int cpu, void *data);
196int ring_buffer_read_page(struct trace_buffer *buffer, void **data_page,
Steven Rostedtef7a4a12009-03-03 00:27:49 -0500197 size_t len, int cpu, int full);
Steven Rostedt8789a9e2008-12-02 15:34:07 -0500198
Steven Rostedtd1b182a2009-04-15 16:53:47 -0400199struct trace_seq;
200
201int ring_buffer_print_entry_header(struct trace_seq *s);
202int ring_buffer_print_page_header(struct trace_seq *s);
203
/* Flags for ring_buffer_alloc() / __ring_buffer_alloc(). */
enum ring_buffer_flags {
	/*
	 * NOTE(review): name indicates overwrite-oldest-when-full mode
	 * (vs. dropping new writes); exact semantics are implemented in
	 * kernel/trace/ring_buffer.c -- confirm there.
	 */
	RB_FL_OVERWRITE		= 1 << 0,
};
207
/*
 * trace_rb_cpu_prepare - signature matches a CPU-hotplug state callback
 * (presumably registered via cpuhp_setup_state*() by the tracing core --
 * verify against the caller). When the ring buffer is not built in, NULL
 * is substituted so no callback gets registered.
 */
#ifdef CONFIG_RING_BUFFER
int trace_rb_cpu_prepare(unsigned int cpu, struct hlist_node *node);
#else
#define trace_rb_cpu_prepare	NULL
#endif
213
Steven Rostedt7a8e76a2008-09-29 23:02:38 -0400214#endif /* _LINUX_RING_BUFFER_H */