/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_RING_BUFFER_H
#define _LINUX_RING_BUFFER_H

#include <linux/mm.h>
#include <linux/seq_file.h>
#include <linux/poll.h>

struct trace_buffer;
struct ring_buffer_iter;

/*
 * Don't refer to this struct directly; use the functions below.
 */
struct ring_buffer_event {
	u32		type_len:5, time_delta:27;

	u32		array[];
};

/**
 * enum ring_buffer_type - internal ring buffer types
 *
 * @RINGBUF_TYPE_PADDING:	Left over page padding or discarded event
 *				 If time_delta is 0:
 *				  array is ignored
 *				  size is variable depending on how much
 *				  padding is needed
 *				 If time_delta is non-zero:
 *				  array[0] holds the actual length
 *				  size = 4 + length (bytes)
 *
 * @RINGBUF_TYPE_TIME_EXTEND:	Extend the time delta
 *				 array[0] = time delta (bits 28 .. 59)
 *				 size = 8 bytes
 *
 * @RINGBUF_TYPE_TIME_STAMP:	Absolute timestamp
 *				 Same format as TIME_EXTEND except that the
 *				 value is an absolute timestamp, not a delta
 *				 event.time_delta contains bottom 27 bits
 *				 array[0] = top (28 .. 59) bits
 *				 size = 8 bytes
 *
 * <= @RINGBUF_TYPE_DATA_TYPE_LEN_MAX:
 *				 Data record
 *				 If type_len is zero:
 *				  array[0] holds the actual length
 *				  array[1..(length+3)/4] holds data
 *				  size = 4 + length (bytes)
 *				 else
 *				  length = type_len << 2
 *				  array[0..(length+3)/4-1] holds data
 *				  size = 4 + length (bytes)
 */
enum ring_buffer_type {
	RINGBUF_TYPE_DATA_TYPE_LEN_MAX = 28,
	RINGBUF_TYPE_PADDING,
	RINGBUF_TYPE_TIME_EXTEND,
	RINGBUF_TYPE_TIME_STAMP,
};
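
/*
 * How a data event's length is encoded (an illustrative sketch only;
 * this helper is not part of the API, and real callers should use
 * ring_buffer_event_length() instead of peeking at the struct):
 *
 *	static unsigned rb_sketch_data_length(struct ring_buffer_event *event)
 *	{
 *		if (event->type_len)
 *			return event->type_len << 2;	(length in 4-byte words)
 *		return event->array[0];			(explicit length)
 *	}
 */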

unsigned ring_buffer_event_length(struct ring_buffer_event *event);
void *ring_buffer_event_data(struct ring_buffer_event *event);
u64 ring_buffer_event_time_stamp(struct ring_buffer_event *event);

/*
 * ring_buffer_discard_commit will remove an event that has not
 * been committed yet. If this is used, then ring_buffer_unlock_commit
 * must not be called on the discarded event. This function
 * will try to remove the event from the ring buffer completely
 * if another event has not been written after it.
 *
 * Example use:
 *
 *	if (some_condition)
 *		ring_buffer_discard_commit(buffer, event);
 *	else
 *		ring_buffer_unlock_commit(buffer, event);
 */
void ring_buffer_discard_commit(struct trace_buffer *buffer,
				struct ring_buffer_event *event);

/*
 * The size is in bytes for each per-CPU buffer.
 */
struct trace_buffer *
__ring_buffer_alloc(unsigned long size, unsigned flags, struct lock_class_key *key);

/*
 * Because the ring buffer is generic, if other users of the ring buffer get
 * traced by ftrace, it can produce lockdep warnings. We need to keep each
 * ring buffer's lock class separate.
 */
#define ring_buffer_alloc(size, flags)			\
({							\
	static struct lock_class_key __key;		\
	__ring_buffer_alloc((size), (flags), &__key);	\
})
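
/*
 * Example allocation (a minimal sketch; the size and the overwrite flag
 * are arbitrary choices for illustration):
 *
 *	struct trace_buffer *buffer;
 *
 *	buffer = ring_buffer_alloc(1 << 20, RB_FL_OVERWRITE);
 *	if (!buffer)
 *		return -ENOMEM;
 *	...
 *	ring_buffer_free(buffer);
 */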

int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full);
__poll_t ring_buffer_poll_wait(struct trace_buffer *buffer, int cpu,
			       struct file *filp, poll_table *poll_table);
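
/*
 * Example (a sketch): block until data is available on @cpu; passing 0
 * for @full is assumed here to mean "wake on any data" rather than
 * waiting for a fill watermark:
 *
 *	int ret = ring_buffer_wait(buffer, cpu, 0);
 *
 *	if (ret < 0)
 *		return ret;
 */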

#define RING_BUFFER_ALL_CPUS -1

void ring_buffer_free(struct trace_buffer *buffer);

int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size, int cpu);

void ring_buffer_change_overwrite(struct trace_buffer *buffer, int val);

struct ring_buffer_event *ring_buffer_lock_reserve(struct trace_buffer *buffer,
						   unsigned long length);
int ring_buffer_unlock_commit(struct trace_buffer *buffer,
			      struct ring_buffer_event *event);
int ring_buffer_write(struct trace_buffer *buffer,
		      unsigned long length, void *data);
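
/*
 * Example write (a minimal sketch; struct my_event and its field are
 * hypothetical):
 *
 *	struct ring_buffer_event *event;
 *	struct my_event *entry;
 *
 *	event = ring_buffer_lock_reserve(buffer, sizeof(*entry));
 *	if (!event)
 *		return -EBUSY;
 *	entry = ring_buffer_event_data(event);
 *	entry->value = 42;
 *	ring_buffer_unlock_commit(buffer, event);
 *
 * ring_buffer_write() combines the reserve/copy/commit steps when the
 * data is already marshalled in a local buffer.
 */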

void ring_buffer_nest_start(struct trace_buffer *buffer);
void ring_buffer_nest_end(struct trace_buffer *buffer);

struct ring_buffer_event *
ring_buffer_peek(struct trace_buffer *buffer, int cpu, u64 *ts,
		 unsigned long *lost_events);
struct ring_buffer_event *
ring_buffer_consume(struct trace_buffer *buffer, int cpu, u64 *ts,
		    unsigned long *lost_events);
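
/*
 * Example consuming read (a sketch; process_event() is hypothetical):
 *
 *	struct ring_buffer_event *event;
 *	unsigned long lost;
 *	u64 ts;
 *
 *	while ((event = ring_buffer_consume(buffer, cpu, &ts, &lost)))
 *		process_event(event, ts, lost);
 */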

struct ring_buffer_iter *
ring_buffer_read_prepare(struct trace_buffer *buffer, int cpu, gfp_t flags);
void ring_buffer_read_prepare_sync(void);
void ring_buffer_read_start(struct ring_buffer_iter *iter);
void ring_buffer_read_finish(struct ring_buffer_iter *iter);

struct ring_buffer_event *
ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts);
void ring_buffer_iter_advance(struct ring_buffer_iter *iter);
void ring_buffer_iter_reset(struct ring_buffer_iter *iter);
int ring_buffer_iter_empty(struct ring_buffer_iter *iter);
bool ring_buffer_iter_dropped(struct ring_buffer_iter *iter);
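
/*
 * Example non-consuming read (a sketch; events stay in the buffer, and
 * process_event() is hypothetical):
 *
 *	struct ring_buffer_iter *iter;
 *	struct ring_buffer_event *event;
 *	u64 ts;
 *
 *	iter = ring_buffer_read_prepare(buffer, cpu, GFP_KERNEL);
 *	if (!iter)
 *		return -ENOMEM;
 *	ring_buffer_read_prepare_sync();
 *	ring_buffer_read_start(iter);
 *	while ((event = ring_buffer_iter_peek(iter, &ts))) {
 *		process_event(event, ts);
 *		ring_buffer_iter_advance(iter);
 *	}
 *	ring_buffer_read_finish(iter);
 */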

unsigned long ring_buffer_size(struct trace_buffer *buffer, int cpu);

void ring_buffer_reset_cpu(struct trace_buffer *buffer, int cpu);
void ring_buffer_reset_online_cpus(struct trace_buffer *buffer);
void ring_buffer_reset(struct trace_buffer *buffer);

#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
int ring_buffer_swap_cpu(struct trace_buffer *buffer_a,
			 struct trace_buffer *buffer_b, int cpu);
#else
static inline int
ring_buffer_swap_cpu(struct trace_buffer *buffer_a,
		     struct trace_buffer *buffer_b, int cpu)
{
	return -ENODEV;
}
#endif
Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 160 | |
Steven Rostedt (VMware) | 1329249 | 2019-12-13 13:58:57 -0500 | [diff] [blame] | 161 | bool ring_buffer_empty(struct trace_buffer *buffer); |
| 162 | bool ring_buffer_empty_cpu(struct trace_buffer *buffer, int cpu); |
Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 163 | |
void ring_buffer_record_disable(struct trace_buffer *buffer);
void ring_buffer_record_enable(struct trace_buffer *buffer);
void ring_buffer_record_off(struct trace_buffer *buffer);
void ring_buffer_record_on(struct trace_buffer *buffer);
bool ring_buffer_record_is_on(struct trace_buffer *buffer);
bool ring_buffer_record_is_set_on(struct trace_buffer *buffer);
void ring_buffer_record_disable_cpu(struct trace_buffer *buffer, int cpu);
void ring_buffer_record_enable_cpu(struct trace_buffer *buffer, int cpu);
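
/*
 * Example (a sketch of one way to safely clear a live buffer, modelled
 * on the tracing core; the synchronize_rcu() step is an assumption of
 * this sketch, letting in-flight writers drain before the reset):
 *
 *	ring_buffer_record_disable(buffer);
 *	synchronize_rcu();
 *	ring_buffer_reset(buffer);
 *	ring_buffer_record_enable(buffer);
 */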

u64 ring_buffer_oldest_event_ts(struct trace_buffer *buffer, int cpu);
unsigned long ring_buffer_bytes_cpu(struct trace_buffer *buffer, int cpu);
unsigned long ring_buffer_entries(struct trace_buffer *buffer);
unsigned long ring_buffer_overruns(struct trace_buffer *buffer);
unsigned long ring_buffer_entries_cpu(struct trace_buffer *buffer, int cpu);
unsigned long ring_buffer_overrun_cpu(struct trace_buffer *buffer, int cpu);
unsigned long ring_buffer_commit_overrun_cpu(struct trace_buffer *buffer, int cpu);
unsigned long ring_buffer_dropped_events_cpu(struct trace_buffer *buffer, int cpu);
unsigned long ring_buffer_read_events_cpu(struct trace_buffer *buffer, int cpu);

u64 ring_buffer_time_stamp(struct trace_buffer *buffer, int cpu);
void ring_buffer_normalize_time_stamp(struct trace_buffer *buffer,
				      int cpu, u64 *ts);
void ring_buffer_set_clock(struct trace_buffer *buffer,
			   u64 (*clock)(void));
void ring_buffer_set_time_stamp_abs(struct trace_buffer *buffer, bool abs);
bool ring_buffer_time_stamp_abs(struct trace_buffer *buffer);
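
/*
 * Example (a sketch): install a custom clock; my_clock() is hypothetical
 * and must be callable from any context the buffer is written from:
 *
 *	static u64 my_clock(void)
 *	{
 *		return sched_clock();
 *	}
 *
 *	ring_buffer_set_clock(buffer, my_clock);
 */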
Steven Rostedt | 7a8e76a | 2008-09-29 23:02:38 -0400 | [diff] [blame] | 190 | |
Steven Rostedt (VMware) | 1329249 | 2019-12-13 13:58:57 -0500 | [diff] [blame] | 191 | size_t ring_buffer_nr_pages(struct trace_buffer *buffer, int cpu); |
| 192 | size_t ring_buffer_nr_dirty_pages(struct trace_buffer *buffer, int cpu); |
Steven Rostedt | ef7a4a1 | 2009-03-03 00:27:49 -0500 | [diff] [blame] | 193 | |
Steven Rostedt (VMware) | 1329249 | 2019-12-13 13:58:57 -0500 | [diff] [blame] | 194 | void *ring_buffer_alloc_read_page(struct trace_buffer *buffer, int cpu); |
| 195 | void ring_buffer_free_read_page(struct trace_buffer *buffer, int cpu, void *data); |
| 196 | int ring_buffer_read_page(struct trace_buffer *buffer, void **data_page, |
Steven Rostedt | ef7a4a1 | 2009-03-03 00:27:49 -0500 | [diff] [blame] | 197 | size_t len, int cpu, int full); |
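
/*
 * Example page read (a sketch; process_page() is hypothetical, and the
 * ERR_PTR convention for the page allocation is an assumption here):
 *
 *	void *rpage = ring_buffer_alloc_read_page(buffer, cpu);
 *	int ret;
 *
 *	if (IS_ERR(rpage))
 *		return PTR_ERR(rpage);
 *	ret = ring_buffer_read_page(buffer, &rpage, PAGE_SIZE, cpu, 0);
 *	if (ret >= 0)
 *		process_page(rpage, ret);
 *	ring_buffer_free_read_page(buffer, cpu, rpage);
 */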

struct trace_seq;

int ring_buffer_print_entry_header(struct trace_seq *s);
int ring_buffer_print_page_header(struct trace_seq *s);

enum ring_buffer_flags {
	RB_FL_OVERWRITE		= 1 << 0,
};

#ifdef CONFIG_RING_BUFFER
int trace_rb_cpu_prepare(unsigned int cpu, struct hlist_node *node);
#else
#define trace_rb_cpu_prepare	NULL
#endif
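
/*
 * trace_rb_cpu_prepare() is a CPU-hotplug "prepare" callback. A sketch
 * of how a user might register it (the state name here is illustrative,
 * not taken from this header):
 *
 *	ret = cpuhp_setup_state_multi(CPUHP_TRACE_RB_PREPARE,
 *				      "trace/RB:prepare",
 *				      trace_rb_cpu_prepare, NULL);
 */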

#endif /* _LINUX_RING_BUFFER_H */