/*
 * Generic ring buffer
 *
 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
 */
#include <linux/ring_buffer.h>
#include <linux/trace_clock.h>
#include <linux/ftrace_irq.h>
#include <linux/spinlock.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/mutex.h>
#include <linux/init.h>
#include <linux/hash.h>
#include <linux/list.h>
#include <linux/cpu.h>
#include <linux/fs.h>

#include "trace.h"

/*
 * The ring buffer is made up of a list of pages. A separate list of pages is
 * allocated for each CPU. A writer may only write to a buffer that is
 * associated with the CPU it is currently executing on. A reader may read
 * from any per cpu buffer.
 *
 * The reader is special. For each per cpu buffer, the reader has its own
 * reader page. When a reader has read the entire reader page, this reader
 * page is swapped with another page in the ring buffer.
 *
 * Now, as long as the writer is off the reader page, the reader can do
 * whatever it wants with that page. The writer will never write to that page
 * again (as long as it is out of the ring buffer).
 *
 * Here's some silly ASCII art.
 *
 *   +------+
 *   |reader|          RING BUFFER
 *   |page  |
 *   +------+        +---+   +---+   +---+
 *                   |   |-->|   |-->|   |
 *                   +---+   +---+   +---+
 *                     ^               |
 *                     |               |
 *                     +---------------+
 *
 *
 *   +------+
 *   |reader|          RING BUFFER
 *   |page  |------------------v
 *   +------+        +---+   +---+   +---+
 *                   |   |-->|   |-->|   |
 *                   +---+   +---+   +---+
 *                     ^               |
 *                     |               |
 *                     +---------------+
 *
 *
 *   +------+
 *   |reader|          RING BUFFER
 *   |page  |------------------v
 *   +------+        +---+   +---+   +---+
 *      ^            |   |-->|   |-->|   |
 *      |            +---+   +---+   +---+
 *      |                              |
 *      |                              |
 *      +------------------------------+
 *
 *
 *   +------+
 *   |buffer|          RING BUFFER
 *   |page  |------------------v
 *   +------+        +---+   +---+   +---+
 *      ^            |   |   |   |-->|   |
 *      |   New      +---+   +---+   +---+
 *      |  Reader------^               |
 *      |   page                       |
 *      +------------------------------+
 *
 *
 * After we make this swap, the reader can hand this page off to the splice
 * code and be done with it. It can even allocate a new page if it needs to
 * and swap that into the ring buffer.
 *
 * We will be using cmpxchg soon to make all this lockless.
 *
 */

/*
 * A fast way to enable or disable all ring buffers is to
 * call tracing_on or tracing_off. Turning off the ring buffers
 * prevents all ring buffers from being recorded to.
 * Turning this switch on makes it OK to write to the
 * ring buffer, if the ring buffer is enabled itself.
 *
 * There are three layers that must be on in order to write
 * to the ring buffer.
 *
 * 1) This global flag must be set.
 * 2) The ring buffer must be enabled for recording.
 * 3) The per cpu buffer must be enabled for recording.
 *
 * In case of an anomaly, this global flag has a bit set that
 * will permanently disable all ring buffers.
 */

/*
 * Global flag to disable all recording to ring buffers
 * This has two bits: ON, DISABLED
 *
 *  ON   DISABLED
 * ---- ----------
 *   0      0        : ring buffers are off
 *   1      0        : ring buffers are on
 *   X      1        : ring buffers are permanently disabled
 */

enum {
	RB_BUFFERS_ON_BIT	= 0,
	RB_BUFFERS_DISABLED_BIT	= 1,
};

enum {
	RB_BUFFERS_ON		= 1 << RB_BUFFERS_ON_BIT,
	RB_BUFFERS_DISABLED	= 1 << RB_BUFFERS_DISABLED_BIT,
};
static unsigned long ring_buffer_flags __read_mostly = RB_BUFFERS_ON;

#define BUF_PAGE_HDR_SIZE offsetof(struct buffer_data_page, data)

/**
 * tracing_on - enable all tracing buffers
 *
 * This function enables all tracing buffers that may have been
 * disabled with tracing_off.
 */
void tracing_on(void)
{
	set_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags);
}
EXPORT_SYMBOL_GPL(tracing_on);

/**
 * tracing_off - turn off all tracing buffers
 *
 * This function stops all tracing buffers from recording data.
 * It does not disable any overhead the tracers themselves may
 * be causing. This function simply causes all recording to
 * the ring buffers to fail.
 */
void tracing_off(void)
{
	clear_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags);
}
EXPORT_SYMBOL_GPL(tracing_off);

/**
 * tracing_off_permanent - permanently disable ring buffers
 *
 * This function, once called, will disable all ring buffers
 * permanently.
 */
void tracing_off_permanent(void)
{
	set_bit(RB_BUFFERS_DISABLED_BIT, &ring_buffer_flags);
}

/**
 * tracing_is_on - show state of ring buffers enabled
 */
int tracing_is_on(void)
{
	return ring_buffer_flags == RB_BUFFERS_ON;
}
EXPORT_SYMBOL_GPL(tracing_is_on);
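
/*
 * Editor's example (a minimal sketch, not part of the original file;
 * the function name is hypothetical and the block is guarded out of
 * the build): quiescing all ring buffers around an inspection step
 * using the three calls above.
 */
#if 0
static void example_pause_all_tracing(void)
{
	int was_on = tracing_is_on();

	tracing_off();		/* all ring buffer writes now fail */

	/* ... inspect buffers while nothing new is recorded ... */

	if (was_on)
		tracing_on();	/* restore recording */
}
#endif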

#include "trace.h"

#define RB_EVNT_HDR_SIZE (offsetof(struct ring_buffer_event, array))
#define RB_ALIGNMENT		4U
#define RB_MAX_SMALL_DATA	28

enum {
	RB_LEN_TIME_EXTEND = 8,
	RB_LEN_TIME_STAMP = 16,
};

static inline int rb_null_event(struct ring_buffer_event *event)
{
	return event->type == RINGBUF_TYPE_PADDING && event->time_delta == 0;
}

static inline int rb_discarded_event(struct ring_buffer_event *event)
{
	return event->type == RINGBUF_TYPE_PADDING && event->time_delta;
}

static void rb_event_set_padding(struct ring_buffer_event *event)
{
	event->type = RINGBUF_TYPE_PADDING;
	event->time_delta = 0;
}

/**
 * ring_buffer_event_discard - discard an event in the ring buffer
 * @buffer: the ring buffer
 * @event: the event to discard
 *
 * Sometimes an event that is in the ring buffer needs to be ignored.
 * This function lets the user discard an event in the ring buffer
 * and then that event will not be read later.
 *
 * Note, it is up to the user to be careful with this, and protect
 * against races. If the user discards an event that has been consumed
 * it is possible that it could corrupt the ring buffer.
 */
void ring_buffer_event_discard(struct ring_buffer_event *event)
{
	event->type = RINGBUF_TYPE_PADDING;
	/* time delta must be non zero */
	if (!event->time_delta)
		event->time_delta = 1;
}

static unsigned
rb_event_data_length(struct ring_buffer_event *event)
{
	unsigned length;

	if (event->len)
		length = event->len * RB_ALIGNMENT;
	else
		length = event->array[0];
	return length + RB_EVNT_HDR_SIZE;
}

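/*
 * Editor's note, a worked example of the encoding above: a small data
 * event with len = 5 carries 5 * RB_ALIGNMENT = 20 bytes of payload,
 * so rb_event_data_length() returns 20 + RB_EVNT_HDR_SIZE. A payload
 * larger than RB_MAX_SMALL_DATA is stored with len = 0 and its byte
 * count in array[0] instead.
 */
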
/* inline for ring buffer fast paths */
static unsigned
rb_event_length(struct ring_buffer_event *event)
{
	switch (event->type) {
	case RINGBUF_TYPE_PADDING:
		if (rb_null_event(event))
			/* undefined */
			return -1;
		return rb_event_data_length(event);

	case RINGBUF_TYPE_TIME_EXTEND:
		return RB_LEN_TIME_EXTEND;

	case RINGBUF_TYPE_TIME_STAMP:
		return RB_LEN_TIME_STAMP;

	case RINGBUF_TYPE_DATA:
		return rb_event_data_length(event);
	default:
		BUG();
	}
	/* not hit */
	return 0;
}

/**
 * ring_buffer_event_length - return the length of the event
 * @event: the event to get the length of
 */
unsigned ring_buffer_event_length(struct ring_buffer_event *event)
{
	unsigned length = rb_event_length(event);
	if (event->type != RINGBUF_TYPE_DATA)
		return length;
	length -= RB_EVNT_HDR_SIZE;
	if (length > RB_MAX_SMALL_DATA + sizeof(event->array[0]))
		length -= sizeof(event->array[0]);
	return length;
}
EXPORT_SYMBOL_GPL(ring_buffer_event_length);

/* inline for ring buffer fast paths */
static void *
rb_event_data(struct ring_buffer_event *event)
{
	BUG_ON(event->type != RINGBUF_TYPE_DATA);
	/* If length is in len field, then array[0] has the data */
	if (event->len)
		return (void *)&event->array[0];
	/* Otherwise length is in array[0] and array[1] has the data */
	return (void *)&event->array[1];
}

/**
 * ring_buffer_event_data - return the data of the event
 * @event: the event to get the data from
 */
void *ring_buffer_event_data(struct ring_buffer_event *event)
{
	return rb_event_data(event);
}
EXPORT_SYMBOL_GPL(ring_buffer_event_data);
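
/*
 * Editor's sketch (guarded out of the build; it assumes the
 * reserve/commit API whose kernel-doc appears later in this file, and
 * "value" is a made-up payload): reserve an event, fill it through
 * ring_buffer_event_data(), then either commit it as-is or first turn
 * it into padding with ring_buffer_event_discard().
 */
#if 0
static int example_write_event(struct ring_buffer *buffer, int value)
{
	struct ring_buffer_event *event;
	int *body;

	event = ring_buffer_lock_reserve(buffer, sizeof(*body));
	if (!event)
		return -EBUSY;

	body = ring_buffer_event_data(event);
	*body = value;

	/*
	 * A reserved event must still be committed; discarding just
	 * converts it to padding that readers will skip.
	 */
	if (value < 0)
		ring_buffer_event_discard(event);

	return ring_buffer_unlock_commit(buffer, event);
}
#endif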

#define for_each_buffer_cpu(buffer, cpu)		\
	for_each_cpu(cpu, buffer->cpumask)

#define TS_SHIFT	27
#define TS_MASK		((1ULL << TS_SHIFT) - 1)
#define TS_DELTA_TEST	(~TS_MASK)

struct buffer_data_page {
	u64		 time_stamp;	/* page time stamp */
	local_t		 commit;	/* write committed index */
	unsigned char	 data[];	/* data of buffer page */
};

struct buffer_page {
	local_t		 write;		/* index for next write */
	unsigned	 read;		/* index for next read */
	struct list_head list;		/* list of free pages */
	struct buffer_data_page *page;	/* Actual data page */
};

static void rb_init_page(struct buffer_data_page *bpage)
{
	local_set(&bpage->commit, 0);
}

/**
 * ring_buffer_page_len - the size of data on the page.
 * @page: The page to read
 *
 * Returns the amount of data on the page, including buffer page header.
 */
size_t ring_buffer_page_len(void *page)
{
	return local_read(&((struct buffer_data_page *)page)->commit)
		+ BUF_PAGE_HDR_SIZE;
}

/*
 * Also stolen from mm/slob.c. Thanks to Mathieu Desnoyers for pointing
 * this issue out.
 */
static void free_buffer_page(struct buffer_page *bpage)
{
	free_page((unsigned long)bpage->page);
	kfree(bpage);
}

/*
 * We need to fit the time_stamp delta into 27 bits.
 */
static inline int test_time_stamp(u64 delta)
{
	if (delta & TS_DELTA_TEST)
		return 1;
	return 0;
}
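
/*
 * Editor's note: with a nanosecond clock, 27 bits holds deltas up to
 * 2^27 ns, roughly 134 ms. Two events on the same page further apart
 * than that cannot use the inline delta and force a
 * RINGBUF_TYPE_TIME_EXTEND event (see rb_add_time_stamp() below).
 */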

#define BUF_PAGE_SIZE (PAGE_SIZE - BUF_PAGE_HDR_SIZE)

/*
 * head_page == tail_page && head == tail then buffer is empty.
 */
struct ring_buffer_per_cpu {
	int				cpu;
	struct ring_buffer		*buffer;
	spinlock_t			reader_lock; /* serialize readers */
	raw_spinlock_t			lock;
	struct lock_class_key		lock_key;
	struct list_head		pages;
	struct buffer_page		*head_page;	/* read from head */
	struct buffer_page		*tail_page;	/* write to tail */
	struct buffer_page		*commit_page;	/* committed pages */
	struct buffer_page		*reader_page;
	unsigned long			overrun;
	unsigned long			entries;
	u64				write_stamp;
	u64				read_stamp;
	atomic_t			record_disabled;
};

struct ring_buffer {
	unsigned			pages;
	unsigned			flags;
	int				cpus;
	atomic_t			record_disabled;
	cpumask_var_t			cpumask;

	struct mutex			mutex;

	struct ring_buffer_per_cpu	**buffers;

#ifdef CONFIG_HOTPLUG_CPU
	struct notifier_block		cpu_notify;
#endif
	u64				(*clock)(void);
};

struct ring_buffer_iter {
	struct ring_buffer_per_cpu	*cpu_buffer;
	unsigned long			head;
	struct buffer_page		*head_page;
	u64				read_stamp;
};

/* buffer may be either ring_buffer or ring_buffer_per_cpu */
#define RB_WARN_ON(buffer, cond)				\
	({							\
		int _____ret = unlikely(cond);			\
		if (_____ret) {					\
			atomic_inc(&buffer->record_disabled);	\
			WARN_ON(1);				\
		}						\
		_____ret;					\
	})

/* Up this if you want to test the TIME_EXTENTS and normalization */
#define DEBUG_SHIFT 0

u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu)
{
	u64 time;

	preempt_disable_notrace();
	/* shift to debug/test normalization and TIME_EXTENTS */
	time = buffer->clock() << DEBUG_SHIFT;
	preempt_enable_no_resched_notrace();

	return time;
}
EXPORT_SYMBOL_GPL(ring_buffer_time_stamp);

void ring_buffer_normalize_time_stamp(struct ring_buffer *buffer,
				      int cpu, u64 *ts)
{
	/* Just stupid testing the normalize function and deltas */
	*ts >>= DEBUG_SHIFT;
}
EXPORT_SYMBOL_GPL(ring_buffer_normalize_time_stamp);

/**
 * check_pages - integrity check of buffer pages
 * @cpu_buffer: CPU buffer with pages to test
 *
 * As a safety measure we check to make sure the data pages have not
 * been corrupted.
 */
static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct list_head *head = &cpu_buffer->pages;
	struct buffer_page *bpage, *tmp;

	if (RB_WARN_ON(cpu_buffer, head->next->prev != head))
		return -1;
	if (RB_WARN_ON(cpu_buffer, head->prev->next != head))
		return -1;

	list_for_each_entry_safe(bpage, tmp, head, list) {
		if (RB_WARN_ON(cpu_buffer,
			       bpage->list.next->prev != &bpage->list))
			return -1;
		if (RB_WARN_ON(cpu_buffer,
			       bpage->list.prev->next != &bpage->list))
			return -1;
	}

	return 0;
}

static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
			     unsigned nr_pages)
{
	struct list_head *head = &cpu_buffer->pages;
	struct buffer_page *bpage, *tmp;
	unsigned long addr;
	LIST_HEAD(pages);
	unsigned i;

	for (i = 0; i < nr_pages; i++) {
		bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
				    GFP_KERNEL, cpu_to_node(cpu_buffer->cpu));
		if (!bpage)
			goto free_pages;
		list_add(&bpage->list, &pages);

		addr = __get_free_page(GFP_KERNEL);
		if (!addr)
			goto free_pages;
		bpage->page = (void *)addr;
		rb_init_page(bpage->page);
	}

	list_splice(&pages, head);

	rb_check_pages(cpu_buffer);

	return 0;

 free_pages:
	list_for_each_entry_safe(bpage, tmp, &pages, list) {
		list_del_init(&bpage->list);
		free_buffer_page(bpage);
	}
	return -ENOMEM;
}

static struct ring_buffer_per_cpu *
rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct buffer_page *bpage;
	unsigned long addr;
	int ret;

	cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()),
				  GFP_KERNEL, cpu_to_node(cpu));
	if (!cpu_buffer)
		return NULL;

	cpu_buffer->cpu = cpu;
	cpu_buffer->buffer = buffer;
	spin_lock_init(&cpu_buffer->reader_lock);
	cpu_buffer->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
	INIT_LIST_HEAD(&cpu_buffer->pages);

	bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
			    GFP_KERNEL, cpu_to_node(cpu));
	if (!bpage)
		goto fail_free_buffer;

	cpu_buffer->reader_page = bpage;
	addr = __get_free_page(GFP_KERNEL);
	if (!addr)
		goto fail_free_reader;
	bpage->page = (void *)addr;
	rb_init_page(bpage->page);

	INIT_LIST_HEAD(&cpu_buffer->reader_page->list);

	ret = rb_allocate_pages(cpu_buffer, buffer->pages);
	if (ret < 0)
		goto fail_free_reader;

	cpu_buffer->head_page
		= list_entry(cpu_buffer->pages.next, struct buffer_page, list);
	cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page;

	return cpu_buffer;

 fail_free_reader:
	free_buffer_page(cpu_buffer->reader_page);

 fail_free_buffer:
	kfree(cpu_buffer);
	return NULL;
}

static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct list_head *head = &cpu_buffer->pages;
	struct buffer_page *bpage, *tmp;

	free_buffer_page(cpu_buffer->reader_page);

	list_for_each_entry_safe(bpage, tmp, head, list) {
		list_del_init(&bpage->list);
		free_buffer_page(bpage);
	}
	kfree(cpu_buffer);
}

/*
 * Causes compile errors if the struct buffer_page gets bigger
 * than the struct page.
 */
extern int ring_buffer_page_too_big(void);

#ifdef CONFIG_HOTPLUG_CPU
static int rb_cpu_notify(struct notifier_block *self,
			 unsigned long action, void *hcpu);
#endif

/**
 * ring_buffer_alloc - allocate a new ring_buffer
 * @size: the size in bytes per cpu that is needed.
 * @flags: attributes to set for the ring buffer.
 *
 * Currently the only flag that is available is the RB_FL_OVERWRITE
 * flag. This flag means that the buffer will overwrite old data
 * when the buffer wraps. If this flag is not set, the buffer will
 * drop data when the tail hits the head.
 */
struct ring_buffer *ring_buffer_alloc(unsigned long size, unsigned flags)
{
	struct ring_buffer *buffer;
	int bsize;
	int cpu;

	/* Paranoid! Optimizes out when all is well */
	if (sizeof(struct buffer_page) > sizeof(struct page))
		ring_buffer_page_too_big();


	/* keep it in its own cache line */
	buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()),
			 GFP_KERNEL);
	if (!buffer)
		return NULL;

	if (!alloc_cpumask_var(&buffer->cpumask, GFP_KERNEL))
		goto fail_free_buffer;

	buffer->pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
	buffer->flags = flags;
	buffer->clock = trace_clock_local;

	/* need at least two pages */
	if (buffer->pages == 1)
		buffer->pages++;

	/*
	 * In case of non-hotplug cpu, if the ring-buffer is allocated
	 * in early initcall, it will not be notified of secondary cpus.
	 * In that off case, we need to allocate for all possible cpus.
	 */
#ifdef CONFIG_HOTPLUG_CPU
	get_online_cpus();
	cpumask_copy(buffer->cpumask, cpu_online_mask);
#else
	cpumask_copy(buffer->cpumask, cpu_possible_mask);
#endif
	buffer->cpus = nr_cpu_ids;

	bsize = sizeof(void *) * nr_cpu_ids;
	buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()),
				  GFP_KERNEL);
	if (!buffer->buffers)
		goto fail_free_cpumask;

	for_each_buffer_cpu(buffer, cpu) {
		buffer->buffers[cpu] =
			rb_allocate_cpu_buffer(buffer, cpu);
		if (!buffer->buffers[cpu])
			goto fail_free_buffers;
	}

#ifdef CONFIG_HOTPLUG_CPU
	buffer->cpu_notify.notifier_call = rb_cpu_notify;
	buffer->cpu_notify.priority = 0;
	register_cpu_notifier(&buffer->cpu_notify);
#endif

	put_online_cpus();
	mutex_init(&buffer->mutex);

	return buffer;

 fail_free_buffers:
	for_each_buffer_cpu(buffer, cpu) {
		if (buffer->buffers[cpu])
			rb_free_cpu_buffer(buffer->buffers[cpu]);
	}
	kfree(buffer->buffers);

 fail_free_cpumask:
	free_cpumask_var(buffer->cpumask);
	put_online_cpus();

 fail_free_buffer:
	kfree(buffer);
	return NULL;
}
EXPORT_SYMBOL_GPL(ring_buffer_alloc);
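
/*
 * Editor's example (a hypothetical module, guarded out of the build):
 * allocating a 1MB-per-cpu overwriting buffer and freeing it again.
 * The size is rounded up to whole buffer pages, and NULL is returned
 * on failure.
 */
#if 0
static struct ring_buffer *example_buffer;

static int __init example_init(void)
{
	example_buffer = ring_buffer_alloc(1 << 20, RB_FL_OVERWRITE);
	if (!example_buffer)
		return -ENOMEM;
	return 0;
}

static void __exit example_exit(void)
{
	ring_buffer_free(example_buffer);
}
#endif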

/**
 * ring_buffer_free - free a ring buffer.
 * @buffer: the buffer to free.
 */
void
ring_buffer_free(struct ring_buffer *buffer)
{
	int cpu;

	get_online_cpus();

#ifdef CONFIG_HOTPLUG_CPU
	unregister_cpu_notifier(&buffer->cpu_notify);
#endif

	for_each_buffer_cpu(buffer, cpu)
		rb_free_cpu_buffer(buffer->buffers[cpu]);

	put_online_cpus();

	free_cpumask_var(buffer->cpumask);

	kfree(buffer);
}
EXPORT_SYMBOL_GPL(ring_buffer_free);

void ring_buffer_set_clock(struct ring_buffer *buffer,
			   u64 (*clock)(void))
{
	buffer->clock = clock;
}

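/*
 * Editor's note: the clock defaults to trace_clock_local (set in
 * ring_buffer_alloc() above). A caller that wants timestamps
 * comparable across CPUs could, for example, switch to
 * trace_clock_global from <linux/trace_clock.h>:
 *
 *	ring_buffer_set_clock(buffer, trace_clock_global);
 */
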
static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);

static void
rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
{
	struct buffer_page *bpage;
	struct list_head *p;
	unsigned i;

	atomic_inc(&cpu_buffer->record_disabled);
	synchronize_sched();

	for (i = 0; i < nr_pages; i++) {
		if (RB_WARN_ON(cpu_buffer, list_empty(&cpu_buffer->pages)))
			return;
		p = cpu_buffer->pages.next;
		bpage = list_entry(p, struct buffer_page, list);
		list_del_init(&bpage->list);
		free_buffer_page(bpage);
	}
	if (RB_WARN_ON(cpu_buffer, list_empty(&cpu_buffer->pages)))
		return;

	rb_reset_cpu(cpu_buffer);

	rb_check_pages(cpu_buffer);

	atomic_dec(&cpu_buffer->record_disabled);

}

static void
rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
		struct list_head *pages, unsigned nr_pages)
{
	struct buffer_page *bpage;
	struct list_head *p;
	unsigned i;

	atomic_inc(&cpu_buffer->record_disabled);
	synchronize_sched();

	for (i = 0; i < nr_pages; i++) {
		if (RB_WARN_ON(cpu_buffer, list_empty(pages)))
			return;
		p = pages->next;
		bpage = list_entry(p, struct buffer_page, list);
		list_del_init(&bpage->list);
		list_add_tail(&bpage->list, &cpu_buffer->pages);
	}
	rb_reset_cpu(cpu_buffer);

	rb_check_pages(cpu_buffer);

	atomic_dec(&cpu_buffer->record_disabled);
}

/**
 * ring_buffer_resize - resize the ring buffer
 * @buffer: the buffer to resize.
 * @size: the new size.
 *
 * The tracer is responsible for making sure that the buffer is
 * not being used while changing the size.
 * Note: We may be able to change the above requirement by using
 *  RCU synchronizations.
 *
 * Minimum size is 2 * BUF_PAGE_SIZE.
 *
 * Returns -1 on failure.
 */
int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	unsigned nr_pages, rm_pages, new_pages;
	struct buffer_page *bpage, *tmp;
	unsigned long buffer_size;
	unsigned long addr;
	LIST_HEAD(pages);
	int i, cpu;

	/*
	 * Always succeed at resizing a non-existent buffer:
	 */
	if (!buffer)
		return size;

	size = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
	size *= BUF_PAGE_SIZE;
	buffer_size = buffer->pages * BUF_PAGE_SIZE;

	/* we need a minimum of two pages */
	if (size < BUF_PAGE_SIZE * 2)
		size = BUF_PAGE_SIZE * 2;

	if (size == buffer_size)
		return size;

	mutex_lock(&buffer->mutex);
	get_online_cpus();

	nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);

	if (size < buffer_size) {

		/* easy case, just free pages */
		if (RB_WARN_ON(buffer, nr_pages >= buffer->pages))
			goto out_fail;

		rm_pages = buffer->pages - nr_pages;

		for_each_buffer_cpu(buffer, cpu) {
			cpu_buffer = buffer->buffers[cpu];
			rb_remove_pages(cpu_buffer, rm_pages);
		}
		goto out;
	}

	/*
	 * This is a bit more difficult. We only want to add pages
	 * when we can allocate enough for all CPUs. We do this
	 * by allocating all the pages and storing them on a local
	 * link list. If we succeed in our allocation, then we
	 * add these pages to the cpu_buffers. Otherwise we just free
	 * them all and return -ENOMEM;
	 */
	if (RB_WARN_ON(buffer, nr_pages <= buffer->pages))
		goto out_fail;

	new_pages = nr_pages - buffer->pages;

	for_each_buffer_cpu(buffer, cpu) {
		for (i = 0; i < new_pages; i++) {
			bpage = kzalloc_node(ALIGN(sizeof(*bpage),
						  cache_line_size()),
					    GFP_KERNEL, cpu_to_node(cpu));
			if (!bpage)
				goto free_pages;
			list_add(&bpage->list, &pages);
			addr = __get_free_page(GFP_KERNEL);
			if (!addr)
				goto free_pages;
			bpage->page = (void *)addr;
			rb_init_page(bpage->page);
		}
	}

	for_each_buffer_cpu(buffer, cpu) {
		cpu_buffer = buffer->buffers[cpu];
		rb_insert_pages(cpu_buffer, &pages, new_pages);
	}

	if (RB_WARN_ON(buffer, !list_empty(&pages)))
		goto out_fail;

 out:
	buffer->pages = nr_pages;
	put_online_cpus();
	mutex_unlock(&buffer->mutex);

	return size;

 free_pages:
	list_for_each_entry_safe(bpage, tmp, &pages, list) {
		list_del_init(&bpage->list);
		free_buffer_page(bpage);
	}
	put_online_cpus();
	mutex_unlock(&buffer->mutex);
	return -ENOMEM;

	/*
	 * Something went totally wrong, and we are too paranoid
	 * to even clean up the mess.
	 */
 out_fail:
	put_online_cpus();
	mutex_unlock(&buffer->mutex);
	return -1;
}
EXPORT_SYMBOL_GPL(ring_buffer_resize);
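
/*
 * Editor's sketch (hypothetical caller, guarded out of the build):
 * growing a buffer to 4MB per cpu. On success the new size is
 * returned, so test for negative values rather than non-zero.
 */
#if 0
static int example_grow_buffer(struct ring_buffer *buffer)
{
	int ret;

	/* caller must ensure the buffer is idle; see kernel-doc above */
	ret = ring_buffer_resize(buffer, 4 << 20);
	if (ret < 0)
		return ret;	/* -ENOMEM, or -1 on internal error */
	return 0;
}
#endif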

static inline void *
__rb_data_page_index(struct buffer_data_page *bpage, unsigned index)
{
	return bpage->data + index;
}

static inline void *__rb_page_index(struct buffer_page *bpage, unsigned index)
{
	return bpage->page->data + index;
}

static inline struct ring_buffer_event *
rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer)
{
	return __rb_page_index(cpu_buffer->reader_page,
			       cpu_buffer->reader_page->read);
}

static inline struct ring_buffer_event *
rb_head_event(struct ring_buffer_per_cpu *cpu_buffer)
{
	return __rb_page_index(cpu_buffer->head_page,
			       cpu_buffer->head_page->read);
}

static inline struct ring_buffer_event *
rb_iter_head_event(struct ring_buffer_iter *iter)
{
	return __rb_page_index(iter->head_page, iter->head);
}

static inline unsigned rb_page_write(struct buffer_page *bpage)
{
	return local_read(&bpage->write);
}

static inline unsigned rb_page_commit(struct buffer_page *bpage)
{
	return local_read(&bpage->page->commit);
}

/* Size is determined by what has been committed */
static inline unsigned rb_page_size(struct buffer_page *bpage)
{
	return rb_page_commit(bpage);
}

static inline unsigned
rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer)
{
	return rb_page_commit(cpu_buffer->commit_page);
}

static inline unsigned rb_head_size(struct ring_buffer_per_cpu *cpu_buffer)
{
	return rb_page_commit(cpu_buffer->head_page);
}

/*
 * When the tail hits the head and the buffer is in overwrite mode,
 * the head jumps to the next page and all content on the previous
 * page is discarded. But before doing so, we update the overrun
 * variable of the buffer.
 */
static void rb_update_overflow(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct ring_buffer_event *event;
	unsigned long head;

	for (head = 0; head < rb_head_size(cpu_buffer);
	     head += rb_event_length(event)) {

		event = __rb_page_index(cpu_buffer->head_page, head);
		if (RB_WARN_ON(cpu_buffer, rb_null_event(event)))
			return;
		/* Only count data entries */
		if (event->type != RINGBUF_TYPE_DATA)
			continue;
		cpu_buffer->overrun++;
		cpu_buffer->entries--;
	}
}

static inline void rb_inc_page(struct ring_buffer_per_cpu *cpu_buffer,
			       struct buffer_page **bpage)
{
	struct list_head *p = (*bpage)->list.next;

	if (p == &cpu_buffer->pages)
		p = p->next;

	*bpage = list_entry(p, struct buffer_page, list);
}

static inline unsigned
rb_event_index(struct ring_buffer_event *event)
{
	unsigned long addr = (unsigned long)event;

	return (addr & ~PAGE_MASK) - (PAGE_SIZE - BUF_PAGE_SIZE);
}

static int
rb_is_commit(struct ring_buffer_per_cpu *cpu_buffer,
	     struct ring_buffer_event *event)
{
	unsigned long addr = (unsigned long)event;
	unsigned long index;

	index = rb_event_index(event);
	addr &= PAGE_MASK;

	return cpu_buffer->commit_page->page == (void *)addr &&
		rb_commit_index(cpu_buffer) == index;
}

static void
rb_set_commit_event(struct ring_buffer_per_cpu *cpu_buffer,
		    struct ring_buffer_event *event)
{
	unsigned long addr = (unsigned long)event;
	unsigned long index;

	index = rb_event_index(event);
	addr &= PAGE_MASK;

	while (cpu_buffer->commit_page->page != (void *)addr) {
		if (RB_WARN_ON(cpu_buffer,
			  cpu_buffer->commit_page == cpu_buffer->tail_page))
			return;
		cpu_buffer->commit_page->page->commit =
			cpu_buffer->commit_page->write;
		rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
		cpu_buffer->write_stamp =
			cpu_buffer->commit_page->page->time_stamp;
	}

	/* Now set the commit to the event's index */
	local_set(&cpu_buffer->commit_page->page->commit, index);
}

static void
rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
{
	/*
	 * We only race with interrupts and NMIs on this CPU.
	 * If we own the commit event, then we can commit
	 * all others that interrupted us, since the interruptions
	 * are in stack format (they finish before they come
	 * back to us). This allows us to do a simple loop to
	 * assign the commit to the tail.
	 */
 again:
	while (cpu_buffer->commit_page != cpu_buffer->tail_page) {
		cpu_buffer->commit_page->page->commit =
			cpu_buffer->commit_page->write;
		rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
		cpu_buffer->write_stamp =
			cpu_buffer->commit_page->page->time_stamp;
		/* add barrier to keep gcc from optimizing too much */
		barrier();
	}
	while (rb_commit_index(cpu_buffer) !=
	       rb_page_write(cpu_buffer->commit_page)) {
		cpu_buffer->commit_page->page->commit =
			cpu_buffer->commit_page->write;
		barrier();
	}

	/* again, keep gcc from optimizing */
	barrier();

	/*
	 * If an interrupt came in just after the first while loop
	 * and pushed the tail page forward, we will be left with
	 * a dangling commit that will never go forward.
	 */
	if (unlikely(cpu_buffer->commit_page != cpu_buffer->tail_page))
		goto again;
}

static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
{
	cpu_buffer->read_stamp = cpu_buffer->reader_page->page->time_stamp;
	cpu_buffer->reader_page->read = 0;
}

static void rb_inc_iter(struct ring_buffer_iter *iter)
{
	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;

	/*
	 * The iterator could be on the reader page (it starts there).
	 * But the head could have moved, since the reader was
	 * found. Check for this case and assign the iterator
	 * to the head page instead of next.
	 */
	if (iter->head_page == cpu_buffer->reader_page)
		iter->head_page = cpu_buffer->head_page;
	else
		rb_inc_page(cpu_buffer, &iter->head_page);

	iter->read_stamp = iter->head_page->page->time_stamp;
	iter->head = 0;
}

/**
 * ring_buffer_update_event - update event type and data
 * @event: the event to update
 * @type: the type of event
 * @length: the size of the event field in the ring buffer
 *
 * Update the type and data fields of the event. The length
 * is the actual size that is written to the ring buffer,
 * and with this, we can determine what to place into the
 * data field.
 */
static void
rb_update_event(struct ring_buffer_event *event,
			 unsigned type, unsigned length)
{
	event->type = type;

	switch (type) {

	case RINGBUF_TYPE_PADDING:
		break;

	case RINGBUF_TYPE_TIME_EXTEND:
		event->len = DIV_ROUND_UP(RB_LEN_TIME_EXTEND, RB_ALIGNMENT);
		break;

	case RINGBUF_TYPE_TIME_STAMP:
		event->len = DIV_ROUND_UP(RB_LEN_TIME_STAMP, RB_ALIGNMENT);
		break;

	case RINGBUF_TYPE_DATA:
		length -= RB_EVNT_HDR_SIZE;
		if (length > RB_MAX_SMALL_DATA) {
			event->len = 0;
			event->array[0] = length;
		} else
			event->len = DIV_ROUND_UP(length, RB_ALIGNMENT);
		break;
	default:
		BUG();
	}
}

static unsigned rb_calculate_event_length(unsigned length)
{
	struct ring_buffer_event event; /* Used only for sizeof array */

	/* zero length can cause confusions */
	if (!length)
		length = 1;

	if (length > RB_MAX_SMALL_DATA)
		length += sizeof(event.array[0]);

	length += RB_EVNT_HDR_SIZE;
	length = ALIGN(length, RB_ALIGNMENT);

	return length;
}
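
/*
 * Editor's note, two worked examples of the computation above (the
 * event header, RB_EVNT_HDR_SIZE, is 4 bytes):
 *
 *   length 10:  fits in the len field, 10 + 4 = 14, aligned up to 16;
 *   length 100: exceeds RB_MAX_SMALL_DATA (28), so 4 bytes are added
 *               for array[0]: 100 + 4 + 4 = 108, already aligned.
 */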

static struct ring_buffer_event *
__rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
		  unsigned type, unsigned long length, u64 *ts)
{
	struct buffer_page *tail_page, *head_page, *reader_page, *commit_page;
	unsigned long tail, write;
	struct ring_buffer *buffer = cpu_buffer->buffer;
	struct ring_buffer_event *event;
	unsigned long flags;
	bool lock_taken = false;

	commit_page = cpu_buffer->commit_page;
	/* we just need to protect against interrupts */
	barrier();
	tail_page = cpu_buffer->tail_page;
	write = local_add_return(length, &tail_page->write);
	tail = write - length;

	/* See if we shot past the end of this buffer page */
	if (write > BUF_PAGE_SIZE) {
		struct buffer_page *next_page = tail_page;

		local_irq_save(flags);
		/*
		 * Since the write to the buffer is still not
		 * fully lockless, we must be careful with NMIs.
		 * The locks in the writers are taken when a write
		 * crosses to a new page. The locks protect against
		 * races with the readers (this will soon be fixed
		 * with a lockless solution).
		 *
		 * Because we can not protect against NMIs, and we
		 * want to keep traces reentrant, we need to manage
		 * what happens when we are in an NMI.
		 *
		 * NMIs can happen after we take the lock.
		 * If we are in an NMI, only take the lock
		 * if it is not already taken. Otherwise
		 * simply fail.
		 */
		if (unlikely(in_nmi())) {
			if (!__raw_spin_trylock(&cpu_buffer->lock))
				goto out_reset;
		} else
			__raw_spin_lock(&cpu_buffer->lock);

		lock_taken = true;

		rb_inc_page(cpu_buffer, &next_page);

		head_page = cpu_buffer->head_page;
		reader_page = cpu_buffer->reader_page;

		/* we grabbed the lock before incrementing */
		if (RB_WARN_ON(cpu_buffer, next_page == reader_page))
			goto out_reset;

		/*
		 * If for some reason, we had an interrupt storm that made
		 * it all the way around the buffer, bail, and warn
		 * about it.
		 */
		if (unlikely(next_page == commit_page)) {
			WARN_ON_ONCE(1);
			goto out_reset;
		}

		if (next_page == head_page) {
			if (!(buffer->flags & RB_FL_OVERWRITE))
				goto out_reset;

			/* tail_page has not moved yet? */
			if (tail_page == cpu_buffer->tail_page) {
				/* count overflows */
				rb_update_overflow(cpu_buffer);

				rb_inc_page(cpu_buffer, &head_page);
				cpu_buffer->head_page = head_page;
				cpu_buffer->head_page->read = 0;
			}
		}

		/*
		 * If the tail page is still the same as what we think
		 * it is, then it is up to us to update the tail
		 * pointer.
		 */
		if (tail_page == cpu_buffer->tail_page) {
			local_set(&next_page->write, 0);
			local_set(&next_page->page->commit, 0);
			cpu_buffer->tail_page = next_page;

			/* reread the time stamp */
			*ts = ring_buffer_time_stamp(buffer, cpu_buffer->cpu);
			cpu_buffer->tail_page->page->time_stamp = *ts;
		}

		/*
		 * The actual tail page has moved forward.
		 */
		if (tail < BUF_PAGE_SIZE) {
			/* Mark the rest of the page with padding */
			event = __rb_page_index(tail_page, tail);
			rb_event_set_padding(event);
		}

		if (tail <= BUF_PAGE_SIZE)
			/* Set the write back to the previous setting */
			local_set(&tail_page->write, tail);

		/*
		 * If this was a commit entry that failed,
		 * increment that too
		 */
		if (tail_page == cpu_buffer->commit_page &&
		    tail == rb_commit_index(cpu_buffer)) {
			rb_set_commit_to_write(cpu_buffer);
		}

		__raw_spin_unlock(&cpu_buffer->lock);
		local_irq_restore(flags);

		/* fail and let the caller try again */
		return ERR_PTR(-EAGAIN);
	}

	/* We reserved something on the buffer */

	if (RB_WARN_ON(cpu_buffer, write > BUF_PAGE_SIZE))
		return NULL;

	event = __rb_page_index(tail_page, tail);
	rb_update_event(event, type, length);

	/*
	 * If this is a commit and the tail is zero, then update
	 * this page's time stamp.
	 */
	if (!tail && rb_is_commit(cpu_buffer, event))
		cpu_buffer->commit_page->page->time_stamp = *ts;

	return event;

 out_reset:
	/* reset write */
	if (tail <= BUF_PAGE_SIZE)
		local_set(&tail_page->write, tail);

	if (likely(lock_taken))
		__raw_spin_unlock(&cpu_buffer->lock);
	local_irq_restore(flags);
	return NULL;
}

static int
rb_add_time_stamp(struct ring_buffer_per_cpu *cpu_buffer,
		  u64 *ts, u64 *delta)
{
	struct ring_buffer_event *event;
	static int once;
	int ret;

	if (unlikely(*delta > (1ULL << 59) && !once++)) {
		printk(KERN_WARNING "Delta way too big! %llu"
		       " ts=%llu write stamp = %llu\n",
		       (unsigned long long)*delta,
		       (unsigned long long)*ts,
		       (unsigned long long)cpu_buffer->write_stamp);
		WARN_ON(1);
	}

	/*
	 * The delta is too big, we need to add a
	 * new timestamp.
	 */
	event = __rb_reserve_next(cpu_buffer,
				  RINGBUF_TYPE_TIME_EXTEND,
				  RB_LEN_TIME_EXTEND,
				  ts);
	if (!event)
		return -EBUSY;

	if (PTR_ERR(event) == -EAGAIN)
		return -EAGAIN;

	/* Only a committed time event can update the write stamp */
	if (rb_is_commit(cpu_buffer, event)) {
		/*
		 * If this is the first on the page, then we need to
		 * update the page itself, and just put in a zero.
		 */
		if (rb_event_index(event)) {
			event->time_delta = *delta & TS_MASK;
			event->array[0] = *delta >> TS_SHIFT;
		} else {
			cpu_buffer->commit_page->page->time_stamp = *ts;
			event->time_delta = 0;
			event->array[0] = 0;
		}
		cpu_buffer->write_stamp = *ts;
		/* let the caller know this was the commit */
		ret = 1;
	} else {
		/* Darn, this is just wasted space */
		event->time_delta = 0;
		event->array[0] = 0;
		ret = 0;
	}

	*delta = 0;

	return ret;
}
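
/*
 * Editor's note, a worked example of the split above: a delta of
 * 0x10000123 ns does not fit in 27 bits, so the TIME_EXTEND event
 * stores the low bits in time_delta (0x10000123 & TS_MASK = 0x123)
 * and the high bits in array[0] (0x10000123 >> TS_SHIFT = 2); a
 * reader rebuilds the delta as (array[0] << TS_SHIFT) | time_delta.
 */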
1371
1372static struct ring_buffer_event *
1373rb_reserve_next_event(struct ring_buffer_per_cpu *cpu_buffer,
1374 unsigned type, unsigned long length)
1375{
1376 struct ring_buffer_event *event;
1377 u64 ts, delta;
Steven Rostedtbf41a152008-10-04 02:00:59 -04001378 int commit = 0;
Steven Rostedt818e3dd2008-10-31 09:58:35 -04001379 int nr_loops = 0;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001380
Steven Rostedtbf41a152008-10-04 02:00:59 -04001381 again:
Steven Rostedt818e3dd2008-10-31 09:58:35 -04001382 /*
1383 * We allow for interrupts to reenter here and do a trace.
1384 * If one does, it will cause this original code to loop
1385 * back here. Even with heavy interrupts happening, this
1386 * should only happen a few times in a row. If this happens
1387 * 1000 times in a row, there must be either an interrupt
1388 * storm or we have something buggy.
1389 * Bail!
1390 */
Steven Rostedt3e89c7bb2008-11-11 15:28:41 -05001391 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000))
Steven Rostedt818e3dd2008-10-31 09:58:35 -04001392 return NULL;
Steven Rostedt818e3dd2008-10-31 09:58:35 -04001393
Steven Rostedt37886f62009-03-17 17:22:06 -04001394 ts = ring_buffer_time_stamp(cpu_buffer->buffer, cpu_buffer->cpu);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001395
Steven Rostedtbf41a152008-10-04 02:00:59 -04001396 /*
1397 * Only the first commit can update the timestamp.
1398 * Yes there is a race here. If an interrupt comes in
1399 * just after the conditional and it traces too, then it
1400 * will also check the deltas. More than one timestamp may
1401 * also be made. But only the entry that did the actual
1402 * commit will be something other than zero.
1403 */
1404 if (cpu_buffer->tail_page == cpu_buffer->commit_page &&
1405 rb_page_write(cpu_buffer->tail_page) ==
1406 rb_commit_index(cpu_buffer)) {
1407
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001408 delta = ts - cpu_buffer->write_stamp;
1409
Steven Rostedtbf41a152008-10-04 02:00:59 -04001410 /* make sure this delta is calculated here */
1411 barrier();
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001412
Steven Rostedtbf41a152008-10-04 02:00:59 -04001413 /* Did the write stamp get updated already? */
1414 if (unlikely(ts < cpu_buffer->write_stamp))
Steven Rostedt4143c5c2008-11-10 21:46:01 -05001415 delta = 0;
Steven Rostedtbf41a152008-10-04 02:00:59 -04001416
1417 if (test_time_stamp(delta)) {
1418
1419 commit = rb_add_time_stamp(cpu_buffer, &ts, &delta);
1420
1421 if (commit == -EBUSY)
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001422 return NULL;
Steven Rostedtbf41a152008-10-04 02:00:59 -04001423
1424 if (commit == -EAGAIN)
1425 goto again;
1426
1427 RB_WARN_ON(cpu_buffer, commit < 0);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001428 }
Steven Rostedtbf41a152008-10-04 02:00:59 -04001429 } else
1430	/* Non-commit entries have zero deltas */
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001431 delta = 0;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001432
1433 event = __rb_reserve_next(cpu_buffer, type, length, &ts);
Steven Rostedtbf41a152008-10-04 02:00:59 -04001434 if (PTR_ERR(event) == -EAGAIN)
1435 goto again;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001436
Steven Rostedtbf41a152008-10-04 02:00:59 -04001437 if (!event) {
1438 if (unlikely(commit))
1439 /*
1440	 * Ouch! We needed a timestamp and it was committed. But
1441 * we didn't get our event reserved.
1442 */
1443 rb_set_commit_to_write(cpu_buffer);
1444 return NULL;
1445 }
1446
1447 /*
1448	 * If the timestamp was committed, make the commit our entry
1449 * now so that we will update it when needed.
1450 */
1451 if (commit)
1452 rb_set_commit_event(cpu_buffer, event);
1453 else if (!rb_is_commit(cpu_buffer, event))
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001454 delta = 0;
1455
1456 event->time_delta = delta;
1457
1458 return event;
1459}
1460
Steven Rostedtbf41a152008-10-04 02:00:59 -04001461static DEFINE_PER_CPU(int, rb_need_resched);
1462
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001463/**
1464 * ring_buffer_lock_reserve - reserve a part of the buffer
1465 * @buffer: the ring buffer to reserve from
1466 * @length: the length of the data to reserve (excluding event header)
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001467 *
1468 * Returns a reserved event on the ring buffer to copy directly to.
1469 * The user of this interface will need to get the body to write into
1470 * and can use the ring_buffer_event_data() interface.
1471 *
1472 * The length is the length of the data needed, not the event length
1473 * which also includes the event header.
1474 *
1475 * Must be paired with ring_buffer_unlock_commit, unless NULL is returned.
1476 * If NULL is returned, then nothing has been allocated or locked.
1477 */
1478struct ring_buffer_event *
Arnaldo Carvalho de Melo0a987752009-02-05 16:12:56 -02001479ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length)
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001480{
1481 struct ring_buffer_per_cpu *cpu_buffer;
1482 struct ring_buffer_event *event;
Steven Rostedtbf41a152008-10-04 02:00:59 -04001483 int cpu, resched;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001484
Steven Rostedt033601a2008-11-21 12:41:55 -05001485 if (ring_buffer_flags != RB_BUFFERS_ON)
Steven Rostedta3583242008-11-11 15:01:42 -05001486 return NULL;
1487
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001488 if (atomic_read(&buffer->record_disabled))
1489 return NULL;
1490
Steven Rostedtbf41a152008-10-04 02:00:59 -04001491 /* If we are tracing schedule, we don't want to recurse */
Steven Rostedt182e9f52008-11-03 23:15:56 -05001492 resched = ftrace_preempt_disable();
Steven Rostedtbf41a152008-10-04 02:00:59 -04001493
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001494 cpu = raw_smp_processor_id();
1495
Rusty Russell9e01c1b2009-01-01 10:12:22 +10301496 if (!cpumask_test_cpu(cpu, buffer->cpumask))
Steven Rostedtd7690412008-10-01 00:29:53 -04001497 goto out;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001498
1499 cpu_buffer = buffer->buffers[cpu];
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001500
1501 if (atomic_read(&cpu_buffer->record_disabled))
Steven Rostedtd7690412008-10-01 00:29:53 -04001502 goto out;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001503
1504 length = rb_calculate_event_length(length);
1505 if (length > BUF_PAGE_SIZE)
Steven Rostedtbf41a152008-10-04 02:00:59 -04001506 goto out;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001507
1508 event = rb_reserve_next_event(cpu_buffer, RINGBUF_TYPE_DATA, length);
1509 if (!event)
Steven Rostedtd7690412008-10-01 00:29:53 -04001510 goto out;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001511
Steven Rostedtbf41a152008-10-04 02:00:59 -04001512 /*
1513 * Need to store resched state on this cpu.
1514 * Only the first needs to.
1515 */
1516
1517 if (preempt_count() == 1)
1518 per_cpu(rb_need_resched, cpu) = resched;
1519
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001520 return event;
1521
Steven Rostedtd7690412008-10-01 00:29:53 -04001522 out:
Steven Rostedt182e9f52008-11-03 23:15:56 -05001523 ftrace_preempt_enable(resched);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001524 return NULL;
1525}
Robert Richterc4f50182008-12-11 16:49:22 +01001526EXPORT_SYMBOL_GPL(ring_buffer_lock_reserve);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001527
1528static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
1529 struct ring_buffer_event *event)
1530{
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001531 cpu_buffer->entries++;
Steven Rostedtbf41a152008-10-04 02:00:59 -04001532
1533 /* Only process further if we own the commit */
1534 if (!rb_is_commit(cpu_buffer, event))
1535 return;
1536
1537 cpu_buffer->write_stamp += event->time_delta;
1538
1539 rb_set_commit_to_write(cpu_buffer);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001540}
1541
1542/**
1543 * ring_buffer_unlock_commit - commit a reserved event
1544 * @buffer: The buffer to commit to
1545 * @event: The event pointer to commit.
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001546 *
1547 * This commits the data to the ring buffer, and releases any locks held.
1548 *
1549 * Must be paired with ring_buffer_lock_reserve.
1550 */
1551int ring_buffer_unlock_commit(struct ring_buffer *buffer,
Arnaldo Carvalho de Melo0a987752009-02-05 16:12:56 -02001552 struct ring_buffer_event *event)
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001553{
1554 struct ring_buffer_per_cpu *cpu_buffer;
1555 int cpu = raw_smp_processor_id();
1556
1557 cpu_buffer = buffer->buffers[cpu];
1558
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001559 rb_commit(cpu_buffer, event);
1560
Steven Rostedtbf41a152008-10-04 02:00:59 -04001561 /*
1562 * Only the last preempt count needs to restore preemption.
1563 */
Steven Rostedt182e9f52008-11-03 23:15:56 -05001564 if (preempt_count() == 1)
1565 ftrace_preempt_enable(per_cpu(rb_need_resched, cpu));
1566 else
Steven Rostedtbf41a152008-10-04 02:00:59 -04001567 preempt_enable_no_resched_notrace();
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001568
1569 return 0;
1570}
Robert Richterc4f50182008-12-11 16:49:22 +01001571EXPORT_SYMBOL_GPL(ring_buffer_unlock_commit);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001572
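/*
 * Usage sketch for the reserve/commit pair documented above.  The
 * payload struct and the wrapper are hypothetical examples, not part
 * of the ring buffer API; the buffer itself is assumed to come from
 * ring_buffer_alloc().
 */
struct rb_sketch_entry {
	unsigned long ip;
	unsigned long parent_ip;
};

static void rb_trace_point_sketch(struct ring_buffer *buffer,
				  unsigned long ip, unsigned long parent_ip)
{
	struct ring_buffer_event *event;
	struct rb_sketch_entry *entry;

	event = ring_buffer_lock_reserve(buffer, sizeof(*entry));
	if (!event)
		return;	/* recording disabled, or no room */

	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->parent_ip = parent_ip;

	ring_buffer_unlock_commit(buffer, event);
}
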
1573/**
1574 * ring_buffer_write - write data to the buffer without reserving
1575 * @buffer: The ring buffer to write to.
1576 * @length: The length of the data being written (excluding the event header)
1577 * @data: The data to write to the buffer.
1578 *
1579 * This is like ring_buffer_lock_reserve and ring_buffer_unlock_commit as
1580 * one function. If you already have the data to write to the buffer, it
1581 * may be easier to simply call this function.
1582 *
1583 * Note, like ring_buffer_lock_reserve, the length is the length of the data
1584 * and not the length of the event which would hold the header.
1585 */
1586int ring_buffer_write(struct ring_buffer *buffer,
1587 unsigned long length,
1588 void *data)
1589{
1590 struct ring_buffer_per_cpu *cpu_buffer;
1591 struct ring_buffer_event *event;
Steven Rostedtbf41a152008-10-04 02:00:59 -04001592 unsigned long event_length;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001593 void *body;
1594 int ret = -EBUSY;
Steven Rostedtbf41a152008-10-04 02:00:59 -04001595 int cpu, resched;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001596
Steven Rostedt033601a2008-11-21 12:41:55 -05001597 if (ring_buffer_flags != RB_BUFFERS_ON)
Steven Rostedta3583242008-11-11 15:01:42 -05001598 return -EBUSY;
1599
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001600 if (atomic_read(&buffer->record_disabled))
1601 return -EBUSY;
1602
Steven Rostedt182e9f52008-11-03 23:15:56 -05001603 resched = ftrace_preempt_disable();
Steven Rostedtbf41a152008-10-04 02:00:59 -04001604
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001605 cpu = raw_smp_processor_id();
1606
Rusty Russell9e01c1b2009-01-01 10:12:22 +10301607 if (!cpumask_test_cpu(cpu, buffer->cpumask))
Steven Rostedtd7690412008-10-01 00:29:53 -04001608 goto out;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001609
1610 cpu_buffer = buffer->buffers[cpu];
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001611
1612 if (atomic_read(&cpu_buffer->record_disabled))
1613 goto out;
1614
1615 event_length = rb_calculate_event_length(length);
1616 event = rb_reserve_next_event(cpu_buffer,
1617 RINGBUF_TYPE_DATA, event_length);
1618 if (!event)
1619 goto out;
1620
1621 body = rb_event_data(event);
1622
1623 memcpy(body, data, length);
1624
1625 rb_commit(cpu_buffer, event);
1626
1627 ret = 0;
1628 out:
Steven Rostedt182e9f52008-11-03 23:15:56 -05001629 ftrace_preempt_enable(resched);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001630
1631 return ret;
1632}
Robert Richterc4f50182008-12-11 16:49:22 +01001633EXPORT_SYMBOL_GPL(ring_buffer_write);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001634
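/*
 * Sketch of the one-shot path above: when the payload already sits in
 * memory, a single call replaces the reserve/commit pair, at the cost
 * of one extra copy.  The wrapper is a hypothetical example.
 */
static int rb_log_sample_sketch(struct ring_buffer *buffer, u64 sample)
{
	/* 0 on success, -EBUSY if recording is disabled */
	return ring_buffer_write(buffer, sizeof(sample), &sample);
}
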
Andrew Morton34a148b2009-01-09 12:27:09 -08001635static int rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
Steven Rostedtbf41a152008-10-04 02:00:59 -04001636{
1637 struct buffer_page *reader = cpu_buffer->reader_page;
1638 struct buffer_page *head = cpu_buffer->head_page;
1639 struct buffer_page *commit = cpu_buffer->commit_page;
1640
1641 return reader->read == rb_page_commit(reader) &&
1642 (commit == reader ||
1643 (commit == head &&
1644 head->read == rb_page_commit(commit)));
1645}
1646
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001647/**
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001648 * ring_buffer_record_disable - stop all writes into the buffer
1649 * @buffer: The ring buffer to stop writes to.
1650 *
1651 * This prevents all writes to the buffer. Any attempt to write
1652 * to the buffer after this will fail and return NULL.
1653 *
1654 * The caller should call synchronize_sched() after this.
1655 */
1656void ring_buffer_record_disable(struct ring_buffer *buffer)
1657{
1658 atomic_inc(&buffer->record_disabled);
1659}
Robert Richterc4f50182008-12-11 16:49:22 +01001660EXPORT_SYMBOL_GPL(ring_buffer_record_disable);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001661
1662/**
1663 * ring_buffer_record_enable - enable writes to the buffer
1664 * @buffer: The ring buffer to enable writes
1665 *
1666 * Note, multiple disables will need the same number of enables
1667 * to truly enable the writing (much like preempt_disable).
1668 */
1669void ring_buffer_record_enable(struct ring_buffer *buffer)
1670{
1671 atomic_dec(&buffer->record_disabled);
1672}
Robert Richterc4f50182008-12-11 16:49:22 +01001673EXPORT_SYMBOL_GPL(ring_buffer_record_enable);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001674
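/*
 * Sketch of the quiesce pattern the comments above call for: disable
 * recording, let writers already inside a reserve finish by calling
 * synchronize_sched(), do the read-side work, then re-enable.  The
 * wrapper is a hypothetical example.
 */
static void rb_with_recording_stopped_sketch(struct ring_buffer *buffer)
{
	ring_buffer_record_disable(buffer);
	synchronize_sched();

	/* ... read or reset the buffer without racing writers ... */

	ring_buffer_record_enable(buffer);
}
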
1675/**
1676 * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
1677 * @buffer: The ring buffer to stop writes to.
1678 * @cpu: The CPU buffer to stop
1679 *
1680 * This prevents all writes to the buffer. Any attempt to write
1681 * to the buffer after this will fail and return NULL.
1682 *
1683 * The caller should call synchronize_sched() after this.
1684 */
1685void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu)
1686{
1687 struct ring_buffer_per_cpu *cpu_buffer;
1688
Rusty Russell9e01c1b2009-01-01 10:12:22 +10301689 if (!cpumask_test_cpu(cpu, buffer->cpumask))
Steven Rostedt8aabee52009-03-12 13:13:49 -04001690 return;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001691
1692 cpu_buffer = buffer->buffers[cpu];
1693 atomic_inc(&cpu_buffer->record_disabled);
1694}
Robert Richterc4f50182008-12-11 16:49:22 +01001695EXPORT_SYMBOL_GPL(ring_buffer_record_disable_cpu);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001696
1697/**
1698 * ring_buffer_record_enable_cpu - enable writes to the buffer
1699 * @buffer: The ring buffer to enable writes
1700 * @cpu: The CPU to enable.
1701 *
1702 * Note, multiple disables will need the same number of enables
1703 * to truly enable the writing (much like preempt_disable).
1704 */
1705void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu)
1706{
1707 struct ring_buffer_per_cpu *cpu_buffer;
1708
Rusty Russell9e01c1b2009-01-01 10:12:22 +10301709 if (!cpumask_test_cpu(cpu, buffer->cpumask))
Steven Rostedt8aabee52009-03-12 13:13:49 -04001710 return;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001711
1712 cpu_buffer = buffer->buffers[cpu];
1713 atomic_dec(&cpu_buffer->record_disabled);
1714}
Robert Richterc4f50182008-12-11 16:49:22 +01001715EXPORT_SYMBOL_GPL(ring_buffer_record_enable_cpu);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001716
1717/**
1718 * ring_buffer_entries_cpu - get the number of entries in a cpu buffer
1719 * @buffer: The ring buffer
1720 * @cpu: The per CPU buffer to get the entries from.
1721 */
1722unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu)
1723{
1724 struct ring_buffer_per_cpu *cpu_buffer;
Steven Rostedt8aabee52009-03-12 13:13:49 -04001725 unsigned long ret;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001726
Rusty Russell9e01c1b2009-01-01 10:12:22 +10301727 if (!cpumask_test_cpu(cpu, buffer->cpumask))
Steven Rostedt8aabee52009-03-12 13:13:49 -04001728 return 0;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001729
1730 cpu_buffer = buffer->buffers[cpu];
Steven Rostedt554f7862009-03-11 22:00:13 -04001731 ret = cpu_buffer->entries;
Steven Rostedt554f7862009-03-11 22:00:13 -04001732
1733 return ret;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001734}
Robert Richterc4f50182008-12-11 16:49:22 +01001735EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001736
1737/**
1738 * ring_buffer_overrun_cpu - get the number of overruns in a cpu_buffer
1739 * @buffer: The ring buffer
1740 * @cpu: The per CPU buffer to get the number of overruns from
1741 */
1742unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
1743{
1744 struct ring_buffer_per_cpu *cpu_buffer;
Steven Rostedt8aabee52009-03-12 13:13:49 -04001745 unsigned long ret;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001746
Rusty Russell9e01c1b2009-01-01 10:12:22 +10301747 if (!cpumask_test_cpu(cpu, buffer->cpumask))
Steven Rostedt8aabee52009-03-12 13:13:49 -04001748 return 0;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001749
1750 cpu_buffer = buffer->buffers[cpu];
Steven Rostedt554f7862009-03-11 22:00:13 -04001751 ret = cpu_buffer->overrun;
Steven Rostedt554f7862009-03-11 22:00:13 -04001752
1753 return ret;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001754}
Robert Richterc4f50182008-12-11 16:49:22 +01001755EXPORT_SYMBOL_GPL(ring_buffer_overrun_cpu);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001756
1757/**
1758 * ring_buffer_entries - get the number of entries in a buffer
1759 * @buffer: The ring buffer
1760 *
1761 * Returns the total number of entries in the ring buffer
1762 * (all CPU entries)
1763 */
1764unsigned long ring_buffer_entries(struct ring_buffer *buffer)
1765{
1766 struct ring_buffer_per_cpu *cpu_buffer;
1767 unsigned long entries = 0;
1768 int cpu;
1769
1770 /* if you care about this being correct, lock the buffer */
1771 for_each_buffer_cpu(buffer, cpu) {
1772 cpu_buffer = buffer->buffers[cpu];
1773 entries += cpu_buffer->entries;
1774 }
1775
1776 return entries;
1777}
Robert Richterc4f50182008-12-11 16:49:22 +01001778EXPORT_SYMBOL_GPL(ring_buffer_entries);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001779
1780/**
1781 * ring_buffer_overruns - get the number of overruns in the buffer
1782 * @buffer: The ring buffer
1783 *
1784 * Returns the total number of overruns in the ring buffer
1785 * (all CPU entries)
1786 */
1787unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
1788{
1789 struct ring_buffer_per_cpu *cpu_buffer;
1790 unsigned long overruns = 0;
1791 int cpu;
1792
1793 /* if you care about this being correct, lock the buffer */
1794 for_each_buffer_cpu(buffer, cpu) {
1795 cpu_buffer = buffer->buffers[cpu];
1796 overruns += cpu_buffer->overrun;
1797 }
1798
1799 return overruns;
1800}
Robert Richterc4f50182008-12-11 16:49:22 +01001801EXPORT_SYMBOL_GPL(ring_buffer_overruns);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001802
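/*
 * Sketch: reading the counters exposed above.  Purely illustrative;
 * the printk format is arbitrary, and per-cpu reads of cpus missing
 * from the buffer's cpumask simply return 0.
 */
static void rb_dump_stats_sketch(struct ring_buffer *buffer)
{
	int cpu;

	for_each_online_cpu(cpu)
		printk(KERN_INFO "cpu%d: %lu entries, %lu overruns\n",
		       cpu,
		       ring_buffer_entries_cpu(buffer, cpu),
		       ring_buffer_overrun_cpu(buffer, cpu));

	printk(KERN_INFO "total: %lu entries, %lu overruns\n",
	       ring_buffer_entries(buffer),
	       ring_buffer_overruns(buffer));
}
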
Steven Rostedt642edba2008-11-12 00:01:26 -05001803static void rb_iter_reset(struct ring_buffer_iter *iter)
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001804{
1805 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
1806
Steven Rostedtd7690412008-10-01 00:29:53 -04001807 /* Iterator usage is expected to have record disabled */
1808 if (list_empty(&cpu_buffer->reader_page->list)) {
1809 iter->head_page = cpu_buffer->head_page;
Steven Rostedt6f807ac2008-10-04 02:00:58 -04001810 iter->head = cpu_buffer->head_page->read;
Steven Rostedtd7690412008-10-01 00:29:53 -04001811 } else {
1812 iter->head_page = cpu_buffer->reader_page;
Steven Rostedt6f807ac2008-10-04 02:00:58 -04001813 iter->head = cpu_buffer->reader_page->read;
Steven Rostedtd7690412008-10-01 00:29:53 -04001814 }
1815 if (iter->head)
1816 iter->read_stamp = cpu_buffer->read_stamp;
1817 else
Steven Rostedtabc9b562008-12-02 15:34:06 -05001818 iter->read_stamp = iter->head_page->page->time_stamp;
Steven Rostedt642edba2008-11-12 00:01:26 -05001819}
Steven Rostedtf83c9d02008-11-11 18:47:44 +01001820
Steven Rostedt642edba2008-11-12 00:01:26 -05001821/**
1822 * ring_buffer_iter_reset - reset an iterator
1823 * @iter: The iterator to reset
1824 *
1825 * Resets the iterator, so that it will start from the beginning
1826 * again.
1827 */
1828void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
1829{
Steven Rostedt554f7862009-03-11 22:00:13 -04001830 struct ring_buffer_per_cpu *cpu_buffer;
Steven Rostedt642edba2008-11-12 00:01:26 -05001831 unsigned long flags;
1832
Steven Rostedt554f7862009-03-11 22:00:13 -04001833 if (!iter)
1834 return;
1835
1836 cpu_buffer = iter->cpu_buffer;
1837
Steven Rostedt642edba2008-11-12 00:01:26 -05001838 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
1839 rb_iter_reset(iter);
Steven Rostedtf83c9d02008-11-11 18:47:44 +01001840 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001841}
Robert Richterc4f50182008-12-11 16:49:22 +01001842EXPORT_SYMBOL_GPL(ring_buffer_iter_reset);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001843
1844/**
1845 * ring_buffer_iter_empty - check if an iterator has no more to read
1846 * @iter: The iterator to check
1847 */
1848int ring_buffer_iter_empty(struct ring_buffer_iter *iter)
1849{
1850 struct ring_buffer_per_cpu *cpu_buffer;
1851
1852 cpu_buffer = iter->cpu_buffer;
1853
Steven Rostedtbf41a152008-10-04 02:00:59 -04001854 return iter->head_page == cpu_buffer->commit_page &&
1855 iter->head == rb_commit_index(cpu_buffer);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001856}
Robert Richterc4f50182008-12-11 16:49:22 +01001857EXPORT_SYMBOL_GPL(ring_buffer_iter_empty);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001858
1859static void
1860rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer,
1861 struct ring_buffer_event *event)
1862{
1863 u64 delta;
1864
1865 switch (event->type) {
1866 case RINGBUF_TYPE_PADDING:
1867 return;
1868
1869 case RINGBUF_TYPE_TIME_EXTEND:
1870 delta = event->array[0];
1871 delta <<= TS_SHIFT;
1872 delta += event->time_delta;
1873 cpu_buffer->read_stamp += delta;
1874 return;
1875
1876 case RINGBUF_TYPE_TIME_STAMP:
1877 /* FIXME: not implemented */
1878 return;
1879
1880 case RINGBUF_TYPE_DATA:
1881 cpu_buffer->read_stamp += event->time_delta;
1882 return;
1883
1884 default:
1885 BUG();
1886 }
1887 return;
1888}
1889
1890static void
1891rb_update_iter_read_stamp(struct ring_buffer_iter *iter,
1892 struct ring_buffer_event *event)
1893{
1894 u64 delta;
1895
1896 switch (event->type) {
1897 case RINGBUF_TYPE_PADDING:
1898 return;
1899
1900 case RINGBUF_TYPE_TIME_EXTEND:
1901 delta = event->array[0];
1902 delta <<= TS_SHIFT;
1903 delta += event->time_delta;
1904 iter->read_stamp += delta;
1905 return;
1906
1907 case RINGBUF_TYPE_TIME_STAMP:
1908 /* FIXME: not implemented */
1909 return;
1910
1911 case RINGBUF_TYPE_DATA:
1912 iter->read_stamp += event->time_delta;
1913 return;
1914
1915 default:
1916 BUG();
1917 }
1918 return;
1919}
1920
Steven Rostedtd7690412008-10-01 00:29:53 -04001921static struct buffer_page *
1922rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001923{
Steven Rostedtd7690412008-10-01 00:29:53 -04001924 struct buffer_page *reader = NULL;
1925 unsigned long flags;
Steven Rostedt818e3dd2008-10-31 09:58:35 -04001926 int nr_loops = 0;
Steven Rostedtd7690412008-10-01 00:29:53 -04001927
Steven Rostedt3e03fb72008-11-06 00:09:43 -05001928 local_irq_save(flags);
1929 __raw_spin_lock(&cpu_buffer->lock);
Steven Rostedtd7690412008-10-01 00:29:53 -04001930
1931 again:
Steven Rostedt818e3dd2008-10-31 09:58:35 -04001932 /*
1933 * This should normally only loop twice. But because the
1934 * start of the reader inserts an empty page, it causes
1935 * a case where we will loop three times. There should be no
1936 * reason to loop four times (that I know of).
1937 */
Steven Rostedt3e89c7bb2008-11-11 15:28:41 -05001938 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3)) {
Steven Rostedt818e3dd2008-10-31 09:58:35 -04001939 reader = NULL;
1940 goto out;
1941 }
1942
Steven Rostedtd7690412008-10-01 00:29:53 -04001943 reader = cpu_buffer->reader_page;
1944
1945 /* If there's more to read, return this page */
Steven Rostedtbf41a152008-10-04 02:00:59 -04001946 if (cpu_buffer->reader_page->read < rb_page_size(reader))
Steven Rostedtd7690412008-10-01 00:29:53 -04001947 goto out;
1948
1949 /* Never should we have an index greater than the size */
Steven Rostedt3e89c7bb2008-11-11 15:28:41 -05001950 if (RB_WARN_ON(cpu_buffer,
1951 cpu_buffer->reader_page->read > rb_page_size(reader)))
1952 goto out;
Steven Rostedtd7690412008-10-01 00:29:53 -04001953
1954 /* check if we caught up to the tail */
1955 reader = NULL;
Steven Rostedtbf41a152008-10-04 02:00:59 -04001956 if (cpu_buffer->commit_page == cpu_buffer->reader_page)
Steven Rostedtd7690412008-10-01 00:29:53 -04001957 goto out;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001958
1959 /*
Steven Rostedtd7690412008-10-01 00:29:53 -04001960 * Splice the empty reader page into the list around the head.
1961 * Reset the reader page to size zero.
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001962 */
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04001963
Steven Rostedtd7690412008-10-01 00:29:53 -04001964 reader = cpu_buffer->head_page;
1965 cpu_buffer->reader_page->list.next = reader->list.next;
1966 cpu_buffer->reader_page->list.prev = reader->list.prev;
Steven Rostedtbf41a152008-10-04 02:00:59 -04001967
1968 local_set(&cpu_buffer->reader_page->write, 0);
Steven Rostedtabc9b562008-12-02 15:34:06 -05001969 local_set(&cpu_buffer->reader_page->page->commit, 0);
Steven Rostedtd7690412008-10-01 00:29:53 -04001970
1971 /* Make the reader page now replace the head */
1972 reader->list.prev->next = &cpu_buffer->reader_page->list;
1973 reader->list.next->prev = &cpu_buffer->reader_page->list;
1974
1975 /*
1976 * If the tail is on the reader, then we must set the head
1977 * to the inserted page, otherwise we set it one before.
1978 */
1979 cpu_buffer->head_page = cpu_buffer->reader_page;
1980
Steven Rostedtbf41a152008-10-04 02:00:59 -04001981 if (cpu_buffer->commit_page != reader)
Steven Rostedtd7690412008-10-01 00:29:53 -04001982 rb_inc_page(cpu_buffer, &cpu_buffer->head_page);
1983
1984 /* Finally update the reader page to the new head */
1985 cpu_buffer->reader_page = reader;
1986 rb_reset_reader_page(cpu_buffer);
1987
1988 goto again;
1989
1990 out:
Steven Rostedt3e03fb72008-11-06 00:09:43 -05001991 __raw_spin_unlock(&cpu_buffer->lock);
1992 local_irq_restore(flags);
Steven Rostedtd7690412008-10-01 00:29:53 -04001993
1994 return reader;
1995}
1996
1997static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
1998{
1999 struct ring_buffer_event *event;
2000 struct buffer_page *reader;
2001 unsigned length;
2002
2003 reader = rb_get_reader_page(cpu_buffer);
2004
2005 /* This function should not be called when buffer is empty */
Steven Rostedt3e89c7bb2008-11-11 15:28:41 -05002006 if (RB_WARN_ON(cpu_buffer, !reader))
2007 return;
Steven Rostedtd7690412008-10-01 00:29:53 -04002008
2009 event = rb_reader_event(cpu_buffer);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002010
Tom Zanussi2d622712009-03-22 03:30:49 -05002011 if (event->type == RINGBUF_TYPE_DATA || rb_discarded_event(event))
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002012 cpu_buffer->entries--;
2013
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002014 rb_update_read_stamp(cpu_buffer, event);
2015
Steven Rostedtd7690412008-10-01 00:29:53 -04002016 length = rb_event_length(event);
Steven Rostedt6f807ac2008-10-04 02:00:58 -04002017 cpu_buffer->reader_page->read += length;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002018}
2019
2020static void rb_advance_iter(struct ring_buffer_iter *iter)
2021{
2022 struct ring_buffer *buffer;
2023 struct ring_buffer_per_cpu *cpu_buffer;
2024 struct ring_buffer_event *event;
2025 unsigned length;
2026
2027 cpu_buffer = iter->cpu_buffer;
2028 buffer = cpu_buffer->buffer;
2029
2030 /*
2031 * Check if we are at the end of the buffer.
2032 */
Steven Rostedtbf41a152008-10-04 02:00:59 -04002033 if (iter->head >= rb_page_size(iter->head_page)) {
Steven Rostedt3e89c7bb2008-11-11 15:28:41 -05002034 if (RB_WARN_ON(buffer,
2035 iter->head_page == cpu_buffer->commit_page))
2036 return;
Steven Rostedtd7690412008-10-01 00:29:53 -04002037 rb_inc_iter(iter);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002038 return;
2039 }
2040
2041 event = rb_iter_head_event(iter);
2042
2043 length = rb_event_length(event);
2044
2045 /*
2046 * This should not be called to advance the header if we are
2047 * at the tail of the buffer.
2048 */
Steven Rostedt3e89c7bb2008-11-11 15:28:41 -05002049 if (RB_WARN_ON(cpu_buffer,
Steven Rostedtf536aaf2008-11-10 23:07:30 -05002050 (iter->head_page == cpu_buffer->commit_page) &&
Steven Rostedt3e89c7bb2008-11-11 15:28:41 -05002051 (iter->head + length > rb_commit_index(cpu_buffer))))
2052 return;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002053
2054 rb_update_iter_read_stamp(iter, event);
2055
2056 iter->head += length;
2057
2058 /* check for end of page padding */
Steven Rostedtbf41a152008-10-04 02:00:59 -04002059 if ((iter->head >= rb_page_size(iter->head_page)) &&
2060 (iter->head_page != cpu_buffer->commit_page))
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002061 rb_advance_iter(iter);
2062}
2063
Steven Rostedtf83c9d02008-11-11 18:47:44 +01002064static struct ring_buffer_event *
2065rb_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002066{
2067 struct ring_buffer_per_cpu *cpu_buffer;
2068 struct ring_buffer_event *event;
Steven Rostedtd7690412008-10-01 00:29:53 -04002069 struct buffer_page *reader;
Steven Rostedt818e3dd2008-10-31 09:58:35 -04002070 int nr_loops = 0;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002071
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002072 cpu_buffer = buffer->buffers[cpu];
2073
2074 again:
Steven Rostedt818e3dd2008-10-31 09:58:35 -04002075 /*
2076 * We repeat when a timestamp is encountered. It is possible
2077 * to get multiple timestamps from an interrupt entering just
2078 * as one timestamp is about to be written. The max times
2079 * that this can happen is the number of nested interrupts we
2080 * can have. Nesting 10 deep of interrupts is clearly
2081 * an anomaly.
2082 */
Steven Rostedt3e89c7bb2008-11-11 15:28:41 -05002083 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 10))
Steven Rostedt818e3dd2008-10-31 09:58:35 -04002084 return NULL;
Steven Rostedt818e3dd2008-10-31 09:58:35 -04002085
Steven Rostedtd7690412008-10-01 00:29:53 -04002086 reader = rb_get_reader_page(cpu_buffer);
2087 if (!reader)
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002088 return NULL;
2089
Steven Rostedtd7690412008-10-01 00:29:53 -04002090 event = rb_reader_event(cpu_buffer);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002091
2092 switch (event->type) {
2093 case RINGBUF_TYPE_PADDING:
Tom Zanussi2d622712009-03-22 03:30:49 -05002094 if (rb_null_event(event))
2095 RB_WARN_ON(cpu_buffer, 1);
2096 /*
2097 * Because the writer could be discarding every
2098 * event it creates (which would probably be bad)
2099 * if we were to go back to "again" then we may never
2100 * catch up, and will trigger the warn on, or lock
2101 * the box. Return the padding, and we will release
2102 * the current locks, and try again.
2103 */
Steven Rostedtd7690412008-10-01 00:29:53 -04002104 rb_advance_reader(cpu_buffer);
Tom Zanussi2d622712009-03-22 03:30:49 -05002105 return event;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002106
2107 case RINGBUF_TYPE_TIME_EXTEND:
2108 /* Internal data, OK to advance */
Steven Rostedtd7690412008-10-01 00:29:53 -04002109 rb_advance_reader(cpu_buffer);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002110 goto again;
2111
2112 case RINGBUF_TYPE_TIME_STAMP:
2113 /* FIXME: not implemented */
Steven Rostedtd7690412008-10-01 00:29:53 -04002114 rb_advance_reader(cpu_buffer);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002115 goto again;
2116
2117 case RINGBUF_TYPE_DATA:
2118 if (ts) {
2119 *ts = cpu_buffer->read_stamp + event->time_delta;
Steven Rostedt37886f62009-03-17 17:22:06 -04002120 ring_buffer_normalize_time_stamp(buffer,
2121 cpu_buffer->cpu, ts);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002122 }
2123 return event;
2124
2125 default:
2126 BUG();
2127 }
2128
2129 return NULL;
2130}
Robert Richterc4f50182008-12-11 16:49:22 +01002131EXPORT_SYMBOL_GPL(ring_buffer_peek);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002132
Steven Rostedtf83c9d02008-11-11 18:47:44 +01002133static struct ring_buffer_event *
2134rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002135{
2136 struct ring_buffer *buffer;
2137 struct ring_buffer_per_cpu *cpu_buffer;
2138 struct ring_buffer_event *event;
Steven Rostedt818e3dd2008-10-31 09:58:35 -04002139 int nr_loops = 0;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002140
2141 if (ring_buffer_iter_empty(iter))
2142 return NULL;
2143
2144 cpu_buffer = iter->cpu_buffer;
2145 buffer = cpu_buffer->buffer;
2146
2147 again:
Steven Rostedt818e3dd2008-10-31 09:58:35 -04002148 /*
2149 * We repeat when a timestamp is encountered. It is possible
2150 * to get multiple timestamps from an interrupt entering just
2151 * as one timestamp is about to be written. The max times
2152 * that this can happen is the number of nested interrupts we
2153 * can have. Nesting 10 deep of interrupts is clearly
2154 * an anomaly.
2155 */
Steven Rostedt3e89c7bb2008-11-11 15:28:41 -05002156 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 10))
Steven Rostedt818e3dd2008-10-31 09:58:35 -04002157 return NULL;
Steven Rostedt818e3dd2008-10-31 09:58:35 -04002158
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002159 if (rb_per_cpu_empty(cpu_buffer))
2160 return NULL;
2161
2162 event = rb_iter_head_event(iter);
2163
2164 switch (event->type) {
2165 case RINGBUF_TYPE_PADDING:
Tom Zanussi2d622712009-03-22 03:30:49 -05002166 if (rb_null_event(event)) {
2167 rb_inc_iter(iter);
2168 goto again;
2169 }
2170 rb_advance_iter(iter);
2171 return event;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002172
2173 case RINGBUF_TYPE_TIME_EXTEND:
2174 /* Internal data, OK to advance */
2175 rb_advance_iter(iter);
2176 goto again;
2177
2178 case RINGBUF_TYPE_TIME_STAMP:
2179 /* FIXME: not implemented */
2180 rb_advance_iter(iter);
2181 goto again;
2182
2183 case RINGBUF_TYPE_DATA:
2184 if (ts) {
2185 *ts = iter->read_stamp + event->time_delta;
Steven Rostedt37886f62009-03-17 17:22:06 -04002186 ring_buffer_normalize_time_stamp(buffer,
2187 cpu_buffer->cpu, ts);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002188 }
2189 return event;
2190
2191 default:
2192 BUG();
2193 }
2194
2195 return NULL;
2196}
Robert Richterc4f50182008-12-11 16:49:22 +01002197EXPORT_SYMBOL_GPL(ring_buffer_iter_peek);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002198
2199/**
Steven Rostedtf83c9d02008-11-11 18:47:44 +01002200 * ring_buffer_peek - peek at the next event to be read
2201 * @buffer: The ring buffer to read
2202 * @cpu: The cpu to peek at
2203 * @ts: The timestamp counter of this event.
2204 *
2205 * This will return the event that will be read next, but does
2206 * not consume the data.
2207 */
2208struct ring_buffer_event *
2209ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
2210{
2211 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
Steven Rostedt8aabee52009-03-12 13:13:49 -04002212 struct ring_buffer_event *event;
Steven Rostedtf83c9d02008-11-11 18:47:44 +01002213 unsigned long flags;
2214
Steven Rostedt554f7862009-03-11 22:00:13 -04002215 if (!cpumask_test_cpu(cpu, buffer->cpumask))
Steven Rostedt8aabee52009-03-12 13:13:49 -04002216 return NULL;
Steven Rostedt554f7862009-03-11 22:00:13 -04002217
Tom Zanussi2d622712009-03-22 03:30:49 -05002218 again:
Steven Rostedtf83c9d02008-11-11 18:47:44 +01002219 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2220 event = rb_buffer_peek(buffer, cpu, ts);
2221 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
2222
Tom Zanussi2d622712009-03-22 03:30:49 -05002223 if (event && event->type == RINGBUF_TYPE_PADDING) {
2224 cpu_relax();
2225 goto again;
2226 }
2227
Steven Rostedtf83c9d02008-11-11 18:47:44 +01002228 return event;
2229}
2230
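/*
 * Sketch: using the non-destructive peek to look at the next event's
 * timestamp before deciding whether to consume it.  The cutoff logic
 * is a hypothetical example of caller policy.
 */
static int rb_should_consume_sketch(struct ring_buffer *buffer, int cpu,
				    u64 cutoff)
{
	u64 ts;

	if (!ring_buffer_peek(buffer, cpu, &ts))
		return 0;	/* nothing to read */

	return ts <= cutoff;	/* consume only events up to the cutoff */
}
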
2231/**
2232 * ring_buffer_iter_peek - peek at the next event to be read
2233 * @iter: The ring buffer iterator
2234 * @ts: The timestamp counter of this event.
2235 *
2236 * This will return the event that will be read next, but does
2237 * not increment the iterator.
2238 */
2239struct ring_buffer_event *
2240ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
2241{
2242 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
2243 struct ring_buffer_event *event;
2244 unsigned long flags;
2245
Tom Zanussi2d622712009-03-22 03:30:49 -05002246 again:
Steven Rostedtf83c9d02008-11-11 18:47:44 +01002247 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2248 event = rb_iter_peek(iter, ts);
2249 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
2250
Tom Zanussi2d622712009-03-22 03:30:49 -05002251 if (event && event->type == RINGBUF_TYPE_PADDING) {
2252 cpu_relax();
2253 goto again;
2254 }
2255
Steven Rostedtf83c9d02008-11-11 18:47:44 +01002256 return event;
2257}
2258
2259/**
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002260 * ring_buffer_consume - return an event and consume it
2261 * @buffer: The ring buffer to get the next event from
 * @cpu: the cpu to read the buffer from
 * @ts: a variable to store the event's timestamp (may be NULL)
2262 *
2263 * Returns the next event in the ring buffer, and that event is consumed.
2264 * Meaning, that sequential reads will keep returning a different event,
2265 * and eventually empty the ring buffer if the producer is slower.
2266 */
2267struct ring_buffer_event *
2268ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
2269{
Steven Rostedt554f7862009-03-11 22:00:13 -04002270 struct ring_buffer_per_cpu *cpu_buffer;
2271 struct ring_buffer_event *event = NULL;
Steven Rostedtf83c9d02008-11-11 18:47:44 +01002272 unsigned long flags;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002273
Tom Zanussi2d622712009-03-22 03:30:49 -05002274 again:
Steven Rostedt554f7862009-03-11 22:00:13 -04002275 /* might be called in atomic */
2276 preempt_disable();
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002277
Steven Rostedt554f7862009-03-11 22:00:13 -04002278 if (!cpumask_test_cpu(cpu, buffer->cpumask))
2279 goto out;
2280
2281 cpu_buffer = buffer->buffers[cpu];
Steven Rostedtf83c9d02008-11-11 18:47:44 +01002282 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002283
Steven Rostedtf83c9d02008-11-11 18:47:44 +01002284 event = rb_buffer_peek(buffer, cpu, ts);
2285 if (!event)
Steven Rostedt554f7862009-03-11 22:00:13 -04002286 goto out_unlock;
Steven Rostedtf83c9d02008-11-11 18:47:44 +01002287
Steven Rostedtd7690412008-10-01 00:29:53 -04002288 rb_advance_reader(cpu_buffer);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002289
Steven Rostedt554f7862009-03-11 22:00:13 -04002290 out_unlock:
Steven Rostedtf83c9d02008-11-11 18:47:44 +01002291 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
2292
Steven Rostedt554f7862009-03-11 22:00:13 -04002293 out:
2294 preempt_enable();
2295
Tom Zanussi2d622712009-03-22 03:30:49 -05002296 if (event && event->type == RINGBUF_TYPE_PADDING) {
2297 cpu_relax();
2298 goto again;
2299 }
2300
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002301 return event;
2302}
Robert Richterc4f50182008-12-11 16:49:22 +01002303EXPORT_SYMBOL_GPL(ring_buffer_consume);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002304
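/*
 * Sketch: draining one cpu with the consuming read.  Reuses the
 * hypothetical rb_sketch_entry payload from the reserve sketch above;
 * a real caller knows its own event layout.
 */
static void rb_drain_cpu_sketch(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_event *event;
	u64 ts;

	while ((event = ring_buffer_consume(buffer, cpu, &ts))) {
		struct rb_sketch_entry *entry = ring_buffer_event_data(event);

		printk(KERN_INFO "ts=%llu ip=%lx\n",
		       (unsigned long long)ts, entry->ip);
	}
}
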
2305/**
2306 * ring_buffer_read_start - start a non consuming read of the buffer
2307 * @buffer: The ring buffer to read from
2308 * @cpu: The cpu buffer to iterate over
2309 *
2310 * This starts up an iteration through the buffer. It also disables
2311 * the recording to the buffer until the reading is finished.
2312 * This prevents the reading from being corrupted. This is not
2313 * a consuming read, so a producer is not expected.
2314 *
2315 * Must be paired with ring_buffer_read_finish.
2316 */
2317struct ring_buffer_iter *
2318ring_buffer_read_start(struct ring_buffer *buffer, int cpu)
2319{
2320 struct ring_buffer_per_cpu *cpu_buffer;
Steven Rostedt8aabee52009-03-12 13:13:49 -04002321 struct ring_buffer_iter *iter;
Steven Rostedtd7690412008-10-01 00:29:53 -04002322 unsigned long flags;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002323
Rusty Russell9e01c1b2009-01-01 10:12:22 +10302324 if (!cpumask_test_cpu(cpu, buffer->cpumask))
Steven Rostedt8aabee52009-03-12 13:13:49 -04002325 return NULL;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002326
2327 iter = kmalloc(sizeof(*iter), GFP_KERNEL);
2328 if (!iter)
Steven Rostedt8aabee52009-03-12 13:13:49 -04002329 return NULL;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002330
2331 cpu_buffer = buffer->buffers[cpu];
2332
2333 iter->cpu_buffer = cpu_buffer;
2334
2335 atomic_inc(&cpu_buffer->record_disabled);
2336 synchronize_sched();
2337
Steven Rostedtf83c9d02008-11-11 18:47:44 +01002338 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
Steven Rostedt3e03fb72008-11-06 00:09:43 -05002339 __raw_spin_lock(&cpu_buffer->lock);
Steven Rostedt642edba2008-11-12 00:01:26 -05002340 rb_iter_reset(iter);
Steven Rostedt3e03fb72008-11-06 00:09:43 -05002341 __raw_spin_unlock(&cpu_buffer->lock);
Steven Rostedtf83c9d02008-11-11 18:47:44 +01002342 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002343
2344 return iter;
2345}
Robert Richterc4f50182008-12-11 16:49:22 +01002346EXPORT_SYMBOL_GPL(ring_buffer_read_start);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002347
2348/**
2349 * ring_buffer_read_finish - finish reading the iterator of the buffer
2350 * @iter: The iterator retrieved by ring_buffer_read_start
2351 *
2352 * This re-enables the recording to the buffer, and frees the
2353 * iterator.
2354 */
2355void
2356ring_buffer_read_finish(struct ring_buffer_iter *iter)
2357{
2358 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
2359
2360 atomic_dec(&cpu_buffer->record_disabled);
2361 kfree(iter);
2362}
Robert Richterc4f50182008-12-11 16:49:22 +01002363EXPORT_SYMBOL_GPL(ring_buffer_read_finish);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002364
2365/**
2366 * ring_buffer_read - read the next item in the ring buffer by the iterator
2367 * @iter: The ring buffer iterator
2368 * @ts: The time stamp of the event read.
2369 *
2370 * This reads the next event in the ring buffer and increments the iterator.
2371 */
2372struct ring_buffer_event *
2373ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
2374{
2375 struct ring_buffer_event *event;
Steven Rostedtf83c9d02008-11-11 18:47:44 +01002376 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
2377 unsigned long flags;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002378
Tom Zanussi2d622712009-03-22 03:30:49 -05002379 again:
Steven Rostedtf83c9d02008-11-11 18:47:44 +01002380 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2381 event = rb_iter_peek(iter, ts);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002382 if (!event)
Steven Rostedtf83c9d02008-11-11 18:47:44 +01002383 goto out;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002384
2385 rb_advance_iter(iter);
Steven Rostedtf83c9d02008-11-11 18:47:44 +01002386 out:
2387 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002388
Tom Zanussi2d622712009-03-22 03:30:49 -05002389 if (event && event->type == RINGBUF_TYPE_PADDING) {
2390 cpu_relax();
2391 goto again;
2392 }
2393
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002394 return event;
2395}
Robert Richterc4f50182008-12-11 16:49:22 +01002396EXPORT_SYMBOL_GPL(ring_buffer_read);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002397
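/*
 * Sketch of the non-consuming protocol: read_start disables recording
 * and allocates the iterator, ring_buffer_read() walks the events,
 * read_finish re-enables recording and frees the iterator.  The
 * wrapper is a hypothetical example.
 */
static void rb_walk_cpu_sketch(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_iter *iter;
	struct ring_buffer_event *event;
	u64 ts;

	iter = ring_buffer_read_start(buffer, cpu);
	if (!iter)
		return;

	while ((event = ring_buffer_read(iter, &ts)))
		printk(KERN_INFO "event at ts=%llu\n",
		       (unsigned long long)ts);

	ring_buffer_read_finish(iter);
}
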
2398/**
2399 * ring_buffer_size - return the size of the ring buffer (in bytes)
2400 * @buffer: The ring buffer.
2401 */
2402unsigned long ring_buffer_size(struct ring_buffer *buffer)
2403{
2404 return BUF_PAGE_SIZE * buffer->pages;
2405}
Robert Richterc4f50182008-12-11 16:49:22 +01002406EXPORT_SYMBOL_GPL(ring_buffer_size);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002407
2408static void
2409rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
2410{
2411 cpu_buffer->head_page
2412 = list_entry(cpu_buffer->pages.next, struct buffer_page, list);
Steven Rostedtbf41a152008-10-04 02:00:59 -04002413 local_set(&cpu_buffer->head_page->write, 0);
Steven Rostedtabc9b562008-12-02 15:34:06 -05002414 local_set(&cpu_buffer->head_page->page->commit, 0);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002415
Steven Rostedt6f807ac2008-10-04 02:00:58 -04002416 cpu_buffer->head_page->read = 0;
Steven Rostedtbf41a152008-10-04 02:00:59 -04002417
2418 cpu_buffer->tail_page = cpu_buffer->head_page;
2419 cpu_buffer->commit_page = cpu_buffer->head_page;
2420
2421 INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
2422 local_set(&cpu_buffer->reader_page->write, 0);
Steven Rostedtabc9b562008-12-02 15:34:06 -05002423 local_set(&cpu_buffer->reader_page->page->commit, 0);
Steven Rostedt6f807ac2008-10-04 02:00:58 -04002424 cpu_buffer->reader_page->read = 0;
Steven Rostedtd7690412008-10-01 00:29:53 -04002425
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002426 cpu_buffer->overrun = 0;
2427 cpu_buffer->entries = 0;
Steven Rostedt69507c02009-01-21 18:45:57 -05002428
2429 cpu_buffer->write_stamp = 0;
2430 cpu_buffer->read_stamp = 0;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002431}
2432
2433/**
2434 * ring_buffer_reset_cpu - reset a ring buffer per CPU buffer
2435 * @buffer: The ring buffer to reset a per cpu buffer of
2436 * @cpu: The CPU buffer to be reset
2437 */
2438void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
2439{
2440 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
2441 unsigned long flags;
2442
Rusty Russell9e01c1b2009-01-01 10:12:22 +10302443 if (!cpumask_test_cpu(cpu, buffer->cpumask))
Steven Rostedt8aabee52009-03-12 13:13:49 -04002444 return;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002445
Steven Rostedtf83c9d02008-11-11 18:47:44 +01002446 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2447
Steven Rostedt3e03fb72008-11-06 00:09:43 -05002448 __raw_spin_lock(&cpu_buffer->lock);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002449
2450 rb_reset_cpu(cpu_buffer);
2451
Steven Rostedt3e03fb72008-11-06 00:09:43 -05002452 __raw_spin_unlock(&cpu_buffer->lock);
Steven Rostedtf83c9d02008-11-11 18:47:44 +01002453
2454 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002455}
Robert Richterc4f50182008-12-11 16:49:22 +01002456EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002457
2458/**
2459 * ring_buffer_reset - reset a ring buffer
2460 * @buffer: The ring buffer to reset all cpu buffers
2461 */
2462void ring_buffer_reset(struct ring_buffer *buffer)
2463{
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002464 int cpu;
2465
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002466 for_each_buffer_cpu(buffer, cpu)
Steven Rostedtd7690412008-10-01 00:29:53 -04002467 ring_buffer_reset_cpu(buffer, cpu);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002468}
Robert Richterc4f50182008-12-11 16:49:22 +01002469EXPORT_SYMBOL_GPL(ring_buffer_reset);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002470
2471/**
2472 * ring_buffer_empty - is the ring buffer empty?
2473 * @buffer: The ring buffer to test
2474 */
2475int ring_buffer_empty(struct ring_buffer *buffer)
2476{
2477 struct ring_buffer_per_cpu *cpu_buffer;
2478 int cpu;
2479
2480 /* yes this is racy, but if you don't like the race, lock the buffer */
2481 for_each_buffer_cpu(buffer, cpu) {
2482 cpu_buffer = buffer->buffers[cpu];
2483 if (!rb_per_cpu_empty(cpu_buffer))
2484 return 0;
2485 }
Steven Rostedt554f7862009-03-11 22:00:13 -04002486
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002487 return 1;
2488}
Robert Richterc4f50182008-12-11 16:49:22 +01002489EXPORT_SYMBOL_GPL(ring_buffer_empty);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002490
2491/**
2492 * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty?
2493 * @buffer: The ring buffer
2494 * @cpu: The CPU buffer to test
2495 */
2496int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
2497{
2498 struct ring_buffer_per_cpu *cpu_buffer;
Steven Rostedt8aabee52009-03-12 13:13:49 -04002499 int ret;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002500
Rusty Russell9e01c1b2009-01-01 10:12:22 +10302501 if (!cpumask_test_cpu(cpu, buffer->cpumask))
Steven Rostedt8aabee52009-03-12 13:13:49 -04002502 return 1;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002503
2504 cpu_buffer = buffer->buffers[cpu];
Steven Rostedt554f7862009-03-11 22:00:13 -04002505 ret = rb_per_cpu_empty(cpu_buffer);
2506
Steven Rostedt554f7862009-03-11 22:00:13 -04002507
2508 return ret;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002509}
Robert Richterc4f50182008-12-11 16:49:22 +01002510EXPORT_SYMBOL_GPL(ring_buffer_empty_cpu);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002511
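/*
 * Sketch: a reader spinning until data shows up on a cpu buffer.
 * Illustrative only; a real reader would sleep on a waitqueue rather
 * than burn the cpu with cpu_relax().
 */
static void rb_wait_for_data_sketch(struct ring_buffer *buffer, int cpu)
{
	while (ring_buffer_empty_cpu(buffer, cpu))
		cpu_relax();
}
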
2512/**
2513 * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers
2514 * @buffer_a: One buffer to swap with
2515 * @buffer_b: The other buffer to swap with
 * @cpu: the CPU buffer to swap
2516 *
2517 * This function is useful for tracers that want to take a "snapshot"
2518 * of a CPU buffer and have another backup buffer lying around.
2519 * It is expected that the tracer handles the cpu buffer not being
2520 * used at the moment.
2521 */
2522int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
2523 struct ring_buffer *buffer_b, int cpu)
2524{
2525 struct ring_buffer_per_cpu *cpu_buffer_a;
2526 struct ring_buffer_per_cpu *cpu_buffer_b;
Steven Rostedt554f7862009-03-11 22:00:13 -04002527 int ret = -EINVAL;
2528
Rusty Russell9e01c1b2009-01-01 10:12:22 +10302529 if (!cpumask_test_cpu(cpu, buffer_a->cpumask) ||
2530 !cpumask_test_cpu(cpu, buffer_b->cpumask))
Steven Rostedt554f7862009-03-11 22:00:13 -04002531 goto out;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002532
2533 /* At least make sure the two buffers are somewhat the same */
Lai Jiangshan6d102bc2008-12-17 17:48:23 +08002534 if (buffer_a->pages != buffer_b->pages)
Steven Rostedt554f7862009-03-11 22:00:13 -04002535 goto out;
2536
2537 ret = -EAGAIN;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002538
Steven Rostedt97b17ef2009-01-21 15:24:56 -05002539 if (ring_buffer_flags != RB_BUFFERS_ON)
Steven Rostedt554f7862009-03-11 22:00:13 -04002540 goto out;
Steven Rostedt97b17ef2009-01-21 15:24:56 -05002541
2542 if (atomic_read(&buffer_a->record_disabled))
Steven Rostedt554f7862009-03-11 22:00:13 -04002543 goto out;
Steven Rostedt97b17ef2009-01-21 15:24:56 -05002544
2545 if (atomic_read(&buffer_b->record_disabled))
Steven Rostedt554f7862009-03-11 22:00:13 -04002546 goto out;
Steven Rostedt97b17ef2009-01-21 15:24:56 -05002547
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002548 cpu_buffer_a = buffer_a->buffers[cpu];
2549 cpu_buffer_b = buffer_b->buffers[cpu];
2550
Steven Rostedt97b17ef2009-01-21 15:24:56 -05002551 if (atomic_read(&cpu_buffer_a->record_disabled))
Steven Rostedt554f7862009-03-11 22:00:13 -04002552 goto out;
Steven Rostedt97b17ef2009-01-21 15:24:56 -05002553
2554 if (atomic_read(&cpu_buffer_b->record_disabled))
Steven Rostedt554f7862009-03-11 22:00:13 -04002555 goto out;
Steven Rostedt97b17ef2009-01-21 15:24:56 -05002556
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002557 /*
2558 * We can't do a synchronize_sched here because this
2559 * function can be called in atomic context.
2560 * Normally this will be called from the same CPU as cpu.
2561 * If not it's up to the caller to protect this.
2562 */
2563 atomic_inc(&cpu_buffer_a->record_disabled);
2564 atomic_inc(&cpu_buffer_b->record_disabled);
2565
2566 buffer_a->buffers[cpu] = cpu_buffer_b;
2567 buffer_b->buffers[cpu] = cpu_buffer_a;
2568
2569 cpu_buffer_b->buffer = buffer_a;
2570 cpu_buffer_a->buffer = buffer_b;
2571
2572 atomic_dec(&cpu_buffer_a->record_disabled);
2573 atomic_dec(&cpu_buffer_b->record_disabled);
2574
Steven Rostedt554f7862009-03-11 22:00:13 -04002575 ret = 0;
2576out:
Steven Rostedt554f7862009-03-11 22:00:13 -04002577 return ret;
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002578}
Robert Richterc4f50182008-12-11 16:49:22 +01002579EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);
Steven Rostedt7a8e76a2008-09-29 23:02:38 -04002580
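/*
 * Sketch of the "snapshot" use described above: swap the live cpu
 * buffer with a spare buffer of the same size, then read the spare at
 * leisure while the live buffer keeps recording.  The wrapper and the
 * buffer names are hypothetical.
 */
static int rb_snapshot_cpu_sketch(struct ring_buffer *live,
				  struct ring_buffer *spare, int cpu)
{
	int ret;

	ret = ring_buffer_swap_cpu(spare, live, cpu);
	if (ret)
		return ret;	/* -EINVAL or -EAGAIN, see above */

	/* 'spare' now holds what 'live' recorded for this cpu */
	return 0;
}
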
Steven Rostedt8789a9e2008-12-02 15:34:07 -05002581static void rb_remove_entries(struct ring_buffer_per_cpu *cpu_buffer,
Lai Jiangshan667d2412009-02-09 14:21:17 +08002582 struct buffer_data_page *bpage,
2583 unsigned int offset)
Steven Rostedt8789a9e2008-12-02 15:34:07 -05002584{
2585 struct ring_buffer_event *event;
2586 unsigned long head;
2587
2588 __raw_spin_lock(&cpu_buffer->lock);
Lai Jiangshan667d2412009-02-09 14:21:17 +08002589 for (head = offset; head < local_read(&bpage->commit);
Steven Rostedt8789a9e2008-12-02 15:34:07 -05002590 head += rb_event_length(event)) {
2591
Steven Rostedt044fa782008-12-02 23:50:03 -05002592 event = __rb_data_page_index(bpage, head);
Steven Rostedt8789a9e2008-12-02 15:34:07 -05002593 if (RB_WARN_ON(cpu_buffer, rb_null_event(event)))
2594 return;
2595 /* Only count data entries */
2596 if (event->type != RINGBUF_TYPE_DATA)
2597 continue;
2598 cpu_buffer->entries--;
2599 }
2600 __raw_spin_unlock(&cpu_buffer->lock);
2601}
2602
2603/**
2604 * ring_buffer_alloc_read_page - allocate a page to read from buffer
2605 * @buffer: the buffer to allocate for.
2606 *
2607 * This function is used in conjunction with ring_buffer_read_page.
2608 * When reading a full page from the ring buffer, these functions
2609 * can be used to speed up the process. The calling function should
2610 * allocate a few pages first with this function. Then when it
2611 * needs to get pages from the ring buffer, it passes the result
2612 * of this function into ring_buffer_read_page, which will swap
2613 * the page that was allocated, with the read page of the buffer.
2614 *
2615 * Returns:
2616 * The page allocated, or NULL on error.
2617 */
2618void *ring_buffer_alloc_read_page(struct ring_buffer *buffer)
2619{
Steven Rostedt044fa782008-12-02 23:50:03 -05002620 struct buffer_data_page *bpage;
Steven Rostedtef7a4a12009-03-03 00:27:49 -05002621 unsigned long addr;
Steven Rostedt8789a9e2008-12-02 15:34:07 -05002622
2623 addr = __get_free_page(GFP_KERNEL);
2624 if (!addr)
2625 return NULL;
2626
Steven Rostedt044fa782008-12-02 23:50:03 -05002627 bpage = (void *)addr;
Steven Rostedt8789a9e2008-12-02 15:34:07 -05002628
Steven Rostedtef7a4a12009-03-03 00:27:49 -05002629 rb_init_page(bpage);
2630
Steven Rostedt044fa782008-12-02 23:50:03 -05002631 return bpage;
Steven Rostedt8789a9e2008-12-02 15:34:07 -05002632}
2633
2634/**
2635 * ring_buffer_free_read_page - free an allocated read page
2636 * @buffer: the buffer the page was allocated for
2637 * @data: the page to free
2638 *
2639 * Free a page allocated from ring_buffer_alloc_read_page.
2640 */
2641void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data)
2642{
2643 free_page((unsigned long)data);
2644}
2645
2646/**
2647 * ring_buffer_read_page - extract a page from the ring buffer
2648 * @buffer: buffer to extract from
2649 * @data_page: the page to use allocated from ring_buffer_alloc_read_page
Steven Rostedtef7a4a12009-03-03 00:27:49 -05002650 * @len: amount to extract
Steven Rostedt8789a9e2008-12-02 15:34:07 -05002651 * @cpu: the cpu of the buffer to extract
2652 * @full: should the extraction only happen when the page is full.
2653 *
2654 * This function will pull out a page from the ring buffer and consume it.
2655 * @data_page must be the address of the variable that was returned
2656 * from ring_buffer_alloc_read_page. This is because the page might be used
2657 * to swap with a page in the ring buffer.
2658 *
2659 * for example:
Lai Jiangshanb85fa012009-02-09 14:21:14 +08002660 * rpage = ring_buffer_alloc_read_page(buffer);
Steven Rostedt8789a9e2008-12-02 15:34:07 -05002661 * if (!rpage)
2662 * return error;
Steven Rostedtef7a4a12009-03-03 00:27:49 -05002663 * ret = ring_buffer_read_page(buffer, &rpage, len, cpu, 0);
Lai Jiangshan667d2412009-02-09 14:21:17 +08002664 * if (ret >= 0)
2665 * process_page(rpage, ret);
Steven Rostedt8789a9e2008-12-02 15:34:07 -05002666 *
2667 * When @full is set, the function will not return true unless
2668 * the writer is off the reader page.
2669 *
2670 * Note: it is up to the calling functions to handle sleeps and wakeups.
2671 * The ring buffer can be used anywhere in the kernel and can not
2672 * blindly call wake_up. The layer that uses the ring buffer must be
2673 * responsible for that.
2674 *
2675 * Returns:
Lai Jiangshan667d2412009-02-09 14:21:17 +08002676 * >=0 if data has been transferred, returns the offset of consumed data.
2677 * <0 if no data has been transferred.
Steven Rostedt8789a9e2008-12-02 15:34:07 -05002678 */
int ring_buffer_read_page(struct ring_buffer *buffer,
			  void **data_page, size_t len, int cpu, int full)
{
	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
	struct ring_buffer_event *event;
	struct buffer_data_page *bpage;
	struct buffer_page *reader;
	unsigned long flags;
	unsigned int commit;
	unsigned int read;
	u64 save_timestamp;
	int ret = -1;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		goto out;

	/*
	 * If len is not big enough to hold the page header, then
	 * we cannot copy anything.
	 */
	if (len <= BUF_PAGE_HDR_SIZE)
		goto out;

	len -= BUF_PAGE_HDR_SIZE;

	if (!data_page)
		goto out;

	bpage = *data_page;
	if (!bpage)
		goto out;

	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);

	reader = rb_get_reader_page(cpu_buffer);
	if (!reader)
		goto out_unlock;

	event = rb_reader_event(cpu_buffer);

	read = reader->read;
	commit = rb_page_commit(reader);

	/*
	 * If this page has been partially read, or
	 * len is not big enough to read the rest of the page, or
	 * a writer is still on the page, then
	 * we must copy the data from the page to the buffer.
	 * Otherwise, we can simply swap the page with the one passed in.
	 */
	if (read || (len < (commit - read)) ||
	    cpu_buffer->reader_page == cpu_buffer->commit_page) {
		struct buffer_data_page *rpage = cpu_buffer->reader_page->page;
		unsigned int rpos = read;
		unsigned int pos = 0;
		unsigned int size;

		if (full)
			goto out_unlock;

		if (len > (commit - read))
			len = (commit - read);

		size = rb_event_length(event);

		if (len < size)
			goto out_unlock;

		/* save the current timestamp, since the user will need it */
		save_timestamp = cpu_buffer->read_stamp;

		/* Need to copy one event at a time */
		do {
			memcpy(bpage->data + pos, rpage->data + rpos, size);

			len -= size;

			rb_advance_reader(cpu_buffer);
			rpos = reader->read;
			pos += size;

			event = rb_reader_event(cpu_buffer);
			size = rb_event_length(event);
		} while (len > size);

		/* update bpage */
		local_set(&bpage->commit, pos);
		bpage->time_stamp = save_timestamp;

		/* we copied everything to the beginning */
		read = 0;
	} else {
		/* swap the pages */
		rb_init_page(bpage);
		bpage = reader->page;
		reader->page = *data_page;
		local_set(&reader->write, 0);
		reader->read = 0;
		*data_page = bpage;

		/* update the entry counter */
		rb_remove_entries(cpu_buffer, bpage, read);
	}
	ret = read;

 out_unlock:
	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

 out:
	return ret;
}
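
/*
 * A fuller consumer loop, as a non-authoritative sketch (process_page()
 * is a hypothetical callback supplied by the caller; sleeping and
 * wakeups remain the caller's responsibility, as noted in the
 * kernel-doc above):
 *
 *	void *page = ring_buffer_alloc_read_page(buffer);
 *	int ret;
 *
 *	if (!page)
 *		return -ENOMEM;
 *	while ((ret = ring_buffer_read_page(buffer, &page,
 *					    PAGE_SIZE, cpu, 0)) >= 0)
 *		process_page(page, ret);
 *	ring_buffer_free_read_page(buffer, page);
 *
 * &page must be passed (not page), because a successful call may swap
 * the page with one from the ring buffer; whatever pointer is left in
 * page is the one to free.
 */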

static ssize_t
rb_simple_read(struct file *filp, char __user *ubuf,
	       size_t cnt, loff_t *ppos)
{
	unsigned long *p = filp->private_data;
	char buf[64];
	int r;

	if (test_bit(RB_BUFFERS_DISABLED_BIT, p))
		r = sprintf(buf, "permanently disabled\n");
	else
		r = sprintf(buf, "%d\n", test_bit(RB_BUFFERS_ON_BIT, p));

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
rb_simple_write(struct file *filp, const char __user *ubuf,
		size_t cnt, loff_t *ppos)
{
	unsigned long *p = filp->private_data;
	char buf[64];
	unsigned long val;
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = strict_strtoul(buf, 10, &val);
	if (ret < 0)
		return ret;

	if (val)
		set_bit(RB_BUFFERS_ON_BIT, p);
	else
		clear_bit(RB_BUFFERS_ON_BIT, p);

	(*ppos)++;

	return cnt;
}
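
/*
 * rb_simple_read()/rb_simple_write() back the debugfs "tracing_on" file
 * created in rb_init_debugfs() below. From user space (assuming debugfs
 * is mounted at /sys/kernel/debug):
 *
 *	echo 0 > /sys/kernel/debug/tracing/tracing_on	# stop recording
 *	echo 1 > /sys/kernel/debug/tracing/tracing_on	# resume recording
 *	cat /sys/kernel/debug/tracing/tracing_on	# read current state
 *
 * Writing 0 clears RB_BUFFERS_ON_BIT in ring_buffer_flags, which
 * disables recording on all ring buffers at once.
 */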

static const struct file_operations rb_simple_fops = {
	.open		= tracing_open_generic,
	.read		= rb_simple_read,
	.write		= rb_simple_write,
};


static __init int rb_init_debugfs(void)
{
	struct dentry *d_tracer;
	struct dentry *entry;

	d_tracer = tracing_init_dentry();

	entry = debugfs_create_file("tracing_on", 0644, d_tracer,
				    &ring_buffer_flags, &rb_simple_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'tracing_on' entry\n");

	return 0;
}

fs_initcall(rb_init_debugfs);

#ifdef CONFIG_HOTPLUG_CPU
static int rb_cpu_notify(struct notifier_block *self,
			 unsigned long action, void *hcpu)
{
	struct ring_buffer *buffer =
		container_of(self, struct ring_buffer, cpu_notify);
	long cpu = (long)hcpu;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		if (cpu_isset(cpu, *buffer->cpumask))
			return NOTIFY_OK;

		buffer->buffers[cpu] =
			rb_allocate_cpu_buffer(buffer, cpu);
		if (!buffer->buffers[cpu]) {
			WARN(1, "failed to allocate ring buffer on CPU %ld\n",
			     cpu);
			return NOTIFY_OK;
		}
		smp_wmb();
		cpu_set(cpu, *buffer->cpumask);
		break;
	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
		/*
		 * Do nothing.
		 * If we were to free the buffer, then the user would
		 * lose any trace that was in the buffer.
		 */
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}
#endif