#ifndef _KERNEL_EVENTS_INTERNAL_H
#define _KERNEL_EVENTS_INTERNAL_H

#include <linux/hardirq.h>

/* Buffer handling */

#define RING_BUFFER_WRITABLE		0x01

struct ring_buffer {
	atomic_t			refcount;
	struct rcu_head			rcu_head;
#ifdef CONFIG_PERF_USE_VMALLOC
	struct work_struct		work;
	int				page_order;	/* allocation order  */
#endif
	int				nr_pages;	/* nr of data pages  */
	int				writable;	/* are we writable   */

	atomic_t			poll;		/* POLL_ for wakeups */

	local_t				head;		/* write position    */
	local_t				nest;		/* nested writers    */
	local_t				events;		/* event limit       */
	local_t				wakeup;		/* wakeup stamp      */
	local_t				lost;		/* nr records lost   */

	long				watermark;	/* wakeup watermark  */
	/* poll support: events to wake up on this buffer */
	spinlock_t			event_lock;
	struct list_head		event_list;

	struct perf_event_mmap_page	*user_page;
	void				*data_pages[0];
};
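
/*
 * Editorial note: nr_pages is kept a power of two, which is why
 * __output_copy() below can wrap the page index with a simple mask,
 *
 *	handle->page &= rb->nr_pages - 1;
 *
 * rather than a modulo.  data_pages[] is a zero-length (flexible)
 * array: the page pointers are allocated together with the struct.
 */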

extern void rb_free(struct ring_buffer *rb);
extern struct ring_buffer *
rb_alloc(int nr_pages, long watermark, int cpu, int flags);
extern void perf_event_wakeup(struct perf_event *event);
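
/*
 * Usage sketch (editorial, not part of the original header): roughly
 * how the perf_mmap() path pairs these calls, allocating a buffer
 * sized in pages and publishing it on the event, with the refcount
 * ultimately driving rb_free():
 *
 *	struct ring_buffer *rb;
 *
 *	rb = rb_alloc(nr_pages, watermark, event->cpu, flags);
 *	if (!rb)
 *		return -ENOMEM;
 *	rcu_assign_pointer(event->rb, rb);
 */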

extern void
perf_event_header__init_id(struct perf_event_header *header,
			   struct perf_sample_data *data,
			   struct perf_event *event);
extern void
perf_event__output_id_sample(struct perf_event *event,
			     struct perf_output_handle *handle,
			     struct perf_sample_data *sample);

extern struct page *
perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff);
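
/*
 * Editorial note: perf_mmap_to_page() resolves an mmap page offset
 * into the buffer; offset 0 yields the user_page (the control page
 * shared with userspace) and offsets >= 1 index into data_pages[].
 * This backs the fault handler for the perf mmap region.
 */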

#ifdef CONFIG_PERF_USE_VMALLOC
/*
 * Back perf_mmap() with vmalloc memory.
 *
 * Required for architectures that have d-cache aliasing issues.
 */

static inline int page_order(struct ring_buffer *rb)
{
	return rb->page_order;
}

#else

static inline int page_order(struct ring_buffer *rb)
{
	return 0;
}
#endif

static inline unsigned long perf_data_size(struct ring_buffer *rb)
{
	return rb->nr_pages << (PAGE_SHIFT + page_order(rb));
}
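
/*
 * Worked example (editorial): with 4 KiB pages (PAGE_SHIFT == 12),
 * nr_pages == 8 and page_order() == 0, the data area spans
 * 8 << 12 == 32768 bytes.  With vmalloc backing, the data area is one
 * contiguous allocation, so page_order() raises the per-slot size
 * instead of the slot count.
 */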

static inline void
__output_copy(struct perf_output_handle *handle,
	      const void *buf, unsigned int len)
{
	do {
		unsigned long size = min_t(unsigned long, handle->size, len);

		memcpy(handle->addr, buf, size);

		len -= size;
		handle->addr += size;
		buf += size;
		handle->size -= size;
		if (!handle->size) {
			struct ring_buffer *rb = handle->rb;

			handle->page++;
			handle->page &= rb->nr_pages - 1;
			handle->addr = rb->data_pages[handle->page];
			handle->size = PAGE_SIZE << page_order(rb);
		}
	} while (len);
}
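
/*
 * Editorial note: the copy proceeds one data page at a time; when the
 * current page is exhausted (handle->size hits 0) it advances to the
 * next page, wrapping with the power-of-two mask.  E.g. with
 * nr_pages == 4, finishing page 3 wraps to page (3 + 1) & 3 == 0.
 */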
102
Borislav Petkov9251f902011-10-16 17:15:04 +0200103/* Callchain handling */
Andrew Vagine6dab5f2012-07-11 18:14:58 +0400104extern struct perf_callchain_entry *
105perf_callchain(struct perf_event *event, struct pt_regs *regs);
Borislav Petkov9251f902011-10-16 17:15:04 +0200106extern int get_callchain_buffers(void);
107extern void put_callchain_buffers(void);
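
/*
 * Editorial note: events that sample callchains take a reference on
 * the shared per-cpu callchain buffers via get_callchain_buffers()
 * at init time and drop it with put_callchain_buffers() on teardown;
 * perf_callchain() then fills an entry from those buffers at sample
 * time.
 */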

static inline int get_recursion_context(int *recursion)
{
	int rctx;

	if (in_nmi())
		rctx = 3;
	else if (in_irq())
		rctx = 2;
	else if (in_softirq())
		rctx = 1;
	else
		rctx = 0;

	if (recursion[rctx])
		return -1;

	recursion[rctx]++;
	barrier();

	return rctx;
}

static inline void put_recursion_context(int *recursion, int rctx)
{
	barrier();
	recursion[rctx]--;
}
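
/*
 * Usage sketch (editorial): callers keep a four-slot counter array,
 * one slot per context (task, softirq, hardirq, NMI), and bracket
 * event processing with the pair above:
 *
 *	int rctx = get_recursion_context(recursion);
 *	if (rctx < 0)
 *		return;		(already handling an event in this context)
 *	... process the event ...
 *	put_recursion_context(recursion, rctx);
 *
 * A negative return means an event fired while another event was
 * being handled in the same context; it is dropped rather than
 * allowed to recurse.
 */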

#endif /* _KERNEL_EVENTS_INTERNAL_H */