#ifndef _KERNEL_EVENTS_INTERNAL_H
#define _KERNEL_EVENTS_INTERNAL_H

#include <linux/hardirq.h>
#include <linux/uaccess.h>

/* Buffer handling */

#define RING_BUFFER_WRITABLE		0x01

struct ring_buffer {
	atomic_t			refcount;
	struct rcu_head			rcu_head;
#ifdef CONFIG_PERF_USE_VMALLOC
	struct work_struct		work;
	int				page_order;	/* allocation order  */
#endif
	int				nr_pages;	/* nr of data pages  */
	int				overwrite;	/* can overwrite itself */
	int				paused;		/* can write into ring buffer */

	atomic_t			poll;		/* POLL_ for wakeups */

	local_t				head;		/* write position    */
	local_t				nest;		/* nested writers    */
	local_t				events;		/* event limit       */
	local_t				wakeup;		/* wakeup stamp      */
	local_t				lost;		/* nr records lost   */

	long				watermark;	/* wakeup watermark  */
	long				aux_watermark;
	/* poll() wakeup support */
	spinlock_t			event_lock;
	struct list_head		event_list;

	atomic_t			mmap_count;
	unsigned long			mmap_locked;
	struct user_struct		*mmap_user;

	/* AUX area */
	long				aux_head;
	local_t				aux_nest;
	long				aux_wakeup;	/* last aux_watermark boundary crossed by aux_head */
	unsigned long			aux_pgoff;
	int				aux_nr_pages;
	int				aux_overwrite;
	atomic_t			aux_mmap_count;
	unsigned long			aux_mmap_locked;
	void				(*free_aux)(void *);
	atomic_t			aux_refcount;
	void				**aux_pages;
	void				*aux_priv;

	struct perf_event_mmap_page	*user_page;
	void				*data_pages[0];
};

extern void rb_free(struct ring_buffer *rb);

static inline void rb_free_rcu(struct rcu_head *rcu_head)
{
	struct ring_buffer *rb;

	rb = container_of(rcu_head, struct ring_buffer, rcu_head);
	rb_free(rb);
}
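
/*
 * Illustrative sketch (the real refcount owner lives in events/core.c):
 * the last reference is expected to be dropped via RCU, so lock-free
 * readers under rcu_read_lock() can't see the buffer vanish under them:
 *
 *	if (atomic_dec_and_test(&rb->refcount))
 *		call_rcu(&rb->rcu_head, rb_free_rcu);
 */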

static inline void rb_toggle_paused(struct ring_buffer *rb, bool pause)
{
	if (!pause && rb->nr_pages)
		rb->paused = 0;
	else
		rb->paused = 1;
}
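
/*
 * Note that a buffer with no data pages (!rb->nr_pages) always stays
 * paused, so writers bail out early. A caller, e.g. the
 * PERF_EVENT_IOC_PAUSE_OUTPUT ioctl path, would do roughly:
 *
 *	rcu_read_lock();
 *	rb = rcu_dereference(event->rb);
 *	if (rb && rb->nr_pages)
 *		rb_toggle_paused(rb, !!arg);
 *	rcu_read_unlock();
 */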

extern struct ring_buffer *
rb_alloc(int nr_pages, long watermark, int cpu, int flags);
extern void perf_event_wakeup(struct perf_event *event);
extern int rb_alloc_aux(struct ring_buffer *rb, struct perf_event *event,
			pgoff_t pgoff, int nr_pages, long watermark, int flags);
extern void rb_free_aux(struct ring_buffer *rb);
extern struct ring_buffer *ring_buffer_get(struct perf_event *event);
extern void ring_buffer_put(struct ring_buffer *rb);

static inline bool rb_has_aux(struct ring_buffer *rb)
{
	return !!rb->aux_nr_pages;
}

void perf_event_aux_event(struct perf_event *event, unsigned long head,
			  unsigned long size, u64 flags);

extern struct page *
perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff);

#ifdef CONFIG_PERF_USE_VMALLOC
/*
 * Back perf_mmap() with vmalloc memory.
 *
 * Required for architectures that have d-cache aliasing issues.
 */

static inline int page_order(struct ring_buffer *rb)
{
	return rb->page_order;
}

#else

static inline int page_order(struct ring_buffer *rb)
{
	return 0;
}
#endif

static inline unsigned long perf_data_size(struct ring_buffer *rb)
{
	return rb->nr_pages << (PAGE_SHIFT + page_order(rb));
}

static inline unsigned long perf_aux_size(struct ring_buffer *rb)
{
	return rb->aux_nr_pages << PAGE_SHIFT;
}
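
/*
 * Worked example: with CONFIG_PERF_USE_VMALLOC, a 16-page data area is
 * a single vmalloc'ed block recorded as rb->nr_pages == 1 with
 * page_order(rb) == 4, so perf_data_size() == 1 << (PAGE_SHIFT + 4),
 * i.e. 16 * PAGE_SIZE; without vmalloc backing, page_order() is 0 and
 * nr_pages counts individual pages, giving the same total.
 *
 * The copy-loop body below implements the output-copy contract:
 * memcpy_func() returns the number of bytes it did NOT copy (0 on full
 * success), hence "written = size - written". When a page fills up, the
 * handle advances to the next data page; the "&= rb->nr_pages - 1" wrap
 * relies on nr_pages being zero or a power of two, which perf_mmap()
 * enforces.
 */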
#define __DEFINE_OUTPUT_COPY_BODY(advance_buf, memcpy_func, ...)	\
{									\
	unsigned long size, written;					\
									\
	do {								\
		size = min(handle->size, len);				\
		written = memcpy_func(__VA_ARGS__);			\
		written = size - written;				\
									\
		len -= written;						\
		handle->addr += written;				\
		if (advance_buf)					\
			buf += written;					\
		handle->size -= written;				\
		if (!handle->size) {					\
			struct ring_buffer *rb = handle->rb;		\
									\
			handle->page++;					\
			handle->page &= rb->nr_pages - 1;		\
			handle->addr = rb->data_pages[handle->page];	\
			handle->size = PAGE_SIZE << page_order(rb);	\
		}							\
	} while (len && written == size);				\
									\
	return len;							\
}

#define DEFINE_OUTPUT_COPY(func_name, memcpy_func)			\
static inline unsigned long						\
func_name(struct perf_output_handle *handle,				\
	  const void *buf, unsigned long len)				\
__DEFINE_OUTPUT_COPY_BODY(true, memcpy_func, handle->addr, buf, size)
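
/*
 * For reference, an illustrative expansion of
 * DEFINE_OUTPUT_COPY(__output_copy, memcpy_common) used below:
 *
 *	static inline unsigned long
 *	__output_copy(struct perf_output_handle *handle,
 *		      const void *buf, unsigned long len)
 *	{
 *		... the copy loop above, invoking
 *		memcpy_common(handle->addr, buf, size) per chunk ...
 *	}
 *
 * The generated function returns the number of bytes left uncopied.
 */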

static inline unsigned long
__output_custom(struct perf_output_handle *handle, perf_copy_f copy_func,
		const void *buf, unsigned long len)
{
	unsigned long orig_len = len;
	__DEFINE_OUTPUT_COPY_BODY(false, copy_func, handle->addr, buf,
				  orig_len - len, size)
}

/*
 * Copy helpers for DEFINE_OUTPUT_COPY(); each returns the number of
 * bytes it failed to copy, i.e. 0 means the whole chunk was handled.
 */
static inline unsigned long
memcpy_common(void *dst, const void *src, unsigned long n)
{
	memcpy(dst, src, n);
	return 0;
}

DEFINE_OUTPUT_COPY(__output_copy, memcpy_common)

/* Write nothing but still advance the handle; used to skip over space. */
static inline unsigned long
memcpy_skip(void *dst, const void *src, unsigned long n)
{
	return 0;
}

DEFINE_OUTPUT_COPY(__output_skip, memcpy_skip)

#ifndef arch_perf_out_copy_user
#define arch_perf_out_copy_user arch_perf_out_copy_user

static inline unsigned long
arch_perf_out_copy_user(void *dst, const void *src, unsigned long n)
{
	unsigned long ret;

	/*
	 * Disable page faults so this is safe in atomic/NMI context;
	 * returns the bytes left uncopied, like the helpers above.
	 */
	pagefault_disable();
	ret = __copy_from_user_inatomic(dst, src, n);
	pagefault_enable();

	return ret;
}
#endif

DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user)
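
/*
 * __output_copy_user() feeds user memory (e.g. user stack dumps) into
 * the ring buffer. The default above is NMI-safe: it only disables page
 * faults around __copy_from_user_inatomic(). An architecture with a
 * faster NMI-safe routine could, in principle, provide its own
 * (hypothetical example):
 *
 *	#define arch_perf_out_copy_user my_arch_copy_user_nmi
 *
 * from its asm headers, before this header is seen.
 */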

/* Callchain handling */
extern struct perf_callchain_entry *
perf_callchain(struct perf_event *event, struct pt_regs *regs);

/*
 * Claim the recursion slot for the current context level:
 * 0 = task, 1 = softirq, 2 = hardirq, 3 = NMI. Returns -1 if an event
 * is already being handled at this level on this CPU.
 */
static inline int get_recursion_context(int *recursion)
{
	int rctx;

	if (in_nmi())
		rctx = 3;
	else if (in_irq())
		rctx = 2;
	else if (in_softirq())
		rctx = 1;
	else
		rctx = 0;

	if (recursion[rctx])
		return -1;

	recursion[rctx]++;
	barrier();

	return rctx;
}

static inline void put_recursion_context(int *recursion, int rctx)
{
	barrier();
	recursion[rctx]--;
}
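
/*
 * Typical pairing, as a sketch (real callers use a per-CPU
 * int[PERF_NR_CONTEXTS] array such as the callchain code's):
 *
 *	int rctx = get_recursion_context(this_cpu_ptr(recursion));
 *	if (rctx < 0)
 *		return;			// already handling an event here
 *	... emit the event / walk the callchain ...
 *	put_recursion_context(this_cpu_ptr(recursion), rctx);
 */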

#ifdef CONFIG_HAVE_PERF_USER_STACK_DUMP
static inline bool arch_perf_have_user_stack_dump(void)
{
	return true;
}

#define perf_user_stack_pointer(regs) user_stack_pointer(regs)
#else
static inline bool arch_perf_have_user_stack_dump(void)
{
	return false;
}

#define perf_user_stack_pointer(regs) 0
#endif /* CONFIG_HAVE_PERF_USER_STACK_DUMP */

#endif /* _KERNEL_EVENTS_INTERNAL_H */