/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _KERNEL_EVENTS_INTERNAL_H
#define _KERNEL_EVENTS_INTERNAL_H

#include <linux/hardirq.h>
#include <linux/uaccess.h>

/* Buffer handling */

#define RING_BUFFER_WRITABLE		0x01

struct ring_buffer {
	atomic_t			refcount;
	struct rcu_head			rcu_head;
#ifdef CONFIG_PERF_USE_VMALLOC
	struct work_struct		work;
	int				page_order;	/* allocation order  */
#endif
	int				nr_pages;	/* nr of data pages  */
	int				overwrite;	/* can overwrite itself */
	int				paused;		/* can write into ring buffer */

	atomic_t			poll;		/* POLL_ for wakeups */

	local_t				head;		/* write position    */
	local_t				nest;		/* nested writers    */
	local_t				events;		/* event limit       */
	local_t				wakeup;		/* wakeup stamp      */
	local_t				lost;		/* nr records lost   */

	long				watermark;	/* wakeup watermark  */
	long				aux_watermark;
	/* poll crap */
	spinlock_t			event_lock;
	struct list_head		event_list;

	atomic_t			mmap_count;
	unsigned long			mmap_locked;
	struct user_struct		*mmap_user;

	/* AUX area */
	long				aux_head;
	local_t				aux_nest;
	long				aux_wakeup;	/* last aux_watermark boundary crossed by aux_head */
	unsigned long			aux_pgoff;
	int				aux_nr_pages;
	int				aux_overwrite;
	atomic_t			aux_mmap_count;
	unsigned long			aux_mmap_locked;
	void				(*free_aux)(void *);
	atomic_t			aux_refcount;
	void				**aux_pages;
	void				*aux_priv;

	struct perf_event_mmap_page	*user_page;
	void				*data_pages[0];
};
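
/*
 * Note: data_pages[] is a flexible array; rb_alloc() sizes the
 * allocation for nr_pages entries, and nr_pages is a power of two
 * (enforced at mmap time), so the output path below can wrap the
 * write position with a simple mask.
 */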

extern void rb_free(struct ring_buffer *rb);

static inline void rb_free_rcu(struct rcu_head *rcu_head)
{
	struct ring_buffer *rb;

	rb = container_of(rcu_head, struct ring_buffer, rcu_head);
	rb_free(rb);
}

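/*
 * A ring buffer with no data pages can never be written to, so it is
 * kept paused regardless of the @pause argument.
 */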
static inline void rb_toggle_paused(struct ring_buffer *rb, bool pause)
{
	if (!pause && rb->nr_pages)
		rb->paused = 0;
	else
		rb->paused = 1;
}

extern struct ring_buffer *
rb_alloc(int nr_pages, long watermark, int cpu, int flags);
extern void perf_event_wakeup(struct perf_event *event);
extern int rb_alloc_aux(struct ring_buffer *rb, struct perf_event *event,
			pgoff_t pgoff, int nr_pages, long watermark, int flags);
extern void rb_free_aux(struct ring_buffer *rb);
extern struct ring_buffer *ring_buffer_get(struct perf_event *event);
extern void ring_buffer_put(struct ring_buffer *rb);
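
/*
 * Typical buffer lifecycle, as a rough sketch (the authoritative
 * sequence lives in kernel/events/core.c and ring_buffer.c):
 *
 *	rb = rb_alloc(nr_pages, watermark, cpu, RING_BUFFER_WRITABLE);
 *	rcu_assign_pointer(event->rb, rb);
 *	...
 *	rb = ring_buffer_get(event);	// takes a reference under RCU
 *	...				// read/write the buffer
 *	ring_buffer_put(rb);		// drops it; the last put frees
 *					// via call_rcu() -> rb_free_rcu()
 */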

static inline bool rb_has_aux(struct ring_buffer *rb)
{
	return !!rb->aux_nr_pages;
}

void perf_event_aux_event(struct perf_event *event, unsigned long head,
			  unsigned long size, u64 flags);

extern struct page *
perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff);

#ifdef CONFIG_PERF_USE_VMALLOC
/*
 * Back perf_mmap() with vmalloc memory.
 *
 * Required for architectures that have d-cache aliasing issues.
 */

static inline int page_order(struct ring_buffer *rb)
{
	return rb->page_order;
}

#else

static inline int page_order(struct ring_buffer *rb)
{
	return 0;
}
#endif

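/*
 * With CONFIG_PERF_USE_VMALLOC the data area is one vmalloc'ed region,
 * so a "page" slot covers 2^page_order real pages; hence the extra
 * shift in the size calculation below.
 */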
static inline unsigned long perf_data_size(struct ring_buffer *rb)
{
	return rb->nr_pages << (PAGE_SHIFT + page_order(rb));
}

static inline unsigned long perf_aux_size(struct ring_buffer *rb)
{
	return rb->aux_nr_pages << PAGE_SHIFT;
}

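/*
 * Copy-loop body shared by the __output_*() helpers below.
 *
 * memcpy_func is expected to follow the copy_from_user() convention and
 * return the number of bytes it could NOT copy.  The loop keeps going,
 * advancing to the next data page whenever the current one fills up,
 * until either everything was written or memcpy_func came up short
 * (e.g. a faulting user access).  The expanded function returns the
 * number of bytes left uncopied.
 */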
#define __DEFINE_OUTPUT_COPY_BODY(advance_buf, memcpy_func, ...)	\
{									\
	unsigned long size, written;					\
									\
	do {								\
		size = min(handle->size, len);				\
		written = memcpy_func(__VA_ARGS__);			\
		written = size - written;				\
									\
		len -= written;						\
		handle->addr += written;				\
		if (advance_buf)					\
			buf += written;					\
		handle->size -= written;				\
		if (!handle->size) {					\
			struct ring_buffer *rb = handle->rb;		\
									\
			handle->page++;					\
			handle->page &= rb->nr_pages - 1;		\
			handle->addr = rb->data_pages[handle->page];	\
			handle->size = PAGE_SIZE << page_order(rb);	\
		}							\
	} while (len && written == size);				\
									\
	return len;							\
}

#define DEFINE_OUTPUT_COPY(func_name, memcpy_func)			\
static inline unsigned long						\
func_name(struct perf_output_handle *handle,				\
	  const void *buf, unsigned long len)				\
__DEFINE_OUTPUT_COPY_BODY(true, memcpy_func, handle->addr, buf, size)

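/*
 * __output_custom() feeds the copy function an offset into the source
 * (orig_len - len, i.e. how much has been copied so far) instead of a
 * moving buf pointer, which is why advance_buf is false here.
 */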
static inline unsigned long
__output_custom(struct perf_output_handle *handle, perf_copy_f copy_func,
		const void *buf, unsigned long len)
{
	unsigned long orig_len = len;
	__DEFINE_OUTPUT_COPY_BODY(false, copy_func, handle->addr, buf,
				  orig_len - len, size)
}

static inline unsigned long
memcpy_common(void *dst, const void *src, unsigned long n)
{
	memcpy(dst, src, n);
	return 0;
}

DEFINE_OUTPUT_COPY(__output_copy, memcpy_common)

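/*
 * memcpy_skip() copies nothing but reports full success, so the
 * resulting __output_skip() simply advances the handle across @n
 * bytes of buffer space without touching it.
 */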
static inline unsigned long
memcpy_skip(void *dst, const void *src, unsigned long n)
{
	return 0;
}

DEFINE_OUTPUT_COPY(__output_skip, memcpy_skip)

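/*
 * Architectures may supply their own, faster arch_perf_out_copy_user;
 * the self-referencing #define below keeps the token defined so this
 * generic fallback compiles out when an arch header already provided
 * one.
 */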
#ifndef arch_perf_out_copy_user
#define arch_perf_out_copy_user arch_perf_out_copy_user

static inline unsigned long
arch_perf_out_copy_user(void *dst, const void *src, unsigned long n)
{
	unsigned long ret;

	pagefault_disable();
	ret = __copy_from_user_inatomic(dst, src, n);
	pagefault_enable();

	return ret;
}
#endif

DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user)

/* Callchain handling */
extern struct perf_callchain_entry *
perf_callchain(struct perf_event *event, struct pt_regs *regs);

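/*
 * Per-cpu recursion protection: one counter per context (task,
 * softirq, hardirq, NMI).  get_recursion_context() returns the index
 * of the current context, or -1 if that context is already active,
 * which stops a perf event from recursing into itself.  A rough usage
 * sketch (the real callers live in kernel/events/callchain.c and
 * core.c):
 *
 *	rctx = get_recursion_context(this_cpu_ptr(callchain_recursion));
 *	if (rctx < 0)
 *		return NULL;	// recursion detected, bail out
 *	...			// do the work
 *	put_recursion_context(this_cpu_ptr(callchain_recursion), rctx);
 */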
static inline int get_recursion_context(int *recursion)
{
	int rctx;

	if (unlikely(in_nmi()))
		rctx = 3;
	else if (in_irq())
		rctx = 2;
	else if (in_softirq())
		rctx = 1;
	else
		rctx = 0;

	if (recursion[rctx])
		return -1;

	recursion[rctx]++;
	barrier();

	return rctx;
}

static inline void put_recursion_context(int *recursion, int rctx)
{
	barrier();
	recursion[rctx]--;
}

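/*
 * Architectures that select CONFIG_HAVE_PERF_USER_STACK_DUMP provide
 * user_stack_pointer(); everywhere else user stack dumps are
 * unavailable and the user stack pointer reads as 0.
 */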
#ifdef CONFIG_HAVE_PERF_USER_STACK_DUMP
static inline bool arch_perf_have_user_stack_dump(void)
{
	return true;
}

#define perf_user_stack_pointer(regs) user_stack_pointer(regs)
#else
static inline bool arch_perf_have_user_stack_dump(void)
{
	return false;
}

#define perf_user_stack_pointer(regs) 0
#endif /* CONFIG_HAVE_PERF_USER_STACK_DUMP */

#endif /* _KERNEL_EVENTS_INTERNAL_H */