/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _KERNEL_EVENTS_INTERNAL_H
#define _KERNEL_EVENTS_INTERNAL_H

#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/refcount.h>

/* Buffer handling */

#define RING_BUFFER_WRITABLE		0x01

struct ring_buffer {
	refcount_t			refcount;
	struct rcu_head			rcu_head;
#ifdef CONFIG_PERF_USE_VMALLOC
	struct work_struct		work;
	int				page_order;	/* allocation order  */
#endif
	int				nr_pages;	/* nr of data pages  */
	int				overwrite;	/* can overwrite itself */
	int				paused;		/* can write into ring buffer */

	atomic_t			poll;		/* POLL_ for wakeups */

	local_t				head;		/* write position    */
	local_t				nest;		/* nested writers    */
	local_t				events;		/* event limit       */
	local_t				wakeup;		/* wakeup stamp      */
	local_t				lost;		/* nr records lost   */

	long				watermark;	/* wakeup watermark  */
	long				aux_watermark;
	/* poll support */
	spinlock_t			event_lock;
	struct list_head		event_list;

	atomic_t			mmap_count;
	unsigned long			mmap_locked;
	struct user_struct		*mmap_user;

	/* AUX area */
	long				aux_head;
	local_t				aux_nest;
	long				aux_wakeup;	/* last aux_watermark boundary crossed by aux_head */
	unsigned long			aux_pgoff;
	int				aux_nr_pages;
	int				aux_overwrite;
	atomic_t			aux_mmap_count;
	unsigned long			aux_mmap_locked;
	void				(*free_aux)(void *);
	refcount_t			aux_refcount;
	void				**aux_pages;
	void				*aux_priv;

	struct perf_event_mmap_page	*user_page;
	void				*data_pages[0];
};
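
/*
 * data_pages[] is a C89-style flexible array: the allocator over-sizes
 * the struct so that data_pages[0..nr_pages-1] follow it directly in
 * memory (an inference from the [0]-sized member; the allocation itself
 * lives in kernel/events/ring_buffer.c, not in this header).
 */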

extern void rb_free(struct ring_buffer *rb);

static inline void rb_free_rcu(struct rcu_head *rcu_head)
{
	struct ring_buffer *rb;

	rb = container_of(rcu_head, struct ring_buffer, rcu_head);
	rb_free(rb);
}
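
/*
 * Minimal usage sketch for rb_free_rcu() (hedged: modelled on how the
 * perf core drops its last buffer reference, shown here only as an
 * illustration of the RCU hand-off):
 *
 *	if (refcount_dec_and_test(&rb->refcount))
 *		call_rcu(&rb->rcu_head, rb_free_rcu);
 *
 * Readers that dereference the buffer under rcu_read_lock() then keep
 * it valid until they leave the read-side critical section.
 */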
69
Wang Nan86e79722016-03-28 06:41:29 +000070static inline void rb_toggle_paused(struct ring_buffer *rb, bool pause)
71{
72 if (!pause && rb->nr_pages)
73 rb->paused = 0;
74 else
75 rb->paused = 1;
76}
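
/*
 * Writer-side counterpart of rb_toggle_paused() (an assumption based on
 * the output path in kernel/events/ring_buffer.c, reproduced only for
 * illustration): a paused buffer rejects new records and accounts them
 * as lost.
 *
 *	if (unlikely(rb->paused)) {
 *		if (rb->nr_pages)
 *			local_inc(&rb->lost);
 *		goto out;
 *	}
 */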

extern struct ring_buffer *
rb_alloc(int nr_pages, long watermark, int cpu, int flags);
extern void perf_event_wakeup(struct perf_event *event);
extern int rb_alloc_aux(struct ring_buffer *rb, struct perf_event *event,
			pgoff_t pgoff, int nr_pages, long watermark, int flags);
extern void rb_free_aux(struct ring_buffer *rb);
extern struct ring_buffer *ring_buffer_get(struct perf_event *event);
extern void ring_buffer_put(struct ring_buffer *rb);

static inline bool rb_has_aux(struct ring_buffer *rb)
{
	return !!rb->aux_nr_pages;
}

void perf_event_aux_event(struct perf_event *event, unsigned long head,
			  unsigned long size, u64 flags);

extern struct page *
perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff);

#ifdef CONFIG_PERF_USE_VMALLOC
/*
 * Back perf_mmap() with vmalloc memory.
 *
 * Required for architectures that have d-cache aliasing issues.
 */

static inline int page_order(struct ring_buffer *rb)
{
	return rb->page_order;
}

#else

static inline int page_order(struct ring_buffer *rb)
{
	return 0;
}
#endif
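
/*
 * In the vmalloc case the data area is one virtually contiguous
 * allocation, so rb->nr_pages collapses to 0 or 1 and page_order()
 * carries the size instead (an inference from rb_alloc(); either way
 * perf_data_size() below yields the same total byte count).
 */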

static inline unsigned long perf_data_size(struct ring_buffer *rb)
{
	return rb->nr_pages << (PAGE_SHIFT + page_order(rb));
}

static inline unsigned long perf_aux_size(struct ring_buffer *rb)
{
	return rb->aux_nr_pages << PAGE_SHIFT;
}

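/*
 * Copy-loop template shared by the output copy variants below. Return
 * convention (a reading aid inferred from the helpers in this file):
 * memcpy_func returns the number of bytes it did NOT copy, and the
 * generated function likewise returns the bytes left uncopied, so 0
 * means everything was written.
 */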
#define __DEFINE_OUTPUT_COPY_BODY(advance_buf, memcpy_func, ...)	\
{									\
	unsigned long size, written;					\
									\
	do {								\
		size = min(handle->size, len);				\
		written = memcpy_func(__VA_ARGS__);			\
		written = size - written;				\
									\
		len -= written;						\
		handle->addr += written;				\
		if (advance_buf)					\
			buf += written;					\
		handle->size -= written;				\
		if (!handle->size) {					\
			struct ring_buffer *rb = handle->rb;		\
									\
			handle->page++;					\
			handle->page &= rb->nr_pages - 1;		\
			handle->addr = rb->data_pages[handle->page];	\
			handle->size = PAGE_SIZE << page_order(rb);	\
		}							\
	} while (len && written == size);				\
									\
	return len;							\
}

#define DEFINE_OUTPUT_COPY(func_name, memcpy_func)			\
static inline unsigned long						\
func_name(struct perf_output_handle *handle,				\
	  const void *buf, unsigned long len)				\
__DEFINE_OUTPUT_COPY_BODY(true, memcpy_func, handle->addr, buf, size)

static inline unsigned long
__output_custom(struct perf_output_handle *handle, perf_copy_f copy_func,
		const void *buf, unsigned long len)
{
	unsigned long orig_len = len;
	__DEFINE_OUTPUT_COPY_BODY(false, copy_func, handle->addr, buf,
				  orig_len - len, size)
}
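
/*
 * Note on __output_custom(): advance_buf is false, so @buf stays put
 * while (orig_len - len) feeds the running count of bytes already
 * copied to copy_func on every iteration; a custom callback (e.g. a
 * BPF output helper, an assumed example) can use it as an offset into
 * its source. The temporary orig_len is needed because the loop body
 * decrements len in place.
 */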

static inline unsigned long
memcpy_common(void *dst, const void *src, unsigned long n)
{
	memcpy(dst, src, n);
	return 0;
}

DEFINE_OUTPUT_COPY(__output_copy, memcpy_common)

static inline unsigned long
memcpy_skip(void *dst, const void *src, unsigned long n)
{
	return 0;
}

DEFINE_OUTPUT_COPY(__output_skip, memcpy_skip)

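/*
 * Caller sketch (hedged: modelled on the exported wrappers in
 * kernel/events/ring_buffer.c, shown purely for the calling
 * convention):
 *
 *	unsigned int perf_output_copy(struct perf_output_handle *handle,
 *				      const void *buf, unsigned int len)
 *	{
 *		return __output_copy(handle, buf, len);
 *	}
 *
 * __output_skip() takes the same arguments but only advances the
 * handle, which is what record padding wants.
 */
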
#ifndef arch_perf_out_copy_user
#define arch_perf_out_copy_user arch_perf_out_copy_user

static inline unsigned long
arch_perf_out_copy_user(void *dst, const void *src, unsigned long n)
{
	unsigned long ret;

	pagefault_disable();
	ret = __copy_from_user_inatomic(dst, src, n);
	pagefault_enable();

	return ret;
}
#endif

DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user)

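/*
 * __output_copy_user() lets callers pull user memory into the buffer
 * without sleeping: with page faults disabled, a fault makes
 * __copy_from_user_inatomic() return early and the uncopied tail
 * simply propagates out through the return value.
 */
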
/*
 * Track recursion of the output code per context level:
 * 0 == task, 1 == softirq, 2 == hardirq, 3 == NMI.
 */
static inline int get_recursion_context(int *recursion)
{
	int rctx;

	if (unlikely(in_nmi()))
		rctx = 3;
	else if (in_irq())
		rctx = 2;
	else if (in_softirq())
		rctx = 1;
	else
		rctx = 0;

	if (recursion[rctx])
		return -1;

	recursion[rctx]++;
	barrier();	/* compiler barrier: take the flag before the work it guards */

	return rctx;
}

static inline void put_recursion_context(int *recursion, int rctx)
{
	barrier();	/* ...and finish that work before releasing the flag */
	recursion[rctx]--;
}

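/*
 * Typical pairing (a sketch; the recursion array itself belongs to the
 * caller, e.g. a per-CPU int[4] as used by the software event code):
 *
 *	rctx = get_recursion_context(recursion);
 *	if (rctx < 0)
 *		return;	/* already inside output at this context level *​/
 *
 *	... emit the event ...
 *
 *	put_recursion_context(recursion, rctx);
 */
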
#ifdef CONFIG_HAVE_PERF_USER_STACK_DUMP
static inline bool arch_perf_have_user_stack_dump(void)
{
	return true;
}

#define perf_user_stack_pointer(regs) user_stack_pointer(regs)
#else
static inline bool arch_perf_have_user_stack_dump(void)
{
	return false;
}

#define perf_user_stack_pointer(regs) 0
#endif /* CONFIG_HAVE_PERF_USER_STACK_DUMP */
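
/*
 * Consumer sketch (hedged: modelled on the user-stack sampling path,
 * shown only to illustrate why both helpers exist):
 *
 *	if (!arch_perf_have_user_stack_dump())
 *		return 0;
 *	sp = perf_user_stack_pointer(regs);
 */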

#endif /* _KERNEL_EVENTS_INTERNAL_H */