/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _KERNEL_EVENTS_INTERNAL_H
#define _KERNEL_EVENTS_INTERNAL_H

#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/refcount.h>

/* Buffer handling */

#define RING_BUFFER_WRITABLE		0x01

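/*
 * Kernel-internal state of one mmap()ed perf ring buffer.  user_page
 * points at the struct perf_event_mmap_page control page shared with
 * user space, data_pages[] holds the data pages that follow it, and the
 * aux_* fields describe the optional AUX area used for high-bandwidth
 * data such as hardware trace.
 */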
struct perf_buffer {
	refcount_t			refcount;
	struct rcu_head			rcu_head;
#ifdef CONFIG_PERF_USE_VMALLOC
	struct work_struct		work;
	int				page_order;	/* allocation order  */
#endif
	int				nr_pages;	/* nr of data pages  */
	int				overwrite;	/* can overwrite itself */
	int				paused;		/* can write into ring buffer */

	atomic_t			poll;		/* POLL_ for wakeups */

	local_t				head;		/* write position    */
	unsigned int			nest;		/* nested writers    */
	local_t				events;		/* event limit       */
	local_t				wakeup;		/* wakeup stamp      */
	local_t				lost;		/* nr records lost   */

	long				watermark;	/* wakeup watermark  */
	long				aux_watermark;
	/* poll crap */
	spinlock_t			event_lock;
	struct list_head		event_list;

	atomic_t			mmap_count;
	unsigned long			mmap_locked;
	struct user_struct		*mmap_user;

	/* AUX area */
	long				aux_head;
	unsigned int			aux_nest;
	long				aux_wakeup;	/* last aux_watermark boundary crossed by aux_head */
	unsigned long			aux_pgoff;
	int				aux_nr_pages;
	int				aux_overwrite;
	atomic_t			aux_mmap_count;
	unsigned long			aux_mmap_locked;
	void				(*free_aux)(void *);
	refcount_t			aux_refcount;
	int				aux_in_sampling;
	void				**aux_pages;
	void				*aux_priv;

	struct perf_event_mmap_page	*user_page;
	void				*data_pages[];
};

extern void rb_free(struct perf_buffer *rb);

static inline void rb_free_rcu(struct rcu_head *rcu_head)
{
	struct perf_buffer *rb;

	rb = container_of(rcu_head, struct perf_buffer, rcu_head);
	rb_free(rb);
}

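/*
 * Pause or resume writing into the buffer; the output path is expected
 * to check rb->paused.  A buffer without data pages stays paused
 * regardless of @pause.
 */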
static inline void rb_toggle_paused(struct perf_buffer *rb, bool pause)
{
	if (!pause && rb->nr_pages)
		rb->paused = 0;
	else
		rb->paused = 1;
}

extern struct perf_buffer *
rb_alloc(int nr_pages, long watermark, int cpu, int flags);
extern void perf_event_wakeup(struct perf_event *event);
extern int rb_alloc_aux(struct perf_buffer *rb, struct perf_event *event,
			pgoff_t pgoff, int nr_pages, long watermark, int flags);
extern void rb_free_aux(struct perf_buffer *rb);
extern struct perf_buffer *ring_buffer_get(struct perf_event *event);
extern void ring_buffer_put(struct perf_buffer *rb);

static inline bool rb_has_aux(struct perf_buffer *rb)
{
	return !!rb->aux_nr_pages;
}

void perf_event_aux_event(struct perf_event *event, unsigned long head,
			  unsigned long size, u64 flags);

extern struct page *
perf_mmap_to_page(struct perf_buffer *rb, unsigned long pgoff);

#ifdef CONFIG_PERF_USE_VMALLOC
/*
 * Back perf_mmap() with vmalloc memory.
 *
 * Required for architectures that have d-cache aliasing issues.
 */

static inline int page_order(struct perf_buffer *rb)
{
	return rb->page_order;
}

#else

static inline int page_order(struct perf_buffer *rb)
{
	return 0;
}
#endif

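/*
 * page_order() abstracts the allocation order of the data pages: with
 * vmalloc backing, each data_pages[] entry spans 2^rb->page_order
 * pages; otherwise every entry is a single order-0 page.  The size
 * calculations below rely on this.
 */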
static inline unsigned long perf_data_size(struct perf_buffer *rb)
{
	return rb->nr_pages << (PAGE_SHIFT + page_order(rb));
}

static inline unsigned long perf_aux_size(struct perf_buffer *rb)
{
	return rb->aux_nr_pages << PAGE_SHIFT;
}

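/*
 * Copy loop shared by the __output_*() helpers generated below.  The
 * memcpy_func callback follows the copy_from_user() convention and
 * returns the number of bytes it could NOT copy, so "size - written"
 * is the number of bytes actually written.  The loop advances the
 * output handle across the ring-buffer data pages and returns how many
 * of the @len bytes remain uncopied (0 on complete success).
 */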
#define __DEFINE_OUTPUT_COPY_BODY(advance_buf, memcpy_func, ...)	\
{									\
	unsigned long size, written;					\
									\
	do {								\
		size = min(handle->size, len);				\
		written = memcpy_func(__VA_ARGS__);			\
		written = size - written;				\
									\
		len -= written;						\
		handle->addr += written;				\
		if (advance_buf)					\
			buf += written;					\
		handle->size -= written;				\
		if (!handle->size) {					\
			struct perf_buffer *rb = handle->rb;		\
									\
			handle->page++;					\
			handle->page &= rb->nr_pages - 1;		\
			handle->addr = rb->data_pages[handle->page];	\
			handle->size = PAGE_SIZE << page_order(rb);	\
		}							\
	} while (len && written == size);				\
									\
	return len;							\
}

#define DEFINE_OUTPUT_COPY(func_name, memcpy_func)			\
static inline unsigned long						\
func_name(struct perf_output_handle *handle,				\
	  const void *buf, unsigned long len)				\
__DEFINE_OUTPUT_COPY_BODY(true, memcpy_func, handle->addr, buf, size)

static inline unsigned long
__output_custom(struct perf_output_handle *handle, perf_copy_f copy_func,
		const void *buf, unsigned long len)
{
	unsigned long orig_len = len;
	__DEFINE_OUTPUT_COPY_BODY(false, copy_func, handle->addr, buf,
				  orig_len - len, size)
}

static inline unsigned long
memcpy_common(void *dst, const void *src, unsigned long n)
{
	memcpy(dst, src, n);
	return 0;
}

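/*
 * The invocation below generates, roughly:
 *
 *	static inline unsigned long
 *	__output_copy(struct perf_output_handle *handle,
 *		      const void *buf, unsigned long len)
 *	{ ... copy loop from __DEFINE_OUTPUT_COPY_BODY ... }
 *
 * i.e. a helper that copies @len bytes of kernel memory from @buf into
 * the ring buffer and returns the number of bytes left uncopied.
 */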
DEFINE_OUTPUT_COPY(__output_copy, memcpy_common)

static inline unsigned long
memcpy_skip(void *dst, const void *src, unsigned long n)
{
	return 0;
}

DEFINE_OUTPUT_COPY(__output_skip, memcpy_skip)

#ifndef arch_perf_out_copy_user
#define arch_perf_out_copy_user arch_perf_out_copy_user

static inline unsigned long
arch_perf_out_copy_user(void *dst, const void *src, unsigned long n)
{
	unsigned long ret;

	pagefault_disable();
	ret = __copy_from_user_inatomic(dst, src, n);
	pagefault_enable();

	return ret;
}
#endif

DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user)

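/*
 * Claim a recursion-protection slot for the current context: index 0
 * for task context, 1 while serving a softirq, 2 in hardirq context and
 * 3 in NMI context, derived from preempt_count().  Returns -1 if that
 * slot is already taken, i.e. if we are recursing within the same
 * context; put_recursion_context() releases the slot.
 */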
static inline int get_recursion_context(int *recursion)
{
	unsigned int pc = preempt_count();
	unsigned char rctx = 0;

	rctx += !!(pc & (NMI_MASK));
	rctx += !!(pc & (NMI_MASK | HARDIRQ_MASK));
	rctx += !!(pc & (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET));

	if (recursion[rctx])
		return -1;

	recursion[rctx]++;
	barrier();

	return rctx;
}

static inline void put_recursion_context(int *recursion, int rctx)
{
	barrier();
	recursion[rctx]--;
}

#ifdef CONFIG_HAVE_PERF_USER_STACK_DUMP
static inline bool arch_perf_have_user_stack_dump(void)
{
	return true;
}

#define perf_user_stack_pointer(regs) user_stack_pointer(regs)
#else
static inline bool arch_perf_have_user_stack_dump(void)
{
	return false;
}

#define perf_user_stack_pointer(regs) 0
#endif /* CONFIG_HAVE_PERF_USER_STACK_DUMP */

#endif /* _KERNEL_EVENTS_INTERNAL_H */