#ifndef _KERNEL_EVENTS_INTERNAL_H
#define _KERNEL_EVENTS_INTERNAL_H

#include <linux/hardirq.h>

/* Buffer handling */

#define RING_BUFFER_WRITABLE		0x01

struct ring_buffer {
	atomic_t			refcount;
	struct rcu_head			rcu_head;
#ifdef CONFIG_PERF_USE_VMALLOC
	struct work_struct		work;
	int				page_order;	/* allocation order  */
#endif
	int				nr_pages;	/* nr of data pages  */
	int				writable;	/* are we writable   */

	atomic_t			poll;		/* POLL_ for wakeups */

	local_t				head;		/* write position    */
	local_t				nest;		/* nested writers    */
	local_t				events;		/* event limit       */
	local_t				wakeup;		/* wakeup stamp      */
	local_t				lost;		/* nr records lost   */

	long				watermark;	/* wakeup watermark  */
	/* state for poll() wakeups */
	spinlock_t			event_lock;
	struct list_head		event_list;

	struct perf_event_mmap_page	*user_page;
	void				*data_pages[0];
};
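
/*
 * Illustrative sketch, not part of the original header: data_pages[] is a
 * zero-length trailing array, so the struct and its table of page pointers
 * live in a single allocation. A minimal sketch of how such a block is
 * sized; "rb_struct_size" is a hypothetical helper, not a kernel symbol.
 */
static inline size_t rb_struct_size(int nr_pages)
{
	/* the struct header plus one pointer slot per data page */
	return sizeof(struct ring_buffer) + nr_pages * sizeof(void *);
}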

extern void rb_free(struct ring_buffer *rb);
extern struct ring_buffer *
rb_alloc(int nr_pages, long watermark, int cpu, int flags);
extern void perf_event_wakeup(struct perf_event *event);

extern void
perf_event_header__init_id(struct perf_event_header *header,
			   struct perf_sample_data *data,
			   struct perf_event *event);
extern void
perf_event__output_id_sample(struct perf_event *event,
			     struct perf_output_handle *handle,
			     struct perf_sample_data *sample);

extern struct page *
perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff);
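
/*
 * Usage sketch (illustrative only): pairing rb_alloc() with rb_free().
 * The argument values here are hypothetical, and "rb_alloc_example" is
 * not a kernel symbol; in the kernel proper the buffer is released via
 * its refcount rather than by calling rb_free() directly.
 */
static inline struct ring_buffer *rb_alloc_example(void)
{
	/* 8 data pages, default watermark, no cpu binding, writable */
	return rb_alloc(8, 0, -1, RING_BUFFER_WRITABLE);
}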

#ifdef CONFIG_PERF_USE_VMALLOC
/*
 * Back perf_mmap() with vmalloc memory.
 *
 * Required for architectures that have d-cache aliasing issues.
 */

static inline int page_order(struct ring_buffer *rb)
{
	return rb->page_order;
}

#else

static inline int page_order(struct ring_buffer *rb)
{
	return 0;
}
#endif

static inline unsigned long perf_data_size(struct ring_buffer *rb)
{
	return rb->nr_pages << (PAGE_SHIFT + page_order(rb));
}
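
/*
 * Worked example (illustrative values only): with 4 KiB pages
 * (PAGE_SHIFT == 12), nr_pages == 8 and page_order() == 0,
 * perf_data_size() returns 8 << 12 == 32768 bytes. On a
 * CONFIG_PERF_USE_VMALLOC build with page_order() == 2, the same
 * rb would cover 8 << 14 == 131072 bytes of data.
 */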

static inline void
__output_copy(struct perf_output_handle *handle,
	      const void *buf, unsigned int len)
{
	do {
		/* copy no more than what is left in the current page */
		unsigned long size = min_t(unsigned long, handle->size, len);

		memcpy(handle->addr, buf, size);

		len -= size;
		handle->addr += size;
		buf += size;
		handle->size -= size;
		if (!handle->size) {
			struct ring_buffer *rb = handle->rb;

			/*
			 * Page exhausted: advance to the next data page,
			 * wrapping around; nr_pages is a power of two, so
			 * the mask is equivalent to a modulo.
			 */
			handle->page++;
			handle->page &= rb->nr_pages - 1;
			handle->addr = rb->data_pages[handle->page];
			handle->size = PAGE_SIZE << page_order(rb);
		}
	} while (len);
}
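
/*
 * Usage sketch (illustrative, not part of the original header): a writer
 * that has reserved space with perf_output_begin() can stream a record
 * through __output_copy(); the loop above crosses data-page boundaries
 * transparently. "output_record_example" and its arguments are
 * hypothetical.
 */
static inline void
output_record_example(struct perf_output_handle *handle,
		      const struct perf_event_header *header,
		      const void *payload, unsigned int payload_len)
{
	__output_copy(handle, header, sizeof(*header));
	__output_copy(handle, payload, payload_len);
}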

/* Callchain handling */
extern struct perf_callchain_entry *
perf_callchain(struct perf_event *event, struct pt_regs *regs);
extern int get_callchain_buffers(void);
extern void put_callchain_buffers(void);

static inline int get_recursion_context(int *recursion)
{
	int rctx;

	/*
	 * Map the current execution context to a recursion slot:
	 * 3 = NMI, 2 = hardirq, 1 = softirq, 0 = task.
	 */
	if (in_nmi())
		rctx = 3;
	else if (in_irq())
		rctx = 2;
	else if (in_softirq())
		rctx = 1;
	else
		rctx = 0;

	/* already active in this context: refuse to recurse */
	if (recursion[rctx])
		return -1;

	recursion[rctx]++;
	barrier();	/* order the counter bump before the protected work */

	return rctx;
}

static inline void put_recursion_context(int *recursion, int rctx)
{
	barrier();	/* order the protected work before dropping the counter */
	recursion[rctx]--;
}
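
/*
 * Usage sketch (illustrative): callers pair the two helpers around work
 * that must not re-enter from the same context. "my_recursion" is a
 * hypothetical per-context counter array and "recursion_example" is not
 * a kernel symbol.
 */
static inline void recursion_example(int *my_recursion)
{
	int rctx = get_recursion_context(my_recursion);

	if (rctx < 0)
		return;		/* already running in this context */

	/* ... emit the event for context rctx ... */

	put_recursion_context(my_recursion, rctx);
}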

#endif /* _KERNEL_EVENTS_INTERNAL_H */