/*
 * Performance counters:
 *
 *  Copyright(C) 2008, Thomas Gleixner <tglx@linutronix.de>
 *  Copyright(C) 2008, Red Hat, Inc., Ingo Molnar
 *
 * Data type definitions, declarations, prototypes.
 *
 * Started by: Thomas Gleixner and Ingo Molnar
 *
 * For licensing details see kernel-base/COPYING
 */
#ifndef _LINUX_PERF_COUNTER_H
#define _LINUX_PERF_COUNTER_H

#include <asm/atomic.h>

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>

struct task_struct;
struct pt_regs;

/*
 * User-space ABI bits:
 */

/*
 * Generalized performance counter event types, used by the hw_event.type
 * parameter of the sys_perf_counter_open() syscall:
 */
enum hw_event_types {
	/*
	 * Common hardware events, generalized by the kernel:
	 */
	PERF_COUNT_CYCLES		=  0,
	PERF_COUNT_INSTRUCTIONS		=  1,
	PERF_COUNT_CACHE_REFERENCES	=  2,
	PERF_COUNT_CACHE_MISSES		=  3,
	PERF_COUNT_BRANCH_INSTRUCTIONS	=  4,
	PERF_COUNT_BRANCH_MISSES	=  5,

	/*
	 * Special "software" counters provided by the kernel, even if
	 * the hardware does not support performance counters. These
	 * counters measure various physical and software events of the
	 * kernel (and allow the profiling of them as well):
	 */
	PERF_COUNT_CPU_CLOCK		= -1,
	PERF_COUNT_TASK_CLOCK		= -2,
	PERF_COUNT_PAGE_FAULTS		= -3,
	PERF_COUNT_CONTEXT_SWITCHES	= -4,
};
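
/*
 * Example: both the hardware and the software IDs above go into the
 * same hw_event.type field (a sketch; hw_event and the syscall are
 * described below). The software IDs are negative so they cannot
 * collide with the generalized hardware IDs, and they work even on
 * CPUs without a PMU:
 *
 *	hw_event.type = PERF_COUNT_CACHE_MISSES;	(PMU-backed)
 *	hw_event.type = PERF_COUNT_TASK_CLOCK;		(kernel software counter)
 */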

/*
 * IRQ-notification data record type:
 */
enum perf_counter_record_type {
	PERF_RECORD_SIMPLE		= 0,
	PERF_RECORD_IRQ			= 1,
	PERF_RECORD_GROUP		= 2,
};
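
/*
 * Intended use of the record types (assumed from the rest of this
 * header, not spelled out here): PERF_RECORD_SIMPLE only maintains a
 * count readable via read(); PERF_RECORD_IRQ additionally stores a
 * data record (e.g. the interrupted instruction pointer) into the
 * counter's perf_data buffer every hw_event.irq_period events;
 * PERF_RECORD_GROUP records the sibling counters of a counter group.
 */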

/*
 * Hardware event to monitor via a performance monitoring counter:
 */
struct perf_counter_hw_event {
	u64			type;

	u64			irq_period;
	u32			record_type;

	u32			disabled     :  1, /* off by default */
				nmi	     :  1, /* NMI sampling   */
				raw	     :  1, /* raw event type */
				__reserved_1 : 29;

	u64			__reserved_2;
};
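
/*
 * A minimal user-space sketch. The exact sys_perf_counter_open()
 * signature (hw_event pointer, pid, cpu, group_fd) is an assumption
 * based on the hw_event parameter noted above and the group support
 * below: pid 0 selects the calling task, cpu -1 any CPU, group_fd -1
 * a new group. Error handling is omitted:
 *
 *	struct perf_counter_hw_event hw_event = {
 *		.type		= PERF_COUNT_INSTRUCTIONS,
 *		.record_type	= PERF_RECORD_SIMPLE,
 *	};
 *	u64 count;
 *	int fd = sys_perf_counter_open(&hw_event, 0, -1, -1);
 *
 *	... run the workload ...
 *	read(fd, &count, sizeof(count));
 */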

/*
 * Kernel-internal data types:
 */

/**
 * struct hw_perf_counter - performance counter hardware details:
 */
struct hw_perf_counter {
	u64			config;
	unsigned long		config_base;
	unsigned long		counter_base;
	int			nmi;
	unsigned int		idx;
	u64			prev_count;
	u64			irq_period;
	s32			next_count;
};

/*
 * Hardcoded buffer length limit for now, for IRQ-fed events:
 */
#define PERF_DATA_BUFLEN	2048

/**
 * struct perf_data - performance counter IRQ data sampling ...
 */
struct perf_data {
	int			len;
	int			rd_idx;
	int			overrun;
	u8			data[PERF_DATA_BUFLEN];
};
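
/*
 * Field use, as assumed from the read()/irq plumbing in struct
 * perf_counter below: len counts the valid bytes in data[], rd_idx is
 * the read() cursor into them and overrun counts records dropped when
 * the buffer was full. A copy-out sketch (the real helper lives in the
 * perf_counter core, not in this header):
 *
 *	count = min(count, (size_t)data->len);
 *	copy_to_user(buf, data->data + data->rd_idx, count);
 *	data->len -= count;
 *	data->rd_idx = data->len ? data->rd_idx + count : 0;
 */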

struct perf_counter;

/**
 * struct hw_perf_counter_ops - performance counter hw ops
 */
struct hw_perf_counter_ops {
	void (*hw_perf_counter_enable)	(struct perf_counter *counter);
	void (*hw_perf_counter_disable)	(struct perf_counter *counter);
	void (*hw_perf_counter_read)	(struct perf_counter *counter);
};
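
/*
 * A sketch of an architecture backend filling in these ops (the
 * function and variable names here are illustrative; the real ones
 * live in the arch code):
 *
 *	static struct hw_perf_counter_ops x86_perf_counter_ops = {
 *		.hw_perf_counter_enable		= x86_perf_counter_enable,
 *		.hw_perf_counter_disable	= x86_perf_counter_disable,
 *		.hw_perf_counter_read		= x86_perf_counter_read,
 *	};
 *
 * hw_perf_counter_init() (declared below) is expected to return the
 * ops matching the counter's hw_event, or NULL on failure.
 */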

/**
 * struct perf_counter - performance counter kernel representation:
 */
struct perf_counter {
	struct list_head		list_entry;
	struct list_head		sibling_list;
	struct perf_counter		*group_leader;
	struct hw_perf_counter_ops	*hw_ops;

	int				active;
#if BITS_PER_LONG == 64
	atomic64_t			count;
#else
	atomic_t			count32[2];
#endif
	struct perf_counter_hw_event	hw_event;
	struct hw_perf_counter		hw;

	struct perf_counter_context	*ctx;
	struct task_struct		*task;

	/*
	 * Protect attach/detach:
	 */
	struct mutex			mutex;

	int				oncpu;
	int				cpu;

	/* read() / irq related data */
	wait_queue_head_t		waitq;
	/* optional: for NMIs */
	int				wakeup_pending;
	struct perf_data		*irqdata;
	struct perf_data		*usrdata;
	struct perf_data		data[2];
};
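
/*
 * On 32-bit kernels ->count is split across the two count32[] words;
 * a consistent 64-bit snapshot can be taken by re-reading the high
 * word (a sketch, assuming the updater propagates the carry into
 * count32[1]; the real helpers live in the perf_counter core):
 *
 *	do {
 *		cnth = atomic_read(&counter->count32[1]);
 *		cntl = atomic_read(&counter->count32[0]);
 *	} while (cnth != atomic_read(&counter->count32[1]));
 *
 *	count = cntl | ((u64) cnth) << 32;
 */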

/**
 * struct perf_counter_context - counter context structure
 *
 * Used as a container for task counters and CPU counters as well:
 */
struct perf_counter_context {
#ifdef CONFIG_PERF_COUNTERS
	/*
	 * Protect the list of counters:
	 */
	spinlock_t		lock;

	struct list_head	counter_list;
	int			nr_counters;
	int			nr_active;
	struct task_struct	*task;
#endif
};

/**
 * struct perf_cpu_context - per cpu counter context structure
 */
struct perf_cpu_context {
	struct perf_counter_context	ctx;
	struct perf_counter_context	*task_ctx;
	int				active_oncpu;
	int				max_pertask;
};

/*
 * Set by architecture code:
 */
extern int perf_max_counters;

#ifdef CONFIG_PERF_COUNTERS
extern struct hw_perf_counter_ops *
hw_perf_counter_init(struct perf_counter *counter);

extern void perf_counter_task_sched_in(struct task_struct *task, int cpu);
extern void perf_counter_task_sched_out(struct task_struct *task, int cpu);
extern void perf_counter_task_tick(struct task_struct *task, int cpu);
extern void perf_counter_init_task(struct task_struct *task);
extern void perf_counter_notify(struct pt_regs *regs);
extern void perf_counter_print_debug(void);
extern void hw_perf_restore_ctrl(u64 ctrl);
extern u64 hw_perf_disable_all(void);
#else
static inline void
perf_counter_task_sched_in(struct task_struct *task, int cpu)		{ }
static inline void
perf_counter_task_sched_out(struct task_struct *task, int cpu)		{ }
static inline void
perf_counter_task_tick(struct task_struct *task, int cpu)		{ }
static inline void perf_counter_init_task(struct task_struct *task)	{ }
static inline void perf_counter_notify(struct pt_regs *regs)		{ }
static inline void perf_counter_print_debug(void)			{ }
static inline void hw_perf_restore_ctrl(u64 ctrl)			{ }
static inline u64 hw_perf_disable_all(void)		{ return 0; }
#endif
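
/*
 * hw_perf_disable_all() and hw_perf_restore_ctrl() pair up as a
 * save/restore bracket around code that must not race with counter
 * updates (a sketch; the !CONFIG_PERF_COUNTERS stubs above make this
 * a no-op returning 0):
 *
 *	u64 perf_flags;
 *
 *	perf_flags = hw_perf_disable_all();
 *	... critical section ...
 *	hw_perf_restore_ctrl(perf_flags);
 */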

#endif /* _LINUX_PERF_COUNTER_H */