/*
 * Performance counters:
 *
 *  Copyright(C) 2008, Thomas Gleixner <tglx@linutronix.de>
 *  Copyright(C) 2008, Red Hat, Inc., Ingo Molnar
 *
 * Data type definitions, declarations, prototypes.
 *
 *  Started by: Thomas Gleixner and Ingo Molnar
 *
 * For licencing details see kernel-base/COPYING
 */
#ifndef _LINUX_PERF_COUNTER_H
#define _LINUX_PERF_COUNTER_H

#include <asm/atomic.h>

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>

struct task_struct;

/*
 * Generalized hardware event types, used by the hw_event_type parameter
 * of the sys_perf_counter_open() syscall:
 */
enum hw_event_types {
	PERF_COUNT_CYCLES,
	PERF_COUNT_INSTRUCTIONS,
	PERF_COUNT_CACHE_REFERENCES,
	PERF_COUNT_CACHE_MISSES,
	PERF_COUNT_BRANCH_INSTRUCTIONS,
	PERF_COUNT_BRANCH_MISSES,
	/*
	 * If this bit is set in the type, then trigger NMI sampling:
	 */
	PERF_COUNT_NMI		= (1 << 30),
	/*
	 * If this bit is set, the event is a raw hw event
	 * (presumably configured via hw_raw_ctrl - confirm against
	 * the arch PMU code).
	 *
	 * Note: written as (int)(1U << 31) because (1 << 31) shifts
	 * into the sign bit of int, which is undefined behaviour in
	 * ISO C; the unsigned shift + conversion yields the same bit
	 * pattern without UB.
	 */
	PERF_COUNT_RAW		= (int)(1U << 31),
};

/*
 * IRQ-notification data record type:
 *
 * Stored in perf_counter::record_type; selects what gets recorded
 * into the struct perf_data buffers when a counter IRQ fires.
 */
enum perf_record_type {
	PERF_RECORD_SIMPLE,	/* wakeup only, no data record - TODO confirm */
	PERF_RECORD_IRQ,	/* record data at IRQ time */
	PERF_RECORD_GROUP,	/* presumably records the whole counter group - verify */
};

/*
 * Hardware event descriptor, as passed in by userspace
 * (NOTE(review): field names mirror the sys_perf_counter_open()
 * parameters - verify against the syscall implementation):
 */
struct perf_counter_event {
	u32	hw_event_type;		/* enum hw_event_types, plus the NMI/RAW bits */
	u32	hw_event_period;	/* presumably the sampling period - confirm units */
	u64	hw_raw_ctrl;		/* raw hw control word, used with PERF_COUNT_RAW */
};

/**
 * struct hw_perf_counter - performance counter hardware details
 *
 * Per-counter state owned by the architecture PMU code; the generic
 * layer treats these fields as opaque.
 */
struct hw_perf_counter {
	u64		config;		/* hw event configuration value */
	unsigned long	config_base;	/* presumably base of the config register/MSR - confirm per arch */
	unsigned long	counter_base;	/* presumably base of the count register/MSR - confirm per arch */
	int		nmi;		/* nonzero: sample via NMI (see PERF_COUNT_NMI) */
	unsigned int	idx;		/* hw counter index */
	u64		prev_count;	/* previously observed raw hw count */
	s32		next_count;
	u64		irq_period;	/* period between counter IRQs */
};

/*
 * Hardcoded buffer length limit for now, for IRQ-fed events:
 */
#define PERF_DATA_BUFLEN	2048

/**
 * struct perf_data - performance counter IRQ data sampling ...
 *
 * Flat byte buffer that IRQ-time records are written into and that
 * read() drains (see perf_counter::irqdata / usrdata).
 */
struct perf_data {
	int	len;			/* bytes of valid data in @data */
	int	rd_idx;			/* current read position in @data */
	int	overrun;		/* overrun indicator - TODO confirm whether count or flag */
	u8	data[PERF_DATA_BUFLEN];	/* the sampled records */
};

/**
 * struct perf_counter - performance counter kernel representation:
 */
struct perf_counter {
	struct list_head		list;	/* entry in perf_counter_context::counters */
	int				active;	/* nonzero while counting */
	/*
	 * 64-bit counter value: a native atomic64_t where the
	 * architecture has one, otherwise split into two 32-bit atomics:
	 */
#if BITS_PER_LONG == 64
	atomic64_t			count;
#else
	atomic_t			count32[2];
#endif
	u64				__irq_period;	/* relation to hw.irq_period unclear - TODO confirm */

	struct hw_perf_counter		hw;	/* arch/PMU state for this counter */

	struct perf_counter_context	*ctx;	/* context this counter is attached to */
	struct task_struct		*task;	/* monitored task, presumably NULL for cpu counters */

	/*
	 * Protect attach/detach:
	 */
	struct mutex			mutex;

	int				oncpu;	/* cpu the counter is currently running on */
	int				cpu;	/* requested cpu binding */

	s32				hw_event_type;	/* enum hw_event_types value, plus NMI/RAW bits */
	enum perf_record_type		record_type;	/* what to record on counter IRQs */

	/* read() / irq related data */
	wait_queue_head_t		waitq;
	/* optional: for NMIs */
	int				wakeup_pending;
	/*
	 * NOTE(review): looks like a double-buffering scheme - IRQ
	 * context fills *irqdata while read() drains *usrdata, with the
	 * pointers flipping between the data[] slots; confirm against
	 * the buffer-flip code in kernel/perf_counter.c.
	 */
	struct perf_data		*irqdata;
	struct perf_data		*usrdata;
	struct perf_data		data[2];
};

/**
 * struct perf_counter_context - counter context structure
 *
 * Used as a container for task counters and CPU counters as well:
 */
struct perf_counter_context {
#ifdef CONFIG_PERF_COUNTERS
	/*
	 * Protect the list of counters:
	 */
	spinlock_t		lock;
	struct list_head	counters;	/* list of perf_counter::list entries */
	int			nr_counters;	/* total counters on @counters */
	int			nr_active;	/* counters currently scheduled on hw */
	struct task_struct	*task;		/* owning task; presumably NULL for per-cpu contexts */
#endif
};

/**
 * struct perf_counter_cpu_context - per cpu counter context structure
 */
struct perf_cpu_context {
	struct perf_counter_context	ctx;		/* this cpu's own counters */
	struct perf_counter_context	*task_ctx;	/* context of the task currently on this cpu - TODO confirm */
	int				active_oncpu;	/* counters currently active on this cpu */
	int				max_pertask;	/* max per-task counters schedulable here */
};

/*
 * Set by architecture code:
 * (presumably the number of hardware counters the PMU provides)
 */
extern int perf_max_counters;

#ifdef CONFIG_PERF_COUNTERS
/* Hooks called from scheduler context-switch, fork and timer-tick paths: */
extern void perf_counter_task_sched_in(struct task_struct *task, int cpu);
extern void perf_counter_task_sched_out(struct task_struct *task, int cpu);
extern void perf_counter_task_tick(struct task_struct *task, int cpu);
extern void perf_counter_init_task(struct task_struct *task);
extern void perf_counter_notify(struct pt_regs *regs);
extern void perf_counter_print_debug(void);
/*
 * hw_perf_disable_all() disables all hw counters and returns the
 * previous control state; pass that value back to
 * hw_perf_restore_ctrl() to re-enable them:
 */
extern void hw_perf_restore_ctrl(u64 ctrl);
extern u64 hw_perf_disable_all(void);
#else
/*
 * !CONFIG_PERF_COUNTERS: empty stubs, so callers need no #ifdefs:
 */
static inline void
perf_counter_task_sched_in(struct task_struct *task, int cpu) { }
static inline void
perf_counter_task_sched_out(struct task_struct *task, int cpu) { }
static inline void
perf_counter_task_tick(struct task_struct *task, int cpu) { }
static inline void perf_counter_init_task(struct task_struct *task) { }
static inline void perf_counter_notify(struct pt_regs *regs) { }
static inline void perf_counter_print_debug(void) { }
static inline void hw_perf_restore_ctrl(u64 ctrl) { }
static inline u64 hw_perf_disable_all(void) { return 0; }
#endif

#endif /* _LINUX_PERF_COUNTER_H */