Ingo Molnarcdd6c482009-09-21 12:02:48 +02001/*
Ingo Molnar57c0c152009-09-21 12:20:38 +02002 * Performance events core code:
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003 *
4 * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
5 * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
6 * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
7 * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
8 *
Ingo Molnar57c0c152009-09-21 12:20:38 +02009 * For licensing details see kernel-base/COPYING
Ingo Molnarcdd6c482009-09-21 12:02:48 +020010 */
11
12#include <linux/fs.h>
13#include <linux/mm.h>
14#include <linux/cpu.h>
15#include <linux/smp.h>
16#include <linux/file.h>
17#include <linux/poll.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090018#include <linux/slab.h>
Frederic Weisbecker76e1d902010-04-05 15:35:57 +020019#include <linux/hash.h>
Ingo Molnarcdd6c482009-09-21 12:02:48 +020020#include <linux/sysfs.h>
21#include <linux/dcache.h>
22#include <linux/percpu.h>
23#include <linux/ptrace.h>
24#include <linux/vmstat.h>
Peter Zijlstra906010b2009-09-21 16:08:49 +020025#include <linux/vmalloc.h>
Ingo Molnarcdd6c482009-09-21 12:02:48 +020026#include <linux/hardirq.h>
27#include <linux/rculist.h>
28#include <linux/uaccess.h>
29#include <linux/syscalls.h>
30#include <linux/anon_inodes.h>
31#include <linux/kernel_stat.h>
32#include <linux/perf_event.h>
Li Zefan6fb29152009-10-15 11:21:42 +080033#include <linux/ftrace_event.h>
Frederic Weisbecker24f1e32c2009-09-09 19:22:48 +020034#include <linux/hw_breakpoint.h>
Ingo Molnarcdd6c482009-09-21 12:02:48 +020035
36#include <asm/irq_regs.h>
37
38/*
39 * Each CPU has a list of per CPU events:
40 */
Xiao Guangrongaa5452d2009-12-09 11:28:13 +080041static DEFINE_PER_CPU(struct perf_cpu_context, perf_cpu_context);
Ingo Molnarcdd6c482009-09-21 12:02:48 +020042
43int perf_max_events __read_mostly = 1;
44static int perf_reserved_percpu __read_mostly;
45static int perf_overcommit __read_mostly = 1;
46
47static atomic_t nr_events __read_mostly;
48static atomic_t nr_mmap_events __read_mostly;
49static atomic_t nr_comm_events __read_mostly;
50static atomic_t nr_task_events __read_mostly;
51
52/*
53 * perf event paranoia level:
54 * -1 - not paranoid at all
55 * 0 - disallow raw tracepoint access for unpriv
56 * 1 - disallow cpu events for unpriv
57 * 2 - disallow kernel profiling for unpriv
58 */
59int sysctl_perf_event_paranoid __read_mostly = 1;
60
Ingo Molnarcdd6c482009-09-21 12:02:48 +020061int sysctl_perf_event_mlock __read_mostly = 512; /* 'free' kb per user */
62
63/*
64 * max perf event sample rate
65 */
66int sysctl_perf_event_sample_rate __read_mostly = 100000;
67
68static atomic64_t perf_event_id;
69
70/*
71 * Lock for (sysadmin-configurable) event reservations:
72 */
73static DEFINE_SPINLOCK(perf_resource_lock);
74
75/*
76 * Architecture provided APIs - weak aliases:
77 */
78extern __weak const struct pmu *hw_perf_event_init(struct perf_event *event)
79{
80 return NULL;
81}
82
83void __weak hw_perf_disable(void) { barrier(); }
84void __weak hw_perf_enable(void) { barrier(); }
85
Ingo Molnarcdd6c482009-09-21 12:02:48 +020086void __weak perf_event_print_debug(void) { }
87
88static DEFINE_PER_CPU(int, perf_disable_count);
89
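/*
 * perf_disable()/perf_enable() nest: only the outermost disable calls
 * hw_perf_disable(), and only the matching outermost enable calls
 * hw_perf_enable(); perf_disable_count tracks the per-cpu nesting depth.
 */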
Ingo Molnarcdd6c482009-09-21 12:02:48 +020090void perf_disable(void)
91{
Peter Zijlstra32975a42010-03-06 19:49:19 +010092 if (!__get_cpu_var(perf_disable_count)++)
93 hw_perf_disable();
Ingo Molnarcdd6c482009-09-21 12:02:48 +020094}
95
96void perf_enable(void)
97{
Peter Zijlstra32975a42010-03-06 19:49:19 +010098 if (!--__get_cpu_var(perf_disable_count))
Ingo Molnarcdd6c482009-09-21 12:02:48 +020099 hw_perf_enable();
100}
101
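/*
 * Context reference counting: get_ctx()/put_ctx() pin a perf_event_context.
 * The final put drops the references held on the parent context and the
 * owning task and frees the context via RCU (free_ctx), so lockless
 * readers under rcu_read_lock() remain safe.
 */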
102static void get_ctx(struct perf_event_context *ctx)
103{
104 WARN_ON(!atomic_inc_not_zero(&ctx->refcount));
105}
106
107static void free_ctx(struct rcu_head *head)
108{
109 struct perf_event_context *ctx;
110
111 ctx = container_of(head, struct perf_event_context, rcu_head);
112 kfree(ctx);
113}
114
115static void put_ctx(struct perf_event_context *ctx)
116{
117 if (atomic_dec_and_test(&ctx->refcount)) {
118 if (ctx->parent_ctx)
119 put_ctx(ctx->parent_ctx);
120 if (ctx->task)
121 put_task_struct(ctx->task);
122 call_rcu(&ctx->rcu_head, free_ctx);
123 }
124}
125
126static void unclone_ctx(struct perf_event_context *ctx)
127{
128 if (ctx->parent_ctx) {
129 put_ctx(ctx->parent_ctx);
130 ctx->parent_ctx = NULL;
131 }
132}
133
134/*
135 * If we inherit events we want to return the parent event id
136 * to userspace.
137 */
138static u64 primary_event_id(struct perf_event *event)
139{
140 u64 id = event->id;
141
142 if (event->parent)
143 id = event->parent->id;
144
145 return id;
146}
147
148/*
149 * Get the perf_event_context for a task and lock it.
150 * This has to cope with the fact that until it is locked,
151 * the context could get moved to another task.
152 */
153static struct perf_event_context *
154perf_lock_task_context(struct task_struct *task, unsigned long *flags)
155{
156 struct perf_event_context *ctx;
157
158 rcu_read_lock();
159 retry:
160 ctx = rcu_dereference(task->perf_event_ctxp);
161 if (ctx) {
162 /*
163 * If this context is a clone of another, it might
164 * get swapped for another underneath us by
165 * perf_event_task_sched_out, though the
166 * rcu_read_lock() protects us from any context
167 * getting freed. Lock the context and check if it
168 * got swapped before we could get the lock, and retry
169 * if so. If we locked the right context, then it
170 * can't get swapped on us any more.
171 */
Thomas Gleixnere625cce12009-11-17 18:02:06 +0100172 raw_spin_lock_irqsave(&ctx->lock, *flags);
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200173 if (ctx != rcu_dereference(task->perf_event_ctxp)) {
Thomas Gleixnere625cce12009-11-17 18:02:06 +0100174 raw_spin_unlock_irqrestore(&ctx->lock, *flags);
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200175 goto retry;
176 }
177
178 if (!atomic_inc_not_zero(&ctx->refcount)) {
Thomas Gleixnere625cce12009-11-17 18:02:06 +0100179 raw_spin_unlock_irqrestore(&ctx->lock, *flags);
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200180 ctx = NULL;
181 }
182 }
183 rcu_read_unlock();
184 return ctx;
185}
186
187/*
188 * Get the context for a task and increment its pin_count so it
189 * can't get swapped to another task. This also increments its
190 * reference count so that the context can't get freed.
191 */
192static struct perf_event_context *perf_pin_task_context(struct task_struct *task)
193{
194 struct perf_event_context *ctx;
195 unsigned long flags;
196
197 ctx = perf_lock_task_context(task, &flags);
198 if (ctx) {
199 ++ctx->pin_count;
Thomas Gleixnere625cce12009-11-17 18:02:06 +0100200 raw_spin_unlock_irqrestore(&ctx->lock, flags);
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200201 }
202 return ctx;
203}
204
205static void perf_unpin_context(struct perf_event_context *ctx)
206{
207 unsigned long flags;
208
Thomas Gleixnere625cce12009-11-17 18:02:06 +0100209 raw_spin_lock_irqsave(&ctx->lock, flags);
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200210 --ctx->pin_count;
Thomas Gleixnere625cce12009-11-17 18:02:06 +0100211 raw_spin_unlock_irqrestore(&ctx->lock, flags);
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200212 put_ctx(ctx);
213}
214
Peter Zijlstraf67218c2009-11-23 11:37:27 +0100215static inline u64 perf_clock(void)
216{
Peter Zijlstra24691ea2010-02-26 16:36:23 +0100217 return cpu_clock(raw_smp_processor_id());
Peter Zijlstraf67218c2009-11-23 11:37:27 +0100218}
219
220/*
221 * Update the record of the current time in a context.
222 */
223static void update_context_time(struct perf_event_context *ctx)
224{
225 u64 now = perf_clock();
226
227 ctx->time += now - ctx->timestamp;
228 ctx->timestamp = now;
229}
230
231/*
232 * Update the total_time_enabled and total_time_running fields for an event.
233 */
234static void update_event_times(struct perf_event *event)
235{
236 struct perf_event_context *ctx = event->ctx;
237 u64 run_end;
238
239 if (event->state < PERF_EVENT_STATE_INACTIVE ||
240 event->group_leader->state < PERF_EVENT_STATE_INACTIVE)
241 return;
242
Peter Zijlstraacd1d7c2009-11-23 15:00:36 +0100243 if (ctx->is_active)
244 run_end = ctx->time;
245 else
246 run_end = event->tstamp_stopped;
247
248 event->total_time_enabled = run_end - event->tstamp_enabled;
Peter Zijlstraf67218c2009-11-23 11:37:27 +0100249
250 if (event->state == PERF_EVENT_STATE_INACTIVE)
251 run_end = event->tstamp_stopped;
252 else
253 run_end = ctx->time;
254
255 event->total_time_running = run_end - event->tstamp_running;
256}
257
Peter Zijlstra96c21a42010-05-11 16:19:10 +0200258/*
259 * Update total_time_enabled and total_time_running for all events in a group.
260 */
261static void update_group_times(struct perf_event *leader)
262{
263 struct perf_event *event;
264
265 update_event_times(leader);
266 list_for_each_entry(event, &leader->sibling_list, group_entry)
267 update_event_times(event);
268}
269
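/*
 * Group leaders live on one of two lists per context: pinned groups
 * (attr.pinned set) and flexible groups. Pinned groups are scheduled in
 * first, and only the flexible groups are rotated on the tick.
 */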
Frederic Weisbecker889ff012010-01-09 20:04:47 +0100270static struct list_head *
271ctx_group_list(struct perf_event *event, struct perf_event_context *ctx)
272{
273 if (event->attr.pinned)
274 return &ctx->pinned_groups;
275 else
276 return &ctx->flexible_groups;
277}
278
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200279/*
280 * Add an event to the lists for its context.
281 * Must be called with ctx->mutex and ctx->lock held.
282 */
283static void
284list_add_event(struct perf_event *event, struct perf_event_context *ctx)
285{
Peter Zijlstra8a495422010-05-27 15:47:49 +0200286 WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT);
287 event->attach_state |= PERF_ATTACH_CONTEXT;
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200288
289 /*
Peter Zijlstra8a495422010-05-27 15:47:49 +0200290 * If we're a standalone event or group leader, we go to the context
291 * list; group events are kept attached to the group so that
292 * perf_group_detach can, at all times, locate all siblings.
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200293 */
Peter Zijlstra8a495422010-05-27 15:47:49 +0200294 if (event->group_leader == event) {
Frederic Weisbecker889ff012010-01-09 20:04:47 +0100295 struct list_head *list;
296
Frederic Weisbeckerd6f962b2010-01-10 01:25:51 +0100297 if (is_software_event(event))
298 event->group_flags |= PERF_GROUP_SOFTWARE;
299
Frederic Weisbecker889ff012010-01-09 20:04:47 +0100300 list = ctx_group_list(event, ctx);
301 list_add_tail(&event->group_entry, list);
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200302 }
303
304 list_add_rcu(&event->event_entry, &ctx->event_list);
305 ctx->nr_events++;
306 if (event->attr.inherit_stat)
307 ctx->nr_stat++;
308}
309
Peter Zijlstra8a495422010-05-27 15:47:49 +0200310static void perf_group_attach(struct perf_event *event)
311{
312 struct perf_event *group_leader = event->group_leader;
313
314 WARN_ON_ONCE(event->attach_state & PERF_ATTACH_GROUP);
315 event->attach_state |= PERF_ATTACH_GROUP;
316
317 if (group_leader == event)
318 return;
319
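	/*
	 * A group keeps PERF_GROUP_SOFTWARE only while every member is a
	 * software event; attaching a hardware event clears the flag.
	 */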
320 if (group_leader->group_flags & PERF_GROUP_SOFTWARE &&
321 !is_software_event(event))
322 group_leader->group_flags &= ~PERF_GROUP_SOFTWARE;
323
324 list_add_tail(&event->group_entry, &group_leader->sibling_list);
325 group_leader->nr_siblings++;
326}
327
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200328/*
329 * Remove an event from the lists for its context.
330 * Must be called with ctx->mutex and ctx->lock held.
331 */
332static void
333list_del_event(struct perf_event *event, struct perf_event_context *ctx)
334{
Peter Zijlstra8a495422010-05-27 15:47:49 +0200335 /*
336 * We can have double detach due to exit/hot-unplug + close.
337 */
338 if (!(event->attach_state & PERF_ATTACH_CONTEXT))
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200339 return;
Peter Zijlstra8a495422010-05-27 15:47:49 +0200340
341 event->attach_state &= ~PERF_ATTACH_CONTEXT;
342
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200343 ctx->nr_events--;
344 if (event->attr.inherit_stat)
345 ctx->nr_stat--;
346
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200347 list_del_rcu(&event->event_entry);
348
Peter Zijlstra8a495422010-05-27 15:47:49 +0200349 if (event->group_leader == event)
350 list_del_init(&event->group_entry);
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200351
Peter Zijlstra96c21a42010-05-11 16:19:10 +0200352 update_group_times(event);
Stephane Eranianb2e74a22009-11-26 09:24:30 -0800353
354 /*
355 * If the event was in an error state, keep it
356 * that way; otherwise bogus counts will be
357 * returned on read(). The only way to get out
358 * of the error state is by explicitly re-enabling
359 * the event.
360 */
361 if (event->state > PERF_EVENT_STATE_OFF)
362 event->state = PERF_EVENT_STATE_OFF;
Peter Zijlstra050735b2010-05-11 11:51:53 +0200363}
364
Peter Zijlstra8a495422010-05-27 15:47:49 +0200365static void perf_group_detach(struct perf_event *event)
Peter Zijlstra050735b2010-05-11 11:51:53 +0200366{
367 struct perf_event *sibling, *tmp;
Peter Zijlstra8a495422010-05-27 15:47:49 +0200368 struct list_head *list = NULL;
369
370 /*
371 * We can have double detach due to exit/hot-unplug + close.
372 */
373 if (!(event->attach_state & PERF_ATTACH_GROUP))
374 return;
375
376 event->attach_state &= ~PERF_ATTACH_GROUP;
377
378 /*
379 * If this is a sibling, remove it from its group.
380 */
381 if (event->group_leader != event) {
382 list_del_init(&event->group_entry);
383 event->group_leader->nr_siblings--;
384 return;
385 }
386
387 if (!list_empty(&event->group_entry))
388 list = &event->group_entry;
Peter Zijlstra2e2af502009-11-23 11:37:25 +0100389
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200390 /*
391 * If this was a group event with sibling events then
392 * upgrade the siblings to singleton events by adding them
Peter Zijlstra8a495422010-05-27 15:47:49 +0200393 * to whatever list we are on.
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200394 */
395 list_for_each_entry_safe(sibling, tmp, &event->sibling_list, group_entry) {
Peter Zijlstra8a495422010-05-27 15:47:49 +0200396 if (list)
397 list_move_tail(&sibling->group_entry, list);
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200398 sibling->group_leader = sibling;
Frederic Weisbeckerd6f962b2010-01-10 01:25:51 +0100399
400 /* Inherit group flags from the previous leader */
401 sibling->group_flags = event->group_flags;
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200402 }
403}
404
405static void
406event_sched_out(struct perf_event *event,
407 struct perf_cpu_context *cpuctx,
408 struct perf_event_context *ctx)
409{
410 if (event->state != PERF_EVENT_STATE_ACTIVE)
411 return;
412
413 event->state = PERF_EVENT_STATE_INACTIVE;
414 if (event->pending_disable) {
415 event->pending_disable = 0;
416 event->state = PERF_EVENT_STATE_OFF;
417 }
418 event->tstamp_stopped = ctx->time;
419 event->pmu->disable(event);
420 event->oncpu = -1;
421
422 if (!is_software_event(event))
423 cpuctx->active_oncpu--;
424 ctx->nr_active--;
425 if (event->attr.exclusive || !cpuctx->active_oncpu)
426 cpuctx->exclusive = 0;
427}
428
429static void
430group_sched_out(struct perf_event *group_event,
431 struct perf_cpu_context *cpuctx,
432 struct perf_event_context *ctx)
433{
434 struct perf_event *event;
435
436 if (group_event->state != PERF_EVENT_STATE_ACTIVE)
437 return;
438
439 event_sched_out(group_event, cpuctx, ctx);
440
441 /*
442 * Schedule out siblings (if any):
443 */
444 list_for_each_entry(event, &group_event->sibling_list, group_entry)
445 event_sched_out(event, cpuctx, ctx);
446
447 if (group_event->attr.exclusive)
448 cpuctx->exclusive = 0;
449}
450
451/*
452 * Cross CPU call to remove a performance event
453 *
454 * We disable the event on the hardware level first. After that we
455 * remove it from the context list.
456 */
457static void __perf_event_remove_from_context(void *info)
458{
459 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
460 struct perf_event *event = info;
461 struct perf_event_context *ctx = event->ctx;
462
463 /*
464 * If this is a task context, we need to check whether it is
465 * the current task context of this cpu. If not it has been
466 * scheduled out before the smp call arrived.
467 */
468 if (ctx->task && cpuctx->task_ctx != ctx)
469 return;
470
Thomas Gleixnere625cce12009-11-17 18:02:06 +0100471 raw_spin_lock(&ctx->lock);
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200472 /*
473 * Protect the list operation against NMI by disabling the
474 * events on a global level.
475 */
476 perf_disable();
477
478 event_sched_out(event, cpuctx, ctx);
479
480 list_del_event(event, ctx);
481
482 if (!ctx->task) {
483 /*
484 * Allow more per task events with respect to the
485 * reservation:
486 */
487 cpuctx->max_pertask =
488 min(perf_max_events - ctx->nr_events,
489 perf_max_events - perf_reserved_percpu);
490 }
491
492 perf_enable();
Thomas Gleixnere625cce12009-11-17 18:02:06 +0100493 raw_spin_unlock(&ctx->lock);
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200494}
495
496
497/*
498 * Remove the event from a task's (or a CPU's) list of events.
499 *
500 * Must be called with ctx->mutex held.
501 *
502 * CPU events are removed with a smp call. For task events we only
503 * call when the task is on a CPU.
504 *
505 * If event->ctx is a cloned context, callers must make sure that
506 * every task struct that event->ctx->task could possibly point to
507 * remains valid. This is OK when called from perf_release since
508 * that only calls us on the top-level context, which can't be a clone.
509 * When called from perf_event_exit_task, it's OK because the
510 * context has been detached from its task.
511 */
512static void perf_event_remove_from_context(struct perf_event *event)
513{
514 struct perf_event_context *ctx = event->ctx;
515 struct task_struct *task = ctx->task;
516
517 if (!task) {
518 /*
519 * Per cpu events are removed via an smp call and
André Goddard Rosaaf901ca2009-11-14 13:09:05 -0200520 * the removal is always successful.
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200521 */
522 smp_call_function_single(event->cpu,
523 __perf_event_remove_from_context,
524 event, 1);
525 return;
526 }
527
528retry:
529 task_oncpu_function_call(task, __perf_event_remove_from_context,
530 event);
531
Thomas Gleixnere625cce12009-11-17 18:02:06 +0100532 raw_spin_lock_irq(&ctx->lock);
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200533 /*
534 * If the context is active we need to retry the smp call.
535 */
536 if (ctx->nr_active && !list_empty(&event->group_entry)) {
Thomas Gleixnere625cce12009-11-17 18:02:06 +0100537 raw_spin_unlock_irq(&ctx->lock);
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200538 goto retry;
539 }
540
541 /*
542 * The lock prevents this context from being scheduled in, so we
543 * can remove the event safely if the call above did not
544 * succeed.
545 */
Peter Zijlstra6c2bfcb2009-11-23 11:37:24 +0100546 if (!list_empty(&event->group_entry))
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200547 list_del_event(event, ctx);
Thomas Gleixnere625cce12009-11-17 18:02:06 +0100548 raw_spin_unlock_irq(&ctx->lock);
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200549}
550
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200551/*
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200552 * Cross CPU call to disable a performance event
553 */
554static void __perf_event_disable(void *info)
555{
556 struct perf_event *event = info;
557 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
558 struct perf_event_context *ctx = event->ctx;
559
560 /*
561 * If this is a per-task event, we need to check whether this
562 * event's task is the current task on this cpu.
563 */
564 if (ctx->task && cpuctx->task_ctx != ctx)
565 return;
566
Thomas Gleixnere625cce12009-11-17 18:02:06 +0100567 raw_spin_lock(&ctx->lock);
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200568
569 /*
570 * If the event is on, turn it off.
571 * If it is in error state, leave it in error state.
572 */
573 if (event->state >= PERF_EVENT_STATE_INACTIVE) {
574 update_context_time(ctx);
575 update_group_times(event);
576 if (event == event->group_leader)
577 group_sched_out(event, cpuctx, ctx);
578 else
579 event_sched_out(event, cpuctx, ctx);
580 event->state = PERF_EVENT_STATE_OFF;
581 }
582
Thomas Gleixnere625cce12009-11-17 18:02:06 +0100583 raw_spin_unlock(&ctx->lock);
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200584}
585
586/*
587 * Disable an event.
588 *
589 * If event->ctx is a cloned context, callers must make sure that
590 * every task struct that event->ctx->task could possibly point to
591 * remains valid. This condition is satisfied when called through
592 * perf_event_for_each_child or perf_event_for_each because they
593 * hold the top-level event's child_mutex, so any descendant that
594 * goes to exit will block in sync_child_event.
595 * When called from perf_pending_event it's OK because event->ctx
596 * is the current context on this CPU and preemption is disabled,
597 * hence we can't get into perf_event_task_sched_out for this context.
598 */
Frederic Weisbecker44234ad2009-12-09 09:25:48 +0100599void perf_event_disable(struct perf_event *event)
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200600{
601 struct perf_event_context *ctx = event->ctx;
602 struct task_struct *task = ctx->task;
603
604 if (!task) {
605 /*
606 * Disable the event on the cpu that it's on
607 */
608 smp_call_function_single(event->cpu, __perf_event_disable,
609 event, 1);
610 return;
611 }
612
613 retry:
614 task_oncpu_function_call(task, __perf_event_disable, event);
615
Thomas Gleixnere625cce12009-11-17 18:02:06 +0100616 raw_spin_lock_irq(&ctx->lock);
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200617 /*
618 * If the event is still active, we need to retry the cross-call.
619 */
620 if (event->state == PERF_EVENT_STATE_ACTIVE) {
Thomas Gleixnere625cce12009-11-17 18:02:06 +0100621 raw_spin_unlock_irq(&ctx->lock);
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200622 goto retry;
623 }
624
625 /*
626 * Since we have the lock this context can't be scheduled
627 * in, so we can change the state safely.
628 */
629 if (event->state == PERF_EVENT_STATE_INACTIVE) {
630 update_group_times(event);
631 event->state = PERF_EVENT_STATE_OFF;
632 }
633
Thomas Gleixnere625cce12009-11-17 18:02:06 +0100634 raw_spin_unlock_irq(&ctx->lock);
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200635}
636
637static int
638event_sched_in(struct perf_event *event,
639 struct perf_cpu_context *cpuctx,
Peter Zijlstra6e377382010-02-11 13:21:58 +0100640 struct perf_event_context *ctx)
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200641{
642 if (event->state <= PERF_EVENT_STATE_OFF)
643 return 0;
644
645 event->state = PERF_EVENT_STATE_ACTIVE;
Peter Zijlstra6e377382010-02-11 13:21:58 +0100646 event->oncpu = smp_processor_id();
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200647 /*
648 * The new state must be visible before we turn it on in the hardware:
649 */
650 smp_wmb();
651
652 if (event->pmu->enable(event)) {
653 event->state = PERF_EVENT_STATE_INACTIVE;
654 event->oncpu = -1;
655 return -EAGAIN;
656 }
657
658 event->tstamp_running += ctx->time - event->tstamp_stopped;
659
660 if (!is_software_event(event))
661 cpuctx->active_oncpu++;
662 ctx->nr_active++;
663
664 if (event->attr.exclusive)
665 cpuctx->exclusive = 1;
666
667 return 0;
668}
669
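/*
 * Schedule an entire group onto the PMU. When the PMU provides
 * transaction hooks, the members are added between start_txn() and
 * commit_txn() so the hardware can accept or reject the group as a
 * unit; on any failure the partially scheduled members are torn down
 * again and the transaction is cancelled.
 */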
670static int
671group_sched_in(struct perf_event *group_event,
672 struct perf_cpu_context *cpuctx,
Peter Zijlstra6e377382010-02-11 13:21:58 +0100673 struct perf_event_context *ctx)
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200674{
Lin Ming6bde9b62010-04-23 13:56:00 +0800675 struct perf_event *event, *partial_group = NULL;
676 const struct pmu *pmu = group_event->pmu;
677 bool txn = false;
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200678 int ret;
679
680 if (group_event->state == PERF_EVENT_STATE_OFF)
681 return 0;
682
Lin Ming6bde9b62010-04-23 13:56:00 +0800683 /* Check if group transactions are available */
684 if (pmu->start_txn)
685 txn = true;
686
687 if (txn)
688 pmu->start_txn(pmu);
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200689
Stephane Eranian90151c352010-05-25 16:23:10 +0200690 if (event_sched_in(group_event, cpuctx, ctx)) {
691 if (txn)
692 pmu->cancel_txn(pmu);
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200693 return -EAGAIN;
Stephane Eranian90151c352010-05-25 16:23:10 +0200694 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200695
696 /*
697 * Schedule in siblings as one group (if any):
698 */
699 list_for_each_entry(event, &group_event->sibling_list, group_entry) {
Peter Zijlstra6e377382010-02-11 13:21:58 +0100700 if (event_sched_in(event, cpuctx, ctx)) {
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200701 partial_group = event;
702 goto group_error;
703 }
704 }
705
Paul Mackerras6e851582010-05-08 20:58:00 +1000706 if (!txn)
707 return 0;
Lin Ming6bde9b62010-04-23 13:56:00 +0800708
Paul Mackerras6e851582010-05-08 20:58:00 +1000709 ret = pmu->commit_txn(pmu);
710 if (!ret) {
711 pmu->cancel_txn(pmu);
712 return 0;
Lin Ming6bde9b62010-04-23 13:56:00 +0800713 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200714
715group_error:
716 /*
717 * Groups can be scheduled in as one unit only, so undo any
718 * partial group before returning:
719 */
720 list_for_each_entry(event, &group_event->sibling_list, group_entry) {
721 if (event == partial_group)
722 break;
723 event_sched_out(event, cpuctx, ctx);
724 }
725 event_sched_out(group_event, cpuctx, ctx);
726
Stephane Eranian90151c352010-05-25 16:23:10 +0200727 if (txn)
728 pmu->cancel_txn(pmu);
729
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200730 return -EAGAIN;
731}
732
733/*
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200734 * Work out whether we can put this event group on the CPU now.
735 */
736static int group_can_go_on(struct perf_event *event,
737 struct perf_cpu_context *cpuctx,
738 int can_add_hw)
739{
740 /*
741 * Groups consisting entirely of software events can always go on.
742 */
Frederic Weisbeckerd6f962b2010-01-10 01:25:51 +0100743 if (event->group_flags & PERF_GROUP_SOFTWARE)
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200744 return 1;
745 /*
746 * If an exclusive group is already on, no other hardware
747 * events can go on.
748 */
749 if (cpuctx->exclusive)
750 return 0;
751 /*
752 * If this group is exclusive and there are already
753 * events on the CPU, it can't go on.
754 */
755 if (event->attr.exclusive && cpuctx->active_oncpu)
756 return 0;
757 /*
758 * Otherwise, try to add it if all previous groups were able
759 * to go on.
760 */
761 return can_add_hw;
762}
763
764static void add_event_to_ctx(struct perf_event *event,
765 struct perf_event_context *ctx)
766{
767 list_add_event(event, ctx);
Peter Zijlstra8a495422010-05-27 15:47:49 +0200768 perf_group_attach(event);
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200769 event->tstamp_enabled = ctx->time;
770 event->tstamp_running = ctx->time;
771 event->tstamp_stopped = ctx->time;
772}
773
774/*
775 * Cross CPU call to install and enable a performance event
776 *
777 * Must be called with ctx->mutex held
778 */
779static void __perf_install_in_context(void *info)
780{
781 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
782 struct perf_event *event = info;
783 struct perf_event_context *ctx = event->ctx;
784 struct perf_event *leader = event->group_leader;
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200785 int err;
786
787 /*
788 * If this is a task context, we need to check whether it is
789 * the current task context of this cpu. If not it has been
790 * scheduled out before the smp call arrived.
791 * Or possibly this is the right context but it isn't
792 * on this cpu because it had no events.
793 */
794 if (ctx->task && cpuctx->task_ctx != ctx) {
795 if (cpuctx->task_ctx || ctx->task != current)
796 return;
797 cpuctx->task_ctx = ctx;
798 }
799
Thomas Gleixnere625cce12009-11-17 18:02:06 +0100800 raw_spin_lock(&ctx->lock);
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200801 ctx->is_active = 1;
802 update_context_time(ctx);
803
804 /*
805 * Protect the list operation against NMI by disabling the
806 * events on a global level. NOP for non NMI based events.
807 */
808 perf_disable();
809
810 add_event_to_ctx(event, ctx);
811
Peter Zijlstraf4c41762009-12-16 17:55:54 +0100812 if (event->cpu != -1 && event->cpu != smp_processor_id())
813 goto unlock;
814
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200815 /*
816 * Don't put the event on if it is disabled or if
817 * it is in a group and the group isn't on.
818 */
819 if (event->state != PERF_EVENT_STATE_INACTIVE ||
820 (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE))
821 goto unlock;
822
823 /*
824 * An exclusive event can't go on if there are already active
825 * hardware events, and no hardware event can go on if there
826 * is already an exclusive event on.
827 */
828 if (!group_can_go_on(event, cpuctx, 1))
829 err = -EEXIST;
830 else
Peter Zijlstra6e377382010-02-11 13:21:58 +0100831 err = event_sched_in(event, cpuctx, ctx);
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200832
833 if (err) {
834 /*
835 * This event couldn't go on. If it is in a group
836 * then we have to pull the whole group off.
837 * If the event group is pinned then put it in error state.
838 */
839 if (leader != event)
840 group_sched_out(leader, cpuctx, ctx);
841 if (leader->attr.pinned) {
842 update_group_times(leader);
843 leader->state = PERF_EVENT_STATE_ERROR;
844 }
845 }
846
847 if (!err && !ctx->task && cpuctx->max_pertask)
848 cpuctx->max_pertask--;
849
850 unlock:
851 perf_enable();
852
Thomas Gleixnere625cce12009-11-17 18:02:06 +0100853 raw_spin_unlock(&ctx->lock);
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200854}
855
856/*
857 * Attach a performance event to a context
858 *
859 * First we add the event to the list with the hardware enable bit
860 * in event->hw_config cleared.
861 *
862 * If the event is attached to a task which is on a CPU, we use an smp
863 * call to enable it in the task context. The task might have been
864 * scheduled away, but we check this in the smp call again.
865 *
866 * Must be called with ctx->mutex held.
867 */
868static void
869perf_install_in_context(struct perf_event_context *ctx,
870 struct perf_event *event,
871 int cpu)
872{
873 struct task_struct *task = ctx->task;
874
875 if (!task) {
876 /*
877 * Per cpu events are installed via an smp call and
André Goddard Rosaaf901ca2009-11-14 13:09:05 -0200878 * the install is always successful.
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200879 */
880 smp_call_function_single(cpu, __perf_install_in_context,
881 event, 1);
882 return;
883 }
884
885retry:
886 task_oncpu_function_call(task, __perf_install_in_context,
887 event);
888
Thomas Gleixnere625cce12009-11-17 18:02:06 +0100889 raw_spin_lock_irq(&ctx->lock);
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200890 /*
891 * If the context is active, we need to retry the smp call.
892 */
893 if (ctx->is_active && list_empty(&event->group_entry)) {
Thomas Gleixnere625cce12009-11-17 18:02:06 +0100894 raw_spin_unlock_irq(&ctx->lock);
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200895 goto retry;
896 }
897
898 /*
899 * The lock prevents this context from being scheduled in, so we
900 * can add the event safely if the call above did not
901 * succeed.
902 */
903 if (list_empty(&event->group_entry))
904 add_event_to_ctx(event, ctx);
Thomas Gleixnere625cce12009-11-17 18:02:06 +0100905 raw_spin_unlock_irq(&ctx->lock);
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200906}
907
908/*
909 * Put an event into inactive state and update time fields.
910 * Enabling the leader of a group effectively enables all
911 * the group members that aren't explicitly disabled, so we
912 * have to update their ->tstamp_enabled also.
913 * Note: this works for group members as well as group leaders
914 * since the non-leader members' sibling_lists will be empty.
915 */
916static void __perf_event_mark_enabled(struct perf_event *event,
917 struct perf_event_context *ctx)
918{
919 struct perf_event *sub;
920
921 event->state = PERF_EVENT_STATE_INACTIVE;
922 event->tstamp_enabled = ctx->time - event->total_time_enabled;
923 list_for_each_entry(sub, &event->sibling_list, group_entry)
924 if (sub->state >= PERF_EVENT_STATE_INACTIVE)
925 sub->tstamp_enabled =
926 ctx->time - sub->total_time_enabled;
927}
928
929/*
930 * Cross CPU call to enable a performance event
931 */
932static void __perf_event_enable(void *info)
933{
934 struct perf_event *event = info;
935 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
936 struct perf_event_context *ctx = event->ctx;
937 struct perf_event *leader = event->group_leader;
938 int err;
939
940 /*
941 * If this is a per-task event, we need to check whether this
942 * event's task is the current task on this cpu.
943 */
944 if (ctx->task && cpuctx->task_ctx != ctx) {
945 if (cpuctx->task_ctx || ctx->task != current)
946 return;
947 cpuctx->task_ctx = ctx;
948 }
949
Thomas Gleixnere625cce12009-11-17 18:02:06 +0100950 raw_spin_lock(&ctx->lock);
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200951 ctx->is_active = 1;
952 update_context_time(ctx);
953
954 if (event->state >= PERF_EVENT_STATE_INACTIVE)
955 goto unlock;
956 __perf_event_mark_enabled(event, ctx);
957
Peter Zijlstraf4c41762009-12-16 17:55:54 +0100958 if (event->cpu != -1 && event->cpu != smp_processor_id())
959 goto unlock;
960
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200961 /*
962 * If the event is in a group and isn't the group leader,
963 * then don't put it on unless the group is on.
964 */
965 if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE)
966 goto unlock;
967
968 if (!group_can_go_on(event, cpuctx, 1)) {
969 err = -EEXIST;
970 } else {
971 perf_disable();
972 if (event == leader)
Peter Zijlstra6e377382010-02-11 13:21:58 +0100973 err = group_sched_in(event, cpuctx, ctx);
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200974 else
Peter Zijlstra6e377382010-02-11 13:21:58 +0100975 err = event_sched_in(event, cpuctx, ctx);
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200976 perf_enable();
977 }
978
979 if (err) {
980 /*
981 * If this event can't go on and it's part of a
982 * group, then the whole group has to come off.
983 */
984 if (leader != event)
985 group_sched_out(leader, cpuctx, ctx);
986 if (leader->attr.pinned) {
987 update_group_times(leader);
988 leader->state = PERF_EVENT_STATE_ERROR;
989 }
990 }
991
992 unlock:
Thomas Gleixnere625cce12009-11-17 18:02:06 +0100993 raw_spin_unlock(&ctx->lock);
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200994}
995
996/*
997 * Enable an event.
998 *
999 * If event->ctx is a cloned context, callers must make sure that
1000 * every task struct that event->ctx->task could possibly point to
1001 * remains valid. This condition is satisfied when called through
1002 * perf_event_for_each_child or perf_event_for_each as described
1003 * for perf_event_disable.
1004 */
Frederic Weisbecker44234ad2009-12-09 09:25:48 +01001005void perf_event_enable(struct perf_event *event)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001006{
1007 struct perf_event_context *ctx = event->ctx;
1008 struct task_struct *task = ctx->task;
1009
1010 if (!task) {
1011 /*
1012 * Enable the event on the cpu that it's on
1013 */
1014 smp_call_function_single(event->cpu, __perf_event_enable,
1015 event, 1);
1016 return;
1017 }
1018
Thomas Gleixnere625cce12009-11-17 18:02:06 +01001019 raw_spin_lock_irq(&ctx->lock);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001020 if (event->state >= PERF_EVENT_STATE_INACTIVE)
1021 goto out;
1022
1023 /*
1024 * If the event is in error state, clear that first.
1025 * That way, if we see the event in error state below, we
1026 * know that it has gone back into error state, as distinct
1027 * from the task having been scheduled away before the
1028 * cross-call arrived.
1029 */
1030 if (event->state == PERF_EVENT_STATE_ERROR)
1031 event->state = PERF_EVENT_STATE_OFF;
1032
1033 retry:
Thomas Gleixnere625cce12009-11-17 18:02:06 +01001034 raw_spin_unlock_irq(&ctx->lock);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001035 task_oncpu_function_call(task, __perf_event_enable, event);
1036
Thomas Gleixnere625cce12009-11-17 18:02:06 +01001037 raw_spin_lock_irq(&ctx->lock);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001038
1039 /*
1040 * If the context is active and the event is still off,
1041 * we need to retry the cross-call.
1042 */
1043 if (ctx->is_active && event->state == PERF_EVENT_STATE_OFF)
1044 goto retry;
1045
1046 /*
1047 * Since we have the lock this context can't be scheduled
1048 * in, so we can change the state safely.
1049 */
1050 if (event->state == PERF_EVENT_STATE_OFF)
1051 __perf_event_mark_enabled(event, ctx);
1052
1053 out:
Thomas Gleixnere625cce12009-11-17 18:02:06 +01001054 raw_spin_unlock_irq(&ctx->lock);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001055}
1056
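/*
 * Add @refresh to the event's overflow limit (event_limit) and
 * (re)enable the event; not supported on inherited events.
 */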
1057static int perf_event_refresh(struct perf_event *event, int refresh)
1058{
1059 /*
1060 * not supported on inherited events
1061 */
1062 if (event->attr.inherit)
1063 return -EINVAL;
1064
1065 atomic_add(refresh, &event->event_limit);
1066 perf_event_enable(event);
1067
1068 return 0;
1069}
1070
Frederic Weisbecker5b0311e2010-01-17 11:59:13 +01001071enum event_type_t {
1072 EVENT_FLEXIBLE = 0x1,
1073 EVENT_PINNED = 0x2,
1074 EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED,
1075};
1076
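/*
 * Deschedule a context's groups of the requested type(s) (pinned,
 * flexible, or both) from the PMU and mark the context inactive.
 */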
1077static void ctx_sched_out(struct perf_event_context *ctx,
1078 struct perf_cpu_context *cpuctx,
1079 enum event_type_t event_type)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001080{
1081 struct perf_event *event;
1082
Thomas Gleixnere625cce12009-11-17 18:02:06 +01001083 raw_spin_lock(&ctx->lock);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001084 ctx->is_active = 0;
1085 if (likely(!ctx->nr_events))
1086 goto out;
1087 update_context_time(ctx);
1088
1089 perf_disable();
Frederic Weisbecker5b0311e2010-01-17 11:59:13 +01001090 if (!ctx->nr_active)
1091 goto out_enable;
1092
1093 if (event_type & EVENT_PINNED)
Frederic Weisbecker889ff012010-01-09 20:04:47 +01001094 list_for_each_entry(event, &ctx->pinned_groups, group_entry)
1095 group_sched_out(event, cpuctx, ctx);
1096
Frederic Weisbecker5b0311e2010-01-17 11:59:13 +01001097 if (event_type & EVENT_FLEXIBLE)
Frederic Weisbecker889ff012010-01-09 20:04:47 +01001098 list_for_each_entry(event, &ctx->flexible_groups, group_entry)
Xiao Guangrong8c9ed8e2009-09-25 13:51:17 +08001099 group_sched_out(event, cpuctx, ctx);
Frederic Weisbecker5b0311e2010-01-17 11:59:13 +01001100
1101 out_enable:
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001102 perf_enable();
1103 out:
Thomas Gleixnere625cce12009-11-17 18:02:06 +01001104 raw_spin_unlock(&ctx->lock);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001105}
1106
1107/*
1108 * Test whether two contexts are equivalent, i.e. whether they
1109 * have both been cloned from the same version of the same context
1110 * and they both have the same number of enabled events.
1111 * If the number of enabled events is the same, then the set
1112 * of enabled events should be the same, because these are both
1113 * inherited contexts, therefore we can't access individual events
1114 * in them directly with an fd; we can only enable/disable all
1115 * events via prctl, or enable/disable all events in a family
1116 * via ioctl, which will have the same effect on both contexts.
1117 */
1118static int context_equiv(struct perf_event_context *ctx1,
1119 struct perf_event_context *ctx2)
1120{
1121 return ctx1->parent_ctx && ctx1->parent_ctx == ctx2->parent_ctx
1122 && ctx1->parent_gen == ctx2->parent_gen
1123 && !ctx1->pin_count && !ctx2->pin_count;
1124}
1125
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001126static void __perf_event_sync_stat(struct perf_event *event,
1127 struct perf_event *next_event)
1128{
1129 u64 value;
1130
1131 if (!event->attr.inherit_stat)
1132 return;
1133
1134 /*
1135 * Update the event value; we cannot use perf_event_read()
1136 * because we're in the middle of a context switch and have IRQs
1137 * disabled, which upsets smp_call_function_single(), however
1138 * we know the event must be on the current CPU, therefore we
1139 * don't need to use it.
1140 */
1141 switch (event->state) {
1142 case PERF_EVENT_STATE_ACTIVE:
Peter Zijlstra3dbebf12009-11-20 22:19:52 +01001143 event->pmu->read(event);
1144 /* fall-through */
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001145
1146 case PERF_EVENT_STATE_INACTIVE:
1147 update_event_times(event);
1148 break;
1149
1150 default:
1151 break;
1152 }
1153
1154 /*
1155 * In order to keep per-task stats reliable we need to flip the event
1156 * values when we flip the contexts.
1157 */
1158 value = atomic64_read(&next_event->count);
1159 value = atomic64_xchg(&event->count, value);
1160 atomic64_set(&next_event->count, value);
1161
1162 swap(event->total_time_enabled, next_event->total_time_enabled);
1163 swap(event->total_time_running, next_event->total_time_running);
1164
1165 /*
1166 * Since we swizzled the values, update the user visible data too.
1167 */
1168 perf_event_update_userpage(event);
1169 perf_event_update_userpage(next_event);
1170}
1171
1172#define list_next_entry(pos, member) \
1173 list_entry(pos->member.next, typeof(*pos), member)
1174
1175static void perf_event_sync_stat(struct perf_event_context *ctx,
1176 struct perf_event_context *next_ctx)
1177{
1178 struct perf_event *event, *next_event;
1179
1180 if (!ctx->nr_stat)
1181 return;
1182
Peter Zijlstra02ffdbc2009-11-20 22:19:50 +01001183 update_context_time(ctx);
1184
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001185 event = list_first_entry(&ctx->event_list,
1186 struct perf_event, event_entry);
1187
1188 next_event = list_first_entry(&next_ctx->event_list,
1189 struct perf_event, event_entry);
1190
1191 while (&event->event_entry != &ctx->event_list &&
1192 &next_event->event_entry != &next_ctx->event_list) {
1193
1194 __perf_event_sync_stat(event, next_event);
1195
1196 event = list_next_entry(event, event_entry);
1197 next_event = list_next_entry(next_event, event_entry);
1198 }
1199}
1200
1201/*
1202 * Called from scheduler to remove the events of the current task,
1203 * with interrupts disabled.
1204 *
1205 * We stop each event and update the event value in event->count.
1206 *
1207 * This does not protect us against NMI, but disable()
1208 * sets the disabled bit in the control field of event _before_
1209 * accessing the event control register. If an NMI hits, then it will
1210 * not restart the event.
1211 */
1212void perf_event_task_sched_out(struct task_struct *task,
Peter Zijlstra49f47432009-12-27 11:51:52 +01001213 struct task_struct *next)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001214{
Peter Zijlstra49f47432009-12-27 11:51:52 +01001215 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001216 struct perf_event_context *ctx = task->perf_event_ctxp;
1217 struct perf_event_context *next_ctx;
1218 struct perf_event_context *parent;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001219 int do_switch = 1;
1220
Frederic Weisbeckere49a5bd2010-03-22 19:40:03 +01001221 perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, NULL, 0);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001222
1223 if (likely(!ctx || !cpuctx->task_ctx))
1224 return;
1225
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001226 rcu_read_lock();
1227 parent = rcu_dereference(ctx->parent_ctx);
1228 next_ctx = next->perf_event_ctxp;
1229 if (parent && next_ctx &&
1230 rcu_dereference(next_ctx->parent_ctx) == parent) {
1231 /*
1232 * Looks like the two contexts are clones, so we might be
1233 * able to optimize the context switch. We lock both
1234 * contexts and check that they are clones under the
1235 * lock (including re-checking that neither has been
1236 * uncloned in the meantime). It doesn't matter which
1237 * order we take the locks because no other cpu could
1238 * be trying to lock both of these tasks.
1239 */
Thomas Gleixnere625cce12009-11-17 18:02:06 +01001240 raw_spin_lock(&ctx->lock);
1241 raw_spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001242 if (context_equiv(ctx, next_ctx)) {
1243 /*
1244 * XXX do we need a memory barrier of sorts
1245 * w.r.t. rcu_dereference() of perf_event_ctxp
1246 */
1247 task->perf_event_ctxp = next_ctx;
1248 next->perf_event_ctxp = ctx;
1249 ctx->task = next;
1250 next_ctx->task = task;
1251 do_switch = 0;
1252
1253 perf_event_sync_stat(ctx, next_ctx);
1254 }
Thomas Gleixnere625cce12009-11-17 18:02:06 +01001255 raw_spin_unlock(&next_ctx->lock);
1256 raw_spin_unlock(&ctx->lock);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001257 }
1258 rcu_read_unlock();
1259
1260 if (do_switch) {
Frederic Weisbecker5b0311e2010-01-17 11:59:13 +01001261 ctx_sched_out(ctx, cpuctx, EVENT_ALL);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001262 cpuctx->task_ctx = NULL;
1263 }
1264}
1265
Frederic Weisbecker5b0311e2010-01-17 11:59:13 +01001266static void task_ctx_sched_out(struct perf_event_context *ctx,
1267 enum event_type_t event_type)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001268{
1269 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
1270
1271 if (!cpuctx->task_ctx)
1272 return;
1273
1274 if (WARN_ON_ONCE(ctx != cpuctx->task_ctx))
1275 return;
1276
Frederic Weisbecker5b0311e2010-01-17 11:59:13 +01001277 ctx_sched_out(ctx, cpuctx, event_type);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001278 cpuctx->task_ctx = NULL;
1279}
1280
1281/*
1282 * Called with IRQs disabled
1283 */
Frederic Weisbecker5b0311e2010-01-17 11:59:13 +01001284static void __perf_event_task_sched_out(struct perf_event_context *ctx)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001285{
Frederic Weisbecker5b0311e2010-01-17 11:59:13 +01001286 task_ctx_sched_out(ctx, EVENT_ALL);
1287}
1288
1289/*
1290 * Called with IRQs disabled
1291 */
1292static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
1293 enum event_type_t event_type)
1294{
1295 ctx_sched_out(&cpuctx->ctx, cpuctx, event_type);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001296}
1297
1298static void
Frederic Weisbecker5b0311e2010-01-17 11:59:13 +01001299ctx_pinned_sched_in(struct perf_event_context *ctx,
Peter Zijlstra6e377382010-02-11 13:21:58 +01001300 struct perf_cpu_context *cpuctx)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001301{
1302 struct perf_event *event;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001303
Frederic Weisbecker889ff012010-01-09 20:04:47 +01001304 list_for_each_entry(event, &ctx->pinned_groups, group_entry) {
1305 if (event->state <= PERF_EVENT_STATE_OFF)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001306 continue;
Peter Zijlstra6e377382010-02-11 13:21:58 +01001307 if (event->cpu != -1 && event->cpu != smp_processor_id())
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001308 continue;
1309
Xiao Guangrong8c9ed8e2009-09-25 13:51:17 +08001310 if (group_can_go_on(event, cpuctx, 1))
Peter Zijlstra6e377382010-02-11 13:21:58 +01001311 group_sched_in(event, cpuctx, ctx);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001312
1313 /*
1314 * If this pinned group hasn't been scheduled,
1315 * put it in error state.
1316 */
1317 if (event->state == PERF_EVENT_STATE_INACTIVE) {
1318 update_group_times(event);
1319 event->state = PERF_EVENT_STATE_ERROR;
1320 }
1321 }
Frederic Weisbecker5b0311e2010-01-17 11:59:13 +01001322}
1323
1324static void
1325ctx_flexible_sched_in(struct perf_event_context *ctx,
Peter Zijlstra6e377382010-02-11 13:21:58 +01001326 struct perf_cpu_context *cpuctx)
Frederic Weisbecker5b0311e2010-01-17 11:59:13 +01001327{
1328 struct perf_event *event;
1329 int can_add_hw = 1;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001330
Frederic Weisbecker889ff012010-01-09 20:04:47 +01001331 list_for_each_entry(event, &ctx->flexible_groups, group_entry) {
1332 /* Ignore events in OFF or ERROR state */
1333 if (event->state <= PERF_EVENT_STATE_OFF)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001334 continue;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001335 /*
1336 * Listen to the 'cpu' scheduling filter constraint
1337 * of events:
1338 */
Peter Zijlstra6e377382010-02-11 13:21:58 +01001339 if (event->cpu != -1 && event->cpu != smp_processor_id())
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001340 continue;
1341
Xiao Guangrong8c9ed8e2009-09-25 13:51:17 +08001342 if (group_can_go_on(event, cpuctx, can_add_hw))
Peter Zijlstra6e377382010-02-11 13:21:58 +01001343 if (group_sched_in(event, cpuctx, ctx))
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001344 can_add_hw = 0;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001345 }
Frederic Weisbecker5b0311e2010-01-17 11:59:13 +01001346}
1347
1348static void
1349ctx_sched_in(struct perf_event_context *ctx,
1350 struct perf_cpu_context *cpuctx,
1351 enum event_type_t event_type)
1352{
Frederic Weisbecker5b0311e2010-01-17 11:59:13 +01001353 raw_spin_lock(&ctx->lock);
1354 ctx->is_active = 1;
1355 if (likely(!ctx->nr_events))
1356 goto out;
1357
1358 ctx->timestamp = perf_clock();
1359
1360 perf_disable();
1361
1362 /*
1363 * First go through the list and put on any pinned groups
1364 * in order to give them the best chance of going on.
1365 */
1366 if (event_type & EVENT_PINNED)
Peter Zijlstra6e377382010-02-11 13:21:58 +01001367 ctx_pinned_sched_in(ctx, cpuctx);
Frederic Weisbecker5b0311e2010-01-17 11:59:13 +01001368
1369 /* Then walk through the lower prio flexible groups */
1370 if (event_type & EVENT_FLEXIBLE)
Peter Zijlstra6e377382010-02-11 13:21:58 +01001371 ctx_flexible_sched_in(ctx, cpuctx);
Frederic Weisbecker5b0311e2010-01-17 11:59:13 +01001372
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001373 perf_enable();
1374 out:
Thomas Gleixnere625cce12009-11-17 18:02:06 +01001375 raw_spin_unlock(&ctx->lock);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001376}
1377
Frederic Weisbecker329c0e02010-01-17 12:56:05 +01001378static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
1379 enum event_type_t event_type)
1380{
1381 struct perf_event_context *ctx = &cpuctx->ctx;
1382
1383 ctx_sched_in(ctx, cpuctx, event_type);
1384}
1385
Frederic Weisbecker5b0311e2010-01-17 11:59:13 +01001386static void task_ctx_sched_in(struct task_struct *task,
1387 enum event_type_t event_type)
1388{
1389 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
1390 struct perf_event_context *ctx = task->perf_event_ctxp;
1391
1392 if (likely(!ctx))
1393 return;
1394 if (cpuctx->task_ctx == ctx)
1395 return;
1396 ctx_sched_in(ctx, cpuctx, event_type);
1397 cpuctx->task_ctx = ctx;
1398}
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001399/*
1400 * Called from scheduler to add the events of the current task
1401 * with interrupts disabled.
1402 *
1403 * We restore the event value and then enable it.
1404 *
1405 * This does not protect us against NMI, but enable()
1406 * sets the enabled bit in the control field of event _before_
1407 * accessing the event control register. If an NMI hits, then it will
1408 * keep the event running.
1409 */
Peter Zijlstra49f47432009-12-27 11:51:52 +01001410void perf_event_task_sched_in(struct task_struct *task)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001411{
Frederic Weisbecker329c0e02010-01-17 12:56:05 +01001412 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
1413 struct perf_event_context *ctx = task->perf_event_ctxp;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001414
Frederic Weisbecker329c0e02010-01-17 12:56:05 +01001415 if (likely(!ctx))
1416 return;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001417
Frederic Weisbecker329c0e02010-01-17 12:56:05 +01001418 if (cpuctx->task_ctx == ctx)
1419 return;
1420
eranian@google.com9b33fa62010-03-10 22:26:05 -08001421 perf_disable();
1422
Frederic Weisbecker329c0e02010-01-17 12:56:05 +01001423 /*
1424 * We want to keep the following priority order:
1425 * cpu pinned (that don't need to move), task pinned,
1426 * cpu flexible, task flexible.
1427 */
1428 cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
1429
1430 ctx_sched_in(ctx, cpuctx, EVENT_PINNED);
1431 cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE);
1432 ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE);
1433
1434 cpuctx->task_ctx = ctx;
eranian@google.com9b33fa62010-03-10 22:26:05 -08001435
1436 perf_enable();
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001437}
1438
1439#define MAX_INTERRUPTS (~0ULL)
1440
1441static void perf_log_throttle(struct perf_event *event, int enable);
1442
Peter Zijlstraabd50712010-01-26 18:50:16 +01001443static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count)
1444{
1445 u64 frequency = event->attr.sample_freq;
1446 u64 sec = NSEC_PER_SEC;
1447 u64 divisor, dividend;
1448
1449 int count_fls, nsec_fls, frequency_fls, sec_fls;
1450
1451 count_fls = fls64(count);
1452 nsec_fls = fls64(nsec);
1453 frequency_fls = fls64(frequency);
1454 sec_fls = 30;
1455
1456 /*
1457 * We got @count in @nsec, with a target of sample_freq HZ;
1458 * the target period becomes:
1459 *
1460 *             @count * 10^9
1461 * period = -------------------
1462 *          @nsec * sample_freq
1463 *
1464 */
1465
1466 /*
1467 * Reduce accuracy by one bit such that @a and @b converge
1468 * to a similar magnitude.
1469 */
1470#define REDUCE_FLS(a, b) \
1471do { \
1472 if (a##_fls > b##_fls) { \
1473 a >>= 1; \
1474 a##_fls--; \
1475 } else { \
1476 b >>= 1; \
1477 b##_fls--; \
1478 } \
1479} while (0)
1480
1481 /*
1482 * Reduce accuracy until either term fits in a u64, then proceed with
1483 * the other, so that finally we can do a u64/u64 division.
1484 */
1485 while (count_fls + sec_fls > 64 && nsec_fls + frequency_fls > 64) {
1486 REDUCE_FLS(nsec, frequency);
1487 REDUCE_FLS(sec, count);
1488 }
1489
1490 if (count_fls + sec_fls > 64) {
1491 divisor = nsec * frequency;
1492
1493 while (count_fls + sec_fls > 64) {
1494 REDUCE_FLS(count, sec);
1495 divisor >>= 1;
1496 }
1497
1498 dividend = count * sec;
1499 } else {
1500 dividend = count * sec;
1501
1502 while (nsec_fls + frequency_fls > 64) {
1503 REDUCE_FLS(nsec, frequency);
1504 dividend >>= 1;
1505 }
1506
1507 divisor = nsec * frequency;
1508 }
1509
Peter Zijlstraf6ab91ad2010-06-04 15:18:01 +02001510 if (!divisor)
1511 return dividend;
1512
Peter Zijlstraabd50712010-01-26 18:50:16 +01001513 return div64_u64(dividend, divisor);
1514}
1515
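/*
 * perf_event_stop()/perf_event_start() fall back to pmu->disable() and
 * pmu->enable() for PMUs that do not implement the lighter-weight
 * stop()/start() callbacks.
 */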
Stephane Eraniand76a0812010-02-08 17:06:01 +02001516static void perf_event_stop(struct perf_event *event)
1517{
1518 if (!event->pmu->stop)
1519 return event->pmu->disable(event);
1520
1521 return event->pmu->stop(event);
1522}
1523
1524static int perf_event_start(struct perf_event *event)
1525{
1526 if (!event->pmu->start)
1527 return event->pmu->enable(event);
1528
1529 return event->pmu->start(event);
1530}
1531
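/*
 * Pull hwc->sample_period towards the period that would have produced
 * @count events in @nsec at the requested sample frequency, smoothing
 * the step with a simple low-pass filter; if the accumulated
 * period_left has drifted far beyond the new period, restart the event.
 */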
Peter Zijlstraabd50712010-01-26 18:50:16 +01001532static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001533{
1534 struct hw_perf_event *hwc = &event->hw;
Peter Zijlstraf6ab91ad2010-06-04 15:18:01 +02001535 s64 period, sample_period;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001536 s64 delta;
1537
Peter Zijlstraabd50712010-01-26 18:50:16 +01001538 period = perf_calculate_period(event, nsec, count);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001539
1540 delta = (s64)(period - hwc->sample_period);
1541 delta = (delta + 7) / 8; /* low pass filter */
1542
1543 sample_period = hwc->sample_period + delta;
1544
1545 if (!sample_period)
1546 sample_period = 1;
1547
1548 hwc->sample_period = sample_period;
Peter Zijlstraabd50712010-01-26 18:50:16 +01001549
1550 if (atomic64_read(&hwc->period_left) > 8*sample_period) {
1551 perf_disable();
Stephane Eraniand76a0812010-02-08 17:06:01 +02001552 perf_event_stop(event);
Peter Zijlstraabd50712010-01-26 18:50:16 +01001553 atomic64_set(&hwc->period_left, 0);
Stephane Eraniand76a0812010-02-08 17:06:01 +02001554 perf_event_start(event);
Peter Zijlstraabd50712010-01-26 18:50:16 +01001555 perf_enable();
1556 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001557}
1558
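/*
 * Per-tick housekeeping for one context: unthrottle events that hit
 * MAX_INTERRUPTS during the last tick and, for freq-based events,
 * recompute the sample period from the number of events counted since
 * the previous tick.
 */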
1559static void perf_ctx_adjust_freq(struct perf_event_context *ctx)
1560{
1561 struct perf_event *event;
1562 struct hw_perf_event *hwc;
Peter Zijlstraabd50712010-01-26 18:50:16 +01001563 u64 interrupts, now;
1564 s64 delta;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001565
Thomas Gleixnere625cce12009-11-17 18:02:06 +01001566 raw_spin_lock(&ctx->lock);
Paul Mackerras03541f82009-10-14 16:58:03 +11001567 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001568 if (event->state != PERF_EVENT_STATE_ACTIVE)
1569 continue;
1570
Peter Zijlstra5d27c232009-12-17 13:16:32 +01001571 if (event->cpu != -1 && event->cpu != smp_processor_id())
1572 continue;
1573
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001574 hwc = &event->hw;
1575
1576 interrupts = hwc->interrupts;
1577 hwc->interrupts = 0;
1578
1579 /*
1580 * unthrottle events on the tick
1581 */
1582 if (interrupts == MAX_INTERRUPTS) {
1583 perf_log_throttle(event, 1);
Peter Zijlstrad4944a02010-03-08 13:51:20 +01001584 perf_disable();
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001585 event->pmu->unthrottle(event);
Peter Zijlstrad4944a02010-03-08 13:51:20 +01001586 perf_enable();
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001587 }
1588
1589 if (!event->attr.freq || !event->attr.sample_freq)
1590 continue;
1591
Peter Zijlstrad4944a02010-03-08 13:51:20 +01001592 perf_disable();
Peter Zijlstraabd50712010-01-26 18:50:16 +01001593 event->pmu->read(event);
1594 now = atomic64_read(&event->count);
1595 delta = now - hwc->freq_count_stamp;
1596 hwc->freq_count_stamp = now;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001597
Peter Zijlstraabd50712010-01-26 18:50:16 +01001598 if (delta > 0)
1599 perf_adjust_period(event, TICK_NSEC, delta);
Peter Zijlstrad4944a02010-03-08 13:51:20 +01001600 perf_enable();
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001601 }
Thomas Gleixnere625cce12009-11-17 18:02:06 +01001602 raw_spin_unlock(&ctx->lock);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001603}
1604
1605/*
1606 * Round-robin a context's events:
1607 */
1608static void rotate_ctx(struct perf_event_context *ctx)
1609{
Thomas Gleixnere625cce12009-11-17 18:02:06 +01001610 raw_spin_lock(&ctx->lock);
Frederic Weisbecker889ff012010-01-09 20:04:47 +01001611
Frederic Weisbeckere2864172010-01-09 21:05:28 +01001612	/* Rotate the first entry of the non-pinned groups to the end */
Frederic Weisbeckere2864172010-01-09 21:05:28 +01001613 list_rotate_left(&ctx->flexible_groups);
1614
Thomas Gleixnere625cce12009-11-17 18:02:06 +01001615 raw_spin_unlock(&ctx->lock);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001616}
1617
Peter Zijlstra49f47432009-12-27 11:51:52 +01001618void perf_event_task_tick(struct task_struct *curr)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001619{
1620 struct perf_cpu_context *cpuctx;
1621 struct perf_event_context *ctx;
Peter Zijlstrad4944a02010-03-08 13:51:20 +01001622 int rotate = 0;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001623
1624 if (!atomic_read(&nr_events))
1625 return;
1626
Peter Zijlstra49f47432009-12-27 11:51:52 +01001627 cpuctx = &__get_cpu_var(perf_cpu_context);
Peter Zijlstrad4944a02010-03-08 13:51:20 +01001628 if (cpuctx->ctx.nr_events &&
1629 cpuctx->ctx.nr_events != cpuctx->ctx.nr_active)
1630 rotate = 1;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001631
Peter Zijlstrad4944a02010-03-08 13:51:20 +01001632 ctx = curr->perf_event_ctxp;
1633 if (ctx && ctx->nr_events && ctx->nr_events != ctx->nr_active)
1634 rotate = 1;
Peter Zijlstra9717e6c2010-01-28 13:57:44 +01001635
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001636 perf_ctx_adjust_freq(&cpuctx->ctx);
1637 if (ctx)
1638 perf_ctx_adjust_freq(ctx);
1639
Peter Zijlstrad4944a02010-03-08 13:51:20 +01001640 if (!rotate)
1641 return;
1642
1643 perf_disable();
Frederic Weisbecker7defb0f2010-01-17 12:15:31 +01001644 cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001645 if (ctx)
Frederic Weisbecker7defb0f2010-01-17 12:15:31 +01001646 task_ctx_sched_out(ctx, EVENT_FLEXIBLE);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001647
1648 rotate_ctx(&cpuctx->ctx);
1649 if (ctx)
1650 rotate_ctx(ctx);
1651
Frederic Weisbecker7defb0f2010-01-17 12:15:31 +01001652 cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001653 if (ctx)
Frederic Weisbecker7defb0f2010-01-17 12:15:31 +01001654 task_ctx_sched_in(curr, EVENT_FLEXIBLE);
Peter Zijlstra9717e6c2010-01-28 13:57:44 +01001655 perf_enable();
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001656}
1657
Frederic Weisbecker889ff012010-01-09 20:04:47 +01001658static int event_enable_on_exec(struct perf_event *event,
1659 struct perf_event_context *ctx)
1660{
1661 if (!event->attr.enable_on_exec)
1662 return 0;
1663
1664 event->attr.enable_on_exec = 0;
1665 if (event->state >= PERF_EVENT_STATE_INACTIVE)
1666 return 0;
1667
1668 __perf_event_mark_enabled(event, ctx);
1669
1670 return 1;
1671}
1672
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001673/*
1674 * Enable all of a task's events that have been marked enable-on-exec.
1675 * This expects task == current.
1676 */
1677static void perf_event_enable_on_exec(struct task_struct *task)
1678{
1679 struct perf_event_context *ctx;
1680 struct perf_event *event;
1681 unsigned long flags;
1682 int enabled = 0;
Frederic Weisbecker889ff012010-01-09 20:04:47 +01001683 int ret;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001684
1685 local_irq_save(flags);
1686 ctx = task->perf_event_ctxp;
1687 if (!ctx || !ctx->nr_events)
1688 goto out;
1689
1690 __perf_event_task_sched_out(ctx);
1691
Thomas Gleixnere625cce12009-11-17 18:02:06 +01001692 raw_spin_lock(&ctx->lock);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001693
Frederic Weisbecker889ff012010-01-09 20:04:47 +01001694 list_for_each_entry(event, &ctx->pinned_groups, group_entry) {
1695 ret = event_enable_on_exec(event, ctx);
1696 if (ret)
1697 enabled = 1;
1698 }
1699
1700 list_for_each_entry(event, &ctx->flexible_groups, group_entry) {
1701 ret = event_enable_on_exec(event, ctx);
1702 if (ret)
1703 enabled = 1;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001704 }
1705
1706 /*
1707 * Unclone this context if we enabled any event.
1708 */
1709 if (enabled)
1710 unclone_ctx(ctx);
1711
Thomas Gleixnere625cce12009-11-17 18:02:06 +01001712 raw_spin_unlock(&ctx->lock);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001713
Peter Zijlstra49f47432009-12-27 11:51:52 +01001714 perf_event_task_sched_in(task);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001715 out:
1716 local_irq_restore(flags);
1717}
1718
1719/*
1720 * Cross CPU call to read the hardware event
1721 */
1722static void __perf_event_read(void *info)
1723{
1724 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
1725 struct perf_event *event = info;
1726 struct perf_event_context *ctx = event->ctx;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001727
1728 /*
1729 * If this is a task context, we need to check whether it is
1730	 * the current task context of this cpu. If not, it has been
1731 * scheduled out before the smp call arrived. In that case
1732 * event->count would have been updated to a recent sample
1733 * when the event was scheduled out.
1734 */
1735 if (ctx->task && cpuctx->task_ctx != ctx)
1736 return;
1737
Thomas Gleixnere625cce12009-11-17 18:02:06 +01001738 raw_spin_lock(&ctx->lock);
Peter Zijlstra58e5ad12009-11-20 22:19:53 +01001739 update_context_time(ctx);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001740 update_event_times(event);
Thomas Gleixnere625cce12009-11-17 18:02:06 +01001741 raw_spin_unlock(&ctx->lock);
Peter Zijlstra2b8988c2009-11-20 22:19:54 +01001742
Peter Zijlstra58e5ad12009-11-20 22:19:53 +01001743 event->pmu->read(event);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001744}
1745
1746static u64 perf_event_read(struct perf_event *event)
1747{
1748 /*
1749 * If event is enabled and currently active on a CPU, update the
1750 * value in the event structure:
1751 */
1752 if (event->state == PERF_EVENT_STATE_ACTIVE) {
1753 smp_call_function_single(event->oncpu,
1754 __perf_event_read, event, 1);
1755 } else if (event->state == PERF_EVENT_STATE_INACTIVE) {
Peter Zijlstra2b8988c2009-11-20 22:19:54 +01001756 struct perf_event_context *ctx = event->ctx;
1757 unsigned long flags;
1758
Thomas Gleixnere625cce12009-11-17 18:02:06 +01001759 raw_spin_lock_irqsave(&ctx->lock, flags);
Peter Zijlstra2b8988c2009-11-20 22:19:54 +01001760 update_context_time(ctx);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001761 update_event_times(event);
Thomas Gleixnere625cce12009-11-17 18:02:06 +01001762 raw_spin_unlock_irqrestore(&ctx->lock, flags);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001763 }
1764
1765 return atomic64_read(&event->count);
1766}
1767
1768/*
1769 * Initialize the perf_event context in a task_struct:
1770 */
1771static void
1772__perf_event_init_context(struct perf_event_context *ctx,
1773 struct task_struct *task)
1774{
Thomas Gleixnere625cce12009-11-17 18:02:06 +01001775 raw_spin_lock_init(&ctx->lock);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001776 mutex_init(&ctx->mutex);
Frederic Weisbecker889ff012010-01-09 20:04:47 +01001777 INIT_LIST_HEAD(&ctx->pinned_groups);
1778 INIT_LIST_HEAD(&ctx->flexible_groups);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001779 INIT_LIST_HEAD(&ctx->event_list);
1780 atomic_set(&ctx->refcount, 1);
1781 ctx->task = task;
1782}
1783
1784static struct perf_event_context *find_get_context(pid_t pid, int cpu)
1785{
1786 struct perf_event_context *ctx;
1787 struct perf_cpu_context *cpuctx;
1788 struct task_struct *task;
1789 unsigned long flags;
1790 int err;
1791
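	/*
	 * Calling convention, as used below: pid == -1 with a valid cpu
	 * selects the per-CPU context for that cpu; otherwise pid selects a
	 * task context, with pid == 0 meaning the current task and pid > 0
	 * the task with that pid.
	 */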
Peter Zijlstraf4c41762009-12-16 17:55:54 +01001792 if (pid == -1 && cpu != -1) {
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001793 /* Must be root to operate on a CPU event: */
1794 if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
1795 return ERR_PTR(-EACCES);
1796
Paul Mackerras0f624e72009-12-15 19:40:32 +11001797 if (cpu < 0 || cpu >= nr_cpumask_bits)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001798 return ERR_PTR(-EINVAL);
1799
1800 /*
1801	 * We could be clever and allow attaching an event to an
1802 * offline CPU and activate it when the CPU comes up, but
1803 * that's for later.
1804 */
Rusty Russellf6325e32009-12-17 11:43:08 -06001805 if (!cpu_online(cpu))
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001806 return ERR_PTR(-ENODEV);
1807
1808 cpuctx = &per_cpu(perf_cpu_context, cpu);
1809 ctx = &cpuctx->ctx;
1810 get_ctx(ctx);
1811
1812 return ctx;
1813 }
1814
1815 rcu_read_lock();
1816 if (!pid)
1817 task = current;
1818 else
1819 task = find_task_by_vpid(pid);
1820 if (task)
1821 get_task_struct(task);
1822 rcu_read_unlock();
1823
1824 if (!task)
1825 return ERR_PTR(-ESRCH);
1826
1827 /*
1828 * Can't attach events to a dying task.
1829 */
1830 err = -ESRCH;
1831 if (task->flags & PF_EXITING)
1832 goto errout;
1833
1834 /* Reuse ptrace permission checks for now. */
1835 err = -EACCES;
1836 if (!ptrace_may_access(task, PTRACE_MODE_READ))
1837 goto errout;
1838
1839 retry:
1840 ctx = perf_lock_task_context(task, &flags);
1841 if (ctx) {
1842 unclone_ctx(ctx);
Thomas Gleixnere625cce12009-11-17 18:02:06 +01001843 raw_spin_unlock_irqrestore(&ctx->lock, flags);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001844 }
1845
1846 if (!ctx) {
Xiao Guangrongaa5452d2009-12-09 11:28:13 +08001847 ctx = kzalloc(sizeof(struct perf_event_context), GFP_KERNEL);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001848 err = -ENOMEM;
1849 if (!ctx)
1850 goto errout;
1851 __perf_event_init_context(ctx, task);
1852 get_ctx(ctx);
1853 if (cmpxchg(&task->perf_event_ctxp, NULL, ctx)) {
1854 /*
1855 * We raced with some other task; use
1856 * the context they set.
1857 */
1858 kfree(ctx);
1859 goto retry;
1860 }
1861 get_task_struct(task);
1862 }
1863
1864 put_task_struct(task);
1865 return ctx;
1866
1867 errout:
1868 put_task_struct(task);
1869 return ERR_PTR(err);
1870}
1871
Li Zefan6fb29152009-10-15 11:21:42 +08001872static void perf_event_free_filter(struct perf_event *event);
1873
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001874static void free_event_rcu(struct rcu_head *head)
1875{
1876 struct perf_event *event;
1877
1878 event = container_of(head, struct perf_event, rcu_head);
1879 if (event->ns)
1880 put_pid_ns(event->ns);
Li Zefan6fb29152009-10-15 11:21:42 +08001881 perf_event_free_filter(event);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001882 kfree(event);
1883}
1884
1885static void perf_pending_sync(struct perf_event *event);
Peter Zijlstraac9721f2010-05-27 12:54:41 +02001886static void perf_mmap_data_put(struct perf_mmap_data *data);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001887
1888static void free_event(struct perf_event *event)
1889{
1890 perf_pending_sync(event);
1891
1892 if (!event->parent) {
1893 atomic_dec(&nr_events);
1894 if (event->attr.mmap)
1895 atomic_dec(&nr_mmap_events);
1896 if (event->attr.comm)
1897 atomic_dec(&nr_comm_events);
1898 if (event->attr.task)
1899 atomic_dec(&nr_task_events);
1900 }
1901
Peter Zijlstraac9721f2010-05-27 12:54:41 +02001902 if (event->data) {
1903 perf_mmap_data_put(event->data);
1904 event->data = NULL;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001905 }
1906
1907 if (event->destroy)
1908 event->destroy(event);
1909
1910 put_ctx(event->ctx);
1911 call_rcu(&event->rcu_head, free_event_rcu);
1912}
1913
Arjan van de Venfb0459d2009-09-25 12:25:56 +02001914int perf_event_release_kernel(struct perf_event *event)
1915{
1916 struct perf_event_context *ctx = event->ctx;
1917
Peter Zijlstra050735b2010-05-11 11:51:53 +02001918 /*
1919	 * Remove it from the PMU; it can't get re-enabled since we got
1920	 * here because the last reference went away.
1921 */
1922 perf_event_disable(event);
1923
Arjan van de Venfb0459d2009-09-25 12:25:56 +02001924 WARN_ON_ONCE(ctx->parent_ctx);
Peter Zijlstraa0507c82010-05-06 15:42:53 +02001925 /*
1926 * There are two ways this annotation is useful:
1927 *
1928 * 1) there is a lock recursion from perf_event_exit_task
1929 * see the comment there.
1930 *
1931 * 2) there is a lock-inversion with mmap_sem through
1932 * perf_event_read_group(), which takes faults while
1933	 * holding ctx->mutex; however, this is called after
1934	 * the last filedesc has died, so there is no possibility
1935	 * of triggering the AB-BA case.
1936 */
1937 mutex_lock_nested(&ctx->mutex, SINGLE_DEPTH_NESTING);
Peter Zijlstra050735b2010-05-11 11:51:53 +02001938 raw_spin_lock_irq(&ctx->lock);
Peter Zijlstra8a495422010-05-27 15:47:49 +02001939 perf_group_detach(event);
Peter Zijlstra050735b2010-05-11 11:51:53 +02001940 list_del_event(event, ctx);
Peter Zijlstra050735b2010-05-11 11:51:53 +02001941 raw_spin_unlock_irq(&ctx->lock);
Arjan van de Venfb0459d2009-09-25 12:25:56 +02001942 mutex_unlock(&ctx->mutex);
1943
1944 mutex_lock(&event->owner->perf_event_mutex);
1945 list_del_init(&event->owner_entry);
1946 mutex_unlock(&event->owner->perf_event_mutex);
1947 put_task_struct(event->owner);
1948
1949 free_event(event);
1950
1951 return 0;
1952}
1953EXPORT_SYMBOL_GPL(perf_event_release_kernel);
1954
Peter Zijlstraa66a3052009-11-23 11:37:23 +01001955/*
1956 * Called when the last reference to the file is gone.
1957 */
1958static int perf_release(struct inode *inode, struct file *file)
1959{
1960 struct perf_event *event = file->private_data;
1961
1962 file->private_data = NULL;
1963
1964 return perf_event_release_kernel(event);
1965}
1966
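/*
 * Size of the record a read() on the event fd produces, derived from
 * attr.read_format.  Worked example (illustrative): with
 * PERF_FORMAT_TOTAL_TIME_ENABLED | PERF_FORMAT_ID on a non-group event,
 * entry = 16 bytes (value + id), size = 8 bytes (time_enabled) and nr = 1,
 * so the caller needs a 24-byte buffer; perf_read_hw() checks the user
 * buffer against this and returns -ENOSPC if it is smaller.
 */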
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001967static int perf_event_read_size(struct perf_event *event)
1968{
1969 int entry = sizeof(u64); /* value */
1970 int size = 0;
1971 int nr = 1;
1972
1973 if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
1974 size += sizeof(u64);
1975
1976 if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
1977 size += sizeof(u64);
1978
1979 if (event->attr.read_format & PERF_FORMAT_ID)
1980 entry += sizeof(u64);
1981
1982 if (event->attr.read_format & PERF_FORMAT_GROUP) {
1983 nr += event->group_leader->nr_siblings;
1984 size += sizeof(u64);
1985 }
1986
1987 size += entry * nr;
1988
1989 return size;
1990}
1991
Peter Zijlstra59ed4462009-11-20 22:19:55 +01001992u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001993{
1994 struct perf_event *child;
1995 u64 total = 0;
1996
Peter Zijlstra59ed4462009-11-20 22:19:55 +01001997 *enabled = 0;
1998 *running = 0;
1999
Peter Zijlstra6f105812009-11-20 22:19:56 +01002000 mutex_lock(&event->child_mutex);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002001 total += perf_event_read(event);
Peter Zijlstra59ed4462009-11-20 22:19:55 +01002002 *enabled += event->total_time_enabled +
2003 atomic64_read(&event->child_total_time_enabled);
2004 *running += event->total_time_running +
2005 atomic64_read(&event->child_total_time_running);
2006
2007 list_for_each_entry(child, &event->child_list, child_list) {
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002008 total += perf_event_read(child);
Peter Zijlstra59ed4462009-11-20 22:19:55 +01002009 *enabled += child->total_time_enabled;
2010 *running += child->total_time_running;
2011 }
Peter Zijlstra6f105812009-11-20 22:19:56 +01002012 mutex_unlock(&event->child_mutex);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002013
2014 return total;
2015}
Arjan van de Venfb0459d2009-09-25 12:25:56 +02002016EXPORT_SYMBOL_GPL(perf_event_read_value);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002017
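/*
 * With PERF_FORMAT_GROUP the record read back mirrors the read_format
 * layout described in include/linux/perf_event.h:
 *
 *	{ u64 nr;
 *	  { u64 time_enabled; }	&& PERF_FORMAT_TOTAL_TIME_ENABLED
 *	  { u64 time_running; }	&& PERF_FORMAT_TOTAL_TIME_RUNNING
 *	  { u64 value;
 *	    { u64 id; }		&& PERF_FORMAT_ID
 *	  } cntr[nr];
 *	}
 *
 * which is what the values[] array below is filled with: one sub-record
 * for the leader followed by one for each sibling.
 */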
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002018static int perf_event_read_group(struct perf_event *event,
2019 u64 read_format, char __user *buf)
2020{
2021 struct perf_event *leader = event->group_leader, *sub;
Peter Zijlstra6f105812009-11-20 22:19:56 +01002022 int n = 0, size = 0, ret = -EFAULT;
2023 struct perf_event_context *ctx = leader->ctx;
Peter Zijlstraabf48682009-11-20 22:19:49 +01002024 u64 values[5];
Peter Zijlstra59ed4462009-11-20 22:19:55 +01002025 u64 count, enabled, running;
Peter Zijlstraabf48682009-11-20 22:19:49 +01002026
Peter Zijlstra6f105812009-11-20 22:19:56 +01002027 mutex_lock(&ctx->mutex);
Peter Zijlstra59ed4462009-11-20 22:19:55 +01002028 count = perf_event_read_value(leader, &enabled, &running);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002029
2030 values[n++] = 1 + leader->nr_siblings;
Peter Zijlstra59ed4462009-11-20 22:19:55 +01002031 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
2032 values[n++] = enabled;
2033 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
2034 values[n++] = running;
Peter Zijlstraabf48682009-11-20 22:19:49 +01002035 values[n++] = count;
2036 if (read_format & PERF_FORMAT_ID)
2037 values[n++] = primary_event_id(leader);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002038
2039 size = n * sizeof(u64);
2040
2041 if (copy_to_user(buf, values, size))
Peter Zijlstra6f105812009-11-20 22:19:56 +01002042 goto unlock;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002043
Peter Zijlstra6f105812009-11-20 22:19:56 +01002044 ret = size;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002045
2046 list_for_each_entry(sub, &leader->sibling_list, group_entry) {
Peter Zijlstraabf48682009-11-20 22:19:49 +01002047 n = 0;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002048
Peter Zijlstra59ed4462009-11-20 22:19:55 +01002049 values[n++] = perf_event_read_value(sub, &enabled, &running);
Peter Zijlstraabf48682009-11-20 22:19:49 +01002050 if (read_format & PERF_FORMAT_ID)
2051 values[n++] = primary_event_id(sub);
2052
2053 size = n * sizeof(u64);
2054
Stephane Eranian184d3da2009-11-23 21:40:49 -08002055 if (copy_to_user(buf + ret, values, size)) {
Peter Zijlstra6f105812009-11-20 22:19:56 +01002056 ret = -EFAULT;
2057 goto unlock;
2058 }
Peter Zijlstraabf48682009-11-20 22:19:49 +01002059
2060 ret += size;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002061 }
Peter Zijlstra6f105812009-11-20 22:19:56 +01002062unlock:
2063 mutex_unlock(&ctx->mutex);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002064
Peter Zijlstraabf48682009-11-20 22:19:49 +01002065 return ret;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002066}
2067
2068static int perf_event_read_one(struct perf_event *event,
2069 u64 read_format, char __user *buf)
2070{
Peter Zijlstra59ed4462009-11-20 22:19:55 +01002071 u64 enabled, running;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002072 u64 values[4];
2073 int n = 0;
2074
Peter Zijlstra59ed4462009-11-20 22:19:55 +01002075 values[n++] = perf_event_read_value(event, &enabled, &running);
2076 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
2077 values[n++] = enabled;
2078 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
2079 values[n++] = running;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002080 if (read_format & PERF_FORMAT_ID)
2081 values[n++] = primary_event_id(event);
2082
2083 if (copy_to_user(buf, values, n * sizeof(u64)))
2084 return -EFAULT;
2085
2086 return n * sizeof(u64);
2087}
2088
2089/*
2090 * Read the performance event - simple non blocking version for now
2091 */
2092static ssize_t
2093perf_read_hw(struct perf_event *event, char __user *buf, size_t count)
2094{
2095 u64 read_format = event->attr.read_format;
2096 int ret;
2097
2098 /*
2099	 * Return end-of-file for a read on an event that is in
2100 * error state (i.e. because it was pinned but it couldn't be
2101 * scheduled on to the CPU at some point).
2102 */
2103 if (event->state == PERF_EVENT_STATE_ERROR)
2104 return 0;
2105
2106 if (count < perf_event_read_size(event))
2107 return -ENOSPC;
2108
2109 WARN_ON_ONCE(event->ctx->parent_ctx);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002110 if (read_format & PERF_FORMAT_GROUP)
2111 ret = perf_event_read_group(event, read_format, buf);
2112 else
2113 ret = perf_event_read_one(event, read_format, buf);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002114
2115 return ret;
2116}
2117
2118static ssize_t
2119perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
2120{
2121 struct perf_event *event = file->private_data;
2122
2123 return perf_read_hw(event, buf, count);
2124}
2125
2126static unsigned int perf_poll(struct file *file, poll_table *wait)
2127{
2128 struct perf_event *event = file->private_data;
2129 struct perf_mmap_data *data;
2130	unsigned int events = POLLHUP;
2131
2132 rcu_read_lock();
2133 data = rcu_dereference(event->data);
2134 if (data)
2135 events = atomic_xchg(&data->poll, 0);
2136 rcu_read_unlock();
2137
2138 poll_wait(file, &event->waitq, wait);
2139
2140 return events;
2141}
2142
2143static void perf_event_reset(struct perf_event *event)
2144{
2145 (void)perf_event_read(event);
2146 atomic64_set(&event->count, 0);
2147 perf_event_update_userpage(event);
2148}
2149
2150/*
2151 * Holding the top-level event's child_mutex means that any
2152 * descendant process that has inherited this event will block
2153 * in sync_child_event if it goes to exit, thus satisfying the
2154 * task existence requirements of perf_event_enable/disable.
2155 */
2156static void perf_event_for_each_child(struct perf_event *event,
2157 void (*func)(struct perf_event *))
2158{
2159 struct perf_event *child;
2160
2161 WARN_ON_ONCE(event->ctx->parent_ctx);
2162 mutex_lock(&event->child_mutex);
2163 func(event);
2164 list_for_each_entry(child, &event->child_list, child_list)
2165 func(child);
2166 mutex_unlock(&event->child_mutex);
2167}
2168
2169static void perf_event_for_each(struct perf_event *event,
2170 void (*func)(struct perf_event *))
2171{
2172 struct perf_event_context *ctx = event->ctx;
2173 struct perf_event *sibling;
2174
2175 WARN_ON_ONCE(ctx->parent_ctx);
2176 mutex_lock(&ctx->mutex);
2177 event = event->group_leader;
2178
2179 perf_event_for_each_child(event, func);
2180 func(event);
2181 list_for_each_entry(sibling, &event->sibling_list, group_entry)
2182 perf_event_for_each_child(event, func);
2183 mutex_unlock(&ctx->mutex);
2184}
2185
2186static int perf_event_period(struct perf_event *event, u64 __user *arg)
2187{
2188 struct perf_event_context *ctx = event->ctx;
2189 unsigned long size;
2190 int ret = 0;
2191 u64 value;
2192
2193 if (!event->attr.sample_period)
2194 return -EINVAL;
2195
2196 size = copy_from_user(&value, arg, sizeof(value));
2197 if (size != sizeof(value))
2198 return -EFAULT;
2199
2200 if (!value)
2201 return -EINVAL;
2202
Thomas Gleixnere625cce12009-11-17 18:02:06 +01002203 raw_spin_lock_irq(&ctx->lock);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002204 if (event->attr.freq) {
2205 if (value > sysctl_perf_event_sample_rate) {
2206 ret = -EINVAL;
2207 goto unlock;
2208 }
2209
2210 event->attr.sample_freq = value;
2211 } else {
2212 event->attr.sample_period = value;
2213 event->hw.sample_period = value;
2214 }
2215unlock:
Thomas Gleixnere625cce12009-11-17 18:02:06 +01002216 raw_spin_unlock_irq(&ctx->lock);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002217
2218 return ret;
2219}
2220
Peter Zijlstraac9721f2010-05-27 12:54:41 +02002221static const struct file_operations perf_fops;
2222
2223static struct perf_event *perf_fget_light(int fd, int *fput_needed)
2224{
2225 struct file *file;
2226
2227 file = fget_light(fd, fput_needed);
2228 if (!file)
2229 return ERR_PTR(-EBADF);
2230
2231 if (file->f_op != &perf_fops) {
2232 fput_light(file, *fput_needed);
2233 *fput_needed = 0;
2234 return ERR_PTR(-EBADF);
2235 }
2236
2237 return file->private_data;
2238}
2239
2240static int perf_event_set_output(struct perf_event *event,
2241 struct perf_event *output_event);
Li Zefan6fb29152009-10-15 11:21:42 +08002242static int perf_event_set_filter(struct perf_event *event, void __user *arg);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002243
2244static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
2245{
2246 struct perf_event *event = file->private_data;
2247 void (*func)(struct perf_event *);
2248 u32 flags = arg;
2249
2250 switch (cmd) {
2251 case PERF_EVENT_IOC_ENABLE:
2252 func = perf_event_enable;
2253 break;
2254 case PERF_EVENT_IOC_DISABLE:
2255 func = perf_event_disable;
2256 break;
2257 case PERF_EVENT_IOC_RESET:
2258 func = perf_event_reset;
2259 break;
2260
2261 case PERF_EVENT_IOC_REFRESH:
2262 return perf_event_refresh(event, arg);
2263
2264 case PERF_EVENT_IOC_PERIOD:
2265 return perf_event_period(event, (u64 __user *)arg);
2266
2267 case PERF_EVENT_IOC_SET_OUTPUT:
Peter Zijlstraac9721f2010-05-27 12:54:41 +02002268 {
2269 struct perf_event *output_event = NULL;
2270 int fput_needed = 0;
2271 int ret;
2272
2273 if (arg != -1) {
2274 output_event = perf_fget_light(arg, &fput_needed);
2275 if (IS_ERR(output_event))
2276 return PTR_ERR(output_event);
2277 }
2278
2279 ret = perf_event_set_output(event, output_event);
2280 if (output_event)
2281 fput_light(output_event->filp, fput_needed);
2282
2283 return ret;
2284 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002285
Li Zefan6fb29152009-10-15 11:21:42 +08002286 case PERF_EVENT_IOC_SET_FILTER:
2287 return perf_event_set_filter(event, (void __user *)arg);
2288
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002289 default:
2290 return -ENOTTY;
2291 }
2292
2293 if (flags & PERF_IOC_FLAG_GROUP)
2294 perf_event_for_each(event, func);
2295 else
2296 perf_event_for_each_child(event, func);
2297
2298 return 0;
2299}
2300
2301int perf_event_task_enable(void)
2302{
2303 struct perf_event *event;
2304
2305 mutex_lock(&current->perf_event_mutex);
2306 list_for_each_entry(event, &current->perf_event_list, owner_entry)
2307 perf_event_for_each_child(event, perf_event_enable);
2308 mutex_unlock(&current->perf_event_mutex);
2309
2310 return 0;
2311}
2312
2313int perf_event_task_disable(void)
2314{
2315 struct perf_event *event;
2316
2317 mutex_lock(&current->perf_event_mutex);
2318 list_for_each_entry(event, &current->perf_event_list, owner_entry)
2319 perf_event_for_each_child(event, perf_event_disable);
2320 mutex_unlock(&current->perf_event_mutex);
2321
2322 return 0;
2323}
2324
2325#ifndef PERF_EVENT_INDEX_OFFSET
2326# define PERF_EVENT_INDEX_OFFSET 0
2327#endif
2328
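/*
 * Index reported in the mmap control page.  0 means the event is not
 * currently on the PMU; otherwise it is the hardware counter index plus
 * one (minus PERF_EVENT_INDEX_OFFSET), which user space can feed to a
 * direct counter read such as rdpmc (using index - 1).
 */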
2329static int perf_event_index(struct perf_event *event)
2330{
2331 if (event->state != PERF_EVENT_STATE_ACTIVE)
2332 return 0;
2333
2334 return event->hw.idx + 1 - PERF_EVENT_INDEX_OFFSET;
2335}
2336
2337/*
2338 * Callers need to ensure there can be no nesting of this function, otherwise
2339	 * the seqlock logic goes bad. We cannot serialize this because the arch
2340 * code calls this from NMI context.
2341 */
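/*
 * A user-space reader of the control page pairs with the lock/seq protocol
 * below roughly like this (sketch only; pc is assumed to point at the
 * mmap()ed struct perf_event_mmap_page):
 *
 *	do {
 *		seq = pc->lock;
 *		barrier();
 *		idx = pc->index;
 *		count = pc->offset;	(plus a hardware read of counter idx - 1)
 *		barrier();
 *	} while (pc->lock != seq);
 */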
2342void perf_event_update_userpage(struct perf_event *event)
2343{
2344 struct perf_event_mmap_page *userpg;
2345 struct perf_mmap_data *data;
2346
2347 rcu_read_lock();
2348 data = rcu_dereference(event->data);
2349 if (!data)
2350 goto unlock;
2351
2352 userpg = data->user_page;
2353
2354 /*
2355 * Disable preemption so as to not let the corresponding user-space
2356 * spin too long if we get preempted.
2357 */
2358 preempt_disable();
2359 ++userpg->lock;
2360 barrier();
2361 userpg->index = perf_event_index(event);
2362 userpg->offset = atomic64_read(&event->count);
2363 if (event->state == PERF_EVENT_STATE_ACTIVE)
2364 userpg->offset -= atomic64_read(&event->hw.prev_count);
2365
2366 userpg->time_enabled = event->total_time_enabled +
2367 atomic64_read(&event->child_total_time_enabled);
2368
2369 userpg->time_running = event->total_time_running +
2370 atomic64_read(&event->child_total_time_running);
2371
2372 barrier();
2373 ++userpg->lock;
2374 preempt_enable();
2375unlock:
2376 rcu_read_unlock();
2377}
2378
Peter Zijlstra906010b2009-09-21 16:08:49 +02002379#ifndef CONFIG_PERF_USE_VMALLOC
2380
2381/*
2382 * Back perf_mmap() with regular GFP_KERNEL-0 pages.
2383 */
2384
2385static struct page *
2386perf_mmap_to_page(struct perf_mmap_data *data, unsigned long pgoff)
2387{
2388 if (pgoff > data->nr_pages)
2389 return NULL;
2390
2391 if (pgoff == 0)
2392 return virt_to_page(data->user_page);
2393
2394 return virt_to_page(data->data_pages[pgoff - 1]);
2395}
2396
Peter Zijlstraa19d35c2010-05-17 18:48:00 +02002397static void *perf_mmap_alloc_page(int cpu)
2398{
2399 struct page *page;
2400 int node;
2401
2402 node = (cpu == -1) ? cpu : cpu_to_node(cpu);
2403 page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0);
2404 if (!page)
2405 return NULL;
2406
2407 return page_address(page);
2408}
2409
Peter Zijlstra906010b2009-09-21 16:08:49 +02002410static struct perf_mmap_data *
2411perf_mmap_data_alloc(struct perf_event *event, int nr_pages)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002412{
2413 struct perf_mmap_data *data;
2414 unsigned long size;
2415 int i;
2416
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002417 size = sizeof(struct perf_mmap_data);
2418 size += nr_pages * sizeof(void *);
2419
2420 data = kzalloc(size, GFP_KERNEL);
2421 if (!data)
2422 goto fail;
2423
Peter Zijlstraa19d35c2010-05-17 18:48:00 +02002424 data->user_page = perf_mmap_alloc_page(event->cpu);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002425 if (!data->user_page)
2426 goto fail_user_page;
2427
2428 for (i = 0; i < nr_pages; i++) {
Peter Zijlstraa19d35c2010-05-17 18:48:00 +02002429 data->data_pages[i] = perf_mmap_alloc_page(event->cpu);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002430 if (!data->data_pages[i])
2431 goto fail_data_pages;
2432 }
2433
2434 data->nr_pages = nr_pages;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002435
Peter Zijlstra906010b2009-09-21 16:08:49 +02002436 return data;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002437
2438fail_data_pages:
2439 for (i--; i >= 0; i--)
2440 free_page((unsigned long)data->data_pages[i]);
2441
2442 free_page((unsigned long)data->user_page);
2443
2444fail_user_page:
2445 kfree(data);
2446
2447fail:
Peter Zijlstra906010b2009-09-21 16:08:49 +02002448 return NULL;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002449}
2450
2451static void perf_mmap_free_page(unsigned long addr)
2452{
2453 struct page *page = virt_to_page((void *)addr);
2454
2455 page->mapping = NULL;
2456 __free_page(page);
2457}
2458
Peter Zijlstra906010b2009-09-21 16:08:49 +02002459static void perf_mmap_data_free(struct perf_mmap_data *data)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002460{
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002461 int i;
2462
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002463 perf_mmap_free_page((unsigned long)data->user_page);
2464 for (i = 0; i < data->nr_pages; i++)
2465 perf_mmap_free_page((unsigned long)data->data_pages[i]);
Kristian Høgsbergec70ccd2009-12-01 15:05:01 -05002466 kfree(data);
Peter Zijlstra906010b2009-09-21 16:08:49 +02002467}
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002468
Peter Zijlstra3cafa9f2010-05-20 19:07:56 +02002469static inline int page_order(struct perf_mmap_data *data)
2470{
2471 return 0;
2472}
2473
Peter Zijlstra906010b2009-09-21 16:08:49 +02002474#else
2475
2476/*
2477 * Back perf_mmap() with vmalloc memory.
2478 *
2479 * Required for architectures that have d-cache aliasing issues.
2480 */
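/*
 * In this variant the user page and all data pages come from one
 * contiguous vmalloc_user() allocation: data->data_pages[0] is the only
 * array entry, data->nr_pages stays 1, and data->page_order records how
 * many real pages that single "data page" spans (see perf_mmap_data_alloc()
 * below).
 */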
2481
Peter Zijlstra3cafa9f2010-05-20 19:07:56 +02002482static inline int page_order(struct perf_mmap_data *data)
2483{
2484 return data->page_order;
2485}
2486
Peter Zijlstra906010b2009-09-21 16:08:49 +02002487static struct page *
2488perf_mmap_to_page(struct perf_mmap_data *data, unsigned long pgoff)
2489{
Peter Zijlstra3cafa9f2010-05-20 19:07:56 +02002490 if (pgoff > (1UL << page_order(data)))
Peter Zijlstra906010b2009-09-21 16:08:49 +02002491 return NULL;
2492
2493 return vmalloc_to_page((void *)data->user_page + pgoff * PAGE_SIZE);
2494}
2495
2496static void perf_mmap_unmark_page(void *addr)
2497{
2498 struct page *page = vmalloc_to_page(addr);
2499
2500 page->mapping = NULL;
2501}
2502
2503static void perf_mmap_data_free_work(struct work_struct *work)
2504{
2505 struct perf_mmap_data *data;
2506 void *base;
2507 int i, nr;
2508
2509 data = container_of(work, struct perf_mmap_data, work);
Peter Zijlstra3cafa9f2010-05-20 19:07:56 +02002510 nr = 1 << page_order(data);
Peter Zijlstra906010b2009-09-21 16:08:49 +02002511
2512 base = data->user_page;
2513 for (i = 0; i < nr + 1; i++)
2514 perf_mmap_unmark_page(base + (i * PAGE_SIZE));
2515
2516 vfree(base);
Kristian Høgsbergec70ccd2009-12-01 15:05:01 -05002517 kfree(data);
Peter Zijlstra906010b2009-09-21 16:08:49 +02002518}
2519
2520static void perf_mmap_data_free(struct perf_mmap_data *data)
2521{
2522 schedule_work(&data->work);
2523}
2524
2525static struct perf_mmap_data *
2526perf_mmap_data_alloc(struct perf_event *event, int nr_pages)
2527{
2528 struct perf_mmap_data *data;
2529 unsigned long size;
2530 void *all_buf;
2531
Peter Zijlstra906010b2009-09-21 16:08:49 +02002532 size = sizeof(struct perf_mmap_data);
2533 size += sizeof(void *);
2534
2535 data = kzalloc(size, GFP_KERNEL);
2536 if (!data)
2537 goto fail;
2538
2539 INIT_WORK(&data->work, perf_mmap_data_free_work);
2540
2541 all_buf = vmalloc_user((nr_pages + 1) * PAGE_SIZE);
2542 if (!all_buf)
2543 goto fail_all_buf;
2544
2545 data->user_page = all_buf;
2546 data->data_pages[0] = all_buf + PAGE_SIZE;
Peter Zijlstra3cafa9f2010-05-20 19:07:56 +02002547 data->page_order = ilog2(nr_pages);
Peter Zijlstra906010b2009-09-21 16:08:49 +02002548 data->nr_pages = 1;
2549
2550 return data;
2551
2552fail_all_buf:
2553 kfree(data);
2554
2555fail:
2556 return NULL;
2557}
2558
2559#endif
2560
Peter Zijlstra3cafa9f2010-05-20 19:07:56 +02002561static unsigned long perf_data_size(struct perf_mmap_data *data)
2562{
2563 return data->nr_pages << (PAGE_SHIFT + page_order(data));
2564}
2565
Peter Zijlstra906010b2009-09-21 16:08:49 +02002566static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
2567{
2568 struct perf_event *event = vma->vm_file->private_data;
2569 struct perf_mmap_data *data;
2570 int ret = VM_FAULT_SIGBUS;
2571
2572 if (vmf->flags & FAULT_FLAG_MKWRITE) {
2573 if (vmf->pgoff == 0)
2574 ret = 0;
2575 return ret;
2576 }
2577
2578 rcu_read_lock();
2579 data = rcu_dereference(event->data);
2580 if (!data)
2581 goto unlock;
2582
2583 if (vmf->pgoff && (vmf->flags & FAULT_FLAG_WRITE))
2584 goto unlock;
2585
2586 vmf->page = perf_mmap_to_page(data, vmf->pgoff);
2587 if (!vmf->page)
2588 goto unlock;
2589
2590 get_page(vmf->page);
2591 vmf->page->mapping = vma->vm_file->f_mapping;
2592 vmf->page->index = vmf->pgoff;
2593
2594 ret = 0;
2595unlock:
2596 rcu_read_unlock();
2597
2598 return ret;
2599}
2600
2601static void
2602perf_mmap_data_init(struct perf_event *event, struct perf_mmap_data *data)
2603{
2604 long max_size = perf_data_size(data);
2605
Peter Zijlstra906010b2009-09-21 16:08:49 +02002606 if (event->attr.watermark) {
2607 data->watermark = min_t(long, max_size,
2608 event->attr.wakeup_watermark);
2609 }
2610
2611 if (!data->watermark)
Stephane Eranian8904b182009-11-20 22:19:57 +01002612 data->watermark = max_size / 2;
Peter Zijlstra906010b2009-09-21 16:08:49 +02002613
Peter Zijlstraac9721f2010-05-27 12:54:41 +02002614 atomic_set(&data->refcount, 1);
Peter Zijlstra906010b2009-09-21 16:08:49 +02002615 rcu_assign_pointer(event->data, data);
2616}
2617
2618static void perf_mmap_data_free_rcu(struct rcu_head *rcu_head)
2619{
2620 struct perf_mmap_data *data;
2621
2622 data = container_of(rcu_head, struct perf_mmap_data, rcu_head);
2623 perf_mmap_data_free(data);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002624}
2625
Peter Zijlstraac9721f2010-05-27 12:54:41 +02002626static struct perf_mmap_data *perf_mmap_data_get(struct perf_event *event)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002627{
Peter Zijlstraac9721f2010-05-27 12:54:41 +02002628 struct perf_mmap_data *data;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002629
Peter Zijlstraac9721f2010-05-27 12:54:41 +02002630 rcu_read_lock();
2631 data = rcu_dereference(event->data);
2632 if (data) {
2633 if (!atomic_inc_not_zero(&data->refcount))
2634 data = NULL;
2635 }
2636 rcu_read_unlock();
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002637
Peter Zijlstraac9721f2010-05-27 12:54:41 +02002638 return data;
2639}
2640
2641static void perf_mmap_data_put(struct perf_mmap_data *data)
2642{
2643 if (!atomic_dec_and_test(&data->refcount))
2644 return;
2645
Peter Zijlstra906010b2009-09-21 16:08:49 +02002646 call_rcu(&data->rcu_head, perf_mmap_data_free_rcu);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002647}
2648
2649static void perf_mmap_open(struct vm_area_struct *vma)
2650{
2651 struct perf_event *event = vma->vm_file->private_data;
2652
2653 atomic_inc(&event->mmap_count);
2654}
2655
2656static void perf_mmap_close(struct vm_area_struct *vma)
2657{
2658 struct perf_event *event = vma->vm_file->private_data;
2659
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002660 if (atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex)) {
Peter Zijlstra906010b2009-09-21 16:08:49 +02002661 unsigned long size = perf_data_size(event->data);
Peter Zijlstraac9721f2010-05-27 12:54:41 +02002662 struct user_struct *user = event->mmap_user;
2663 struct perf_mmap_data *data = event->data;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002664
Peter Zijlstra906010b2009-09-21 16:08:49 +02002665 atomic_long_sub((size >> PAGE_SHIFT) + 1, &user->locked_vm);
Peter Zijlstraac9721f2010-05-27 12:54:41 +02002666 vma->vm_mm->locked_vm -= event->mmap_locked;
2667 rcu_assign_pointer(event->data, NULL);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002668 mutex_unlock(&event->mmap_mutex);
Peter Zijlstraac9721f2010-05-27 12:54:41 +02002669
2670 perf_mmap_data_put(data);
2671 free_uid(user);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002672 }
2673}
2674
Alexey Dobriyanf0f37e2f2009-09-27 22:29:37 +04002675static const struct vm_operations_struct perf_mmap_vmops = {
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002676 .open = perf_mmap_open,
2677 .close = perf_mmap_close,
2678 .fault = perf_mmap_fault,
2679 .page_mkwrite = perf_mmap_fault,
2680};
2681
2682static int perf_mmap(struct file *file, struct vm_area_struct *vma)
2683{
2684 struct perf_event *event = file->private_data;
2685 unsigned long user_locked, user_lock_limit;
2686 struct user_struct *user = current_user();
2687 unsigned long locked, lock_limit;
Peter Zijlstra906010b2009-09-21 16:08:49 +02002688 struct perf_mmap_data *data;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002689 unsigned long vma_size;
2690 unsigned long nr_pages;
2691 long user_extra, extra;
2692 int ret = 0;
2693
Peter Zijlstrac7920612010-05-18 10:33:24 +02002694 /*
2695 * Don't allow mmap() of inherited per-task counters. This would
2696 * create a performance issue due to all children writing to the
2697 * same buffer.
2698 */
2699 if (event->cpu == -1 && event->attr.inherit)
2700 return -EINVAL;
2701
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002702 if (!(vma->vm_flags & VM_SHARED))
2703 return -EINVAL;
2704
2705 vma_size = vma->vm_end - vma->vm_start;
2706 nr_pages = (vma_size / PAGE_SIZE) - 1;
2707
2708 /*
2709 * If we have data pages ensure they're a power-of-two number, so we
2710 * can do bitmasks instead of modulo.
2711 */
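	/*
	 * E.g. with nr_pages == 8 the data area is 8 * PAGE_SIZE bytes and
	 * head % (8 * PAGE_SIZE) can be computed as head & (8 * PAGE_SIZE - 1)
	 * in perf_output_space() and the output handle code.
	 */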
2712 if (nr_pages != 0 && !is_power_of_2(nr_pages))
2713 return -EINVAL;
2714
2715 if (vma_size != PAGE_SIZE * (1 + nr_pages))
2716 return -EINVAL;
2717
2718 if (vma->vm_pgoff != 0)
2719 return -EINVAL;
2720
2721 WARN_ON_ONCE(event->ctx->parent_ctx);
2722 mutex_lock(&event->mmap_mutex);
Peter Zijlstraac9721f2010-05-27 12:54:41 +02002723 if (event->data) {
2724 if (event->data->nr_pages == nr_pages)
2725 atomic_inc(&event->data->refcount);
2726 else
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002727 ret = -EINVAL;
2728 goto unlock;
2729 }
2730
2731 user_extra = nr_pages + 1;
2732 user_lock_limit = sysctl_perf_event_mlock >> (PAGE_SHIFT - 10);
2733
2734 /*
2735 * Increase the limit linearly with more CPUs:
2736 */
2737 user_lock_limit *= num_online_cpus();
2738
2739 user_locked = atomic_long_read(&user->locked_vm) + user_extra;
2740
2741 extra = 0;
2742 if (user_locked > user_lock_limit)
2743 extra = user_locked - user_lock_limit;
2744
Jiri Slaby78d7d402010-03-05 13:42:54 -08002745 lock_limit = rlimit(RLIMIT_MEMLOCK);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002746 lock_limit >>= PAGE_SHIFT;
2747 locked = vma->vm_mm->locked_vm + extra;
2748
2749 if ((locked > lock_limit) && perf_paranoid_tracepoint_raw() &&
2750 !capable(CAP_IPC_LOCK)) {
2751 ret = -EPERM;
2752 goto unlock;
2753 }
2754
2755 WARN_ON(event->data);
Peter Zijlstra906010b2009-09-21 16:08:49 +02002756
2757 data = perf_mmap_data_alloc(event, nr_pages);
Peter Zijlstraac9721f2010-05-27 12:54:41 +02002758 if (!data) {
2759 ret = -ENOMEM;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002760 goto unlock;
Peter Zijlstraac9721f2010-05-27 12:54:41 +02002761 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002762
Peter Zijlstra906010b2009-09-21 16:08:49 +02002763 perf_mmap_data_init(event, data);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002764 if (vma->vm_flags & VM_WRITE)
2765 event->data->writable = 1;
2766
Peter Zijlstraac9721f2010-05-27 12:54:41 +02002767 atomic_long_add(user_extra, &user->locked_vm);
2768 event->mmap_locked = extra;
2769 event->mmap_user = get_current_user();
2770 vma->vm_mm->locked_vm += event->mmap_locked;
2771
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002772unlock:
Peter Zijlstraac9721f2010-05-27 12:54:41 +02002773 if (!ret)
2774 atomic_inc(&event->mmap_count);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002775 mutex_unlock(&event->mmap_mutex);
2776
2777 vma->vm_flags |= VM_RESERVED;
2778 vma->vm_ops = &perf_mmap_vmops;
2779
2780 return ret;
2781}
2782
2783static int perf_fasync(int fd, struct file *filp, int on)
2784{
2785 struct inode *inode = filp->f_path.dentry->d_inode;
2786 struct perf_event *event = filp->private_data;
2787 int retval;
2788
2789 mutex_lock(&inode->i_mutex);
2790 retval = fasync_helper(fd, filp, on, &event->fasync);
2791 mutex_unlock(&inode->i_mutex);
2792
2793 if (retval < 0)
2794 return retval;
2795
2796 return 0;
2797}
2798
2799static const struct file_operations perf_fops = {
Arnd Bergmann3326c1c2010-03-23 19:09:33 +01002800 .llseek = no_llseek,
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002801 .release = perf_release,
2802 .read = perf_read,
2803 .poll = perf_poll,
2804 .unlocked_ioctl = perf_ioctl,
2805 .compat_ioctl = perf_ioctl,
2806 .mmap = perf_mmap,
2807 .fasync = perf_fasync,
2808};
2809
2810/*
2811 * Perf event wakeup
2812 *
2813 * If there's data, ensure we set the poll() state and publish everything
2814 * to user-space before waking everybody up.
2815 */
2816
2817void perf_event_wakeup(struct perf_event *event)
2818{
2819 wake_up_all(&event->waitq);
2820
2821 if (event->pending_kill) {
2822 kill_fasync(&event->fasync, SIGIO, event->pending_kill);
2823 event->pending_kill = 0;
2824 }
2825}
2826
2827/*
2828 * Pending wakeups
2829 *
2830	 * Handle the case where we need to wake up from NMI (or rq->lock) context.
2831 *
2832 * The NMI bit means we cannot possibly take locks. Therefore, maintain a
2833 * single linked list and use cmpxchg() to add entries lockless.
2834 */
2835
2836static void perf_pending_event(struct perf_pending_entry *entry)
2837{
2838 struct perf_event *event = container_of(entry,
2839 struct perf_event, pending);
2840
2841 if (event->pending_disable) {
2842 event->pending_disable = 0;
2843 __perf_event_disable(event);
2844 }
2845
2846 if (event->pending_wakeup) {
2847 event->pending_wakeup = 0;
2848 perf_event_wakeup(event);
2849 }
2850}
2851
2852#define PENDING_TAIL ((struct perf_pending_entry *)-1UL)
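/*
 * entry->next doubles as the queued flag: NULL means not queued, and
 * PENDING_TAIL terminates each per-CPU list, so an entry that is already
 * on some CPU's list fails the cmpxchg() in perf_pending_queue() and is
 * never queued twice.
 */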
2853
2854static DEFINE_PER_CPU(struct perf_pending_entry *, perf_pending_head) = {
2855 PENDING_TAIL,
2856};
2857
2858static void perf_pending_queue(struct perf_pending_entry *entry,
2859 void (*func)(struct perf_pending_entry *))
2860{
2861 struct perf_pending_entry **head;
2862
2863 if (cmpxchg(&entry->next, NULL, PENDING_TAIL) != NULL)
2864 return;
2865
2866 entry->func = func;
2867
2868 head = &get_cpu_var(perf_pending_head);
2869
2870 do {
2871 entry->next = *head;
2872 } while (cmpxchg(head, entry->next, entry) != entry->next);
2873
2874 set_perf_event_pending();
2875
2876 put_cpu_var(perf_pending_head);
2877}
2878
2879static int __perf_pending_run(void)
2880{
2881 struct perf_pending_entry *list;
2882 int nr = 0;
2883
2884 list = xchg(&__get_cpu_var(perf_pending_head), PENDING_TAIL);
2885 while (list != PENDING_TAIL) {
2886 void (*func)(struct perf_pending_entry *);
2887 struct perf_pending_entry *entry = list;
2888
2889 list = list->next;
2890
2891 func = entry->func;
2892 entry->next = NULL;
2893 /*
2894 * Ensure we observe the unqueue before we issue the wakeup,
2895 * so that we won't be waiting forever.
2896 * -- see perf_not_pending().
2897 */
2898 smp_wmb();
2899
2900 func(entry);
2901 nr++;
2902 }
2903
2904 return nr;
2905}
2906
2907static inline int perf_not_pending(struct perf_event *event)
2908{
2909 /*
2910	 * If we flush on whatever CPU we are currently running on, there is
2911	 * a chance we don't need to wait.
2912 */
2913 get_cpu();
2914 __perf_pending_run();
2915 put_cpu();
2916
2917 /*
2918 * Ensure we see the proper queue state before going to sleep
2919	 * so that we do not miss the wakeup. -- see perf_pending_queue()
2920 */
2921 smp_rmb();
2922 return event->pending.next == NULL;
2923}
2924
2925static void perf_pending_sync(struct perf_event *event)
2926{
2927 wait_event(event->waitq, perf_not_pending(event));
2928}
2929
2930void perf_event_do_pending(void)
2931{
2932 __perf_pending_run();
2933}
2934
2935/*
2936 * Callchain support -- arch specific
2937 */
2938
2939__weak struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
2940{
2941 return NULL;
2942}
2943
Frederic Weisbecker5331d7b2010-03-04 21:15:56 +01002944__weak
2945void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int skip)
2946{
2947}
Frederic Weisbecker26d80aa2010-04-03 12:22:05 +02002948
Frederic Weisbecker5331d7b2010-03-04 21:15:56 +01002949
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002950/*
Zhang, Yanmin39447b32010-04-19 13:32:41 +08002951 * We assume there is only KVM supporting the callbacks.
2952 * Later on, we might change it to a list if there is
2953 * another virtualization implementation supporting the callbacks.
2954 */
2955struct perf_guest_info_callbacks *perf_guest_cbs;
2956
2957int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
2958{
2959 perf_guest_cbs = cbs;
2960 return 0;
2961}
2962EXPORT_SYMBOL_GPL(perf_register_guest_info_callbacks);
2963
2964int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
2965{
2966 perf_guest_cbs = NULL;
2967 return 0;
2968}
2969EXPORT_SYMBOL_GPL(perf_unregister_guest_info_callbacks);
2970
2971/*
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002972 * Output
2973 */
2974static bool perf_output_space(struct perf_mmap_data *data, unsigned long tail,
2975 unsigned long offset, unsigned long head)
2976{
2977 unsigned long mask;
2978
2979 if (!data->writable)
2980 return true;
2981
Peter Zijlstra906010b2009-09-21 16:08:49 +02002982 mask = perf_data_size(data) - 1;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002983
2984 offset = (offset - tail) & mask;
2985 head = (head - tail) & mask;
2986
2987 if ((int)(head - offset) < 0)
2988 return false;
2989
2990 return true;
2991}
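/*
 * Worked example (illustrative numbers): four data pages of 4K give
 * mask = 0x3fff.  With tail = 0x1000 and offset (the old head) = 0x4800,
 * 0x3800 bytes are still unread and only 0x800 bytes are free; a 0x900
 * byte record would give head = 0x5100, so (head - tail) & mask = 0x100
 * ends up smaller than (offset - tail) & mask = 0x3800 and the function
 * returns false, making perf_output_begin() drop the record and bump
 * data->lost instead of overwriting unread data.
 */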
2992
2993static void perf_output_wakeup(struct perf_output_handle *handle)
2994{
2995 atomic_set(&handle->data->poll, POLL_IN);
2996
2997 if (handle->nmi) {
2998 handle->event->pending_wakeup = 1;
2999 perf_pending_queue(&handle->event->pending,
3000 perf_pending_event);
3001 } else
3002 perf_event_wakeup(handle->event);
3003}
3004
3005/*
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003006 * We need to ensure a later event_id doesn't publish a head when a former
Peter Zijlstraef607772010-05-18 10:50:41 +02003007 * event isn't done writing. However since we need to deal with NMIs we
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003008 * cannot fully serialize things.
3009 *
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003010 * We only publish the head (and generate a wakeup) when the outer-most
Peter Zijlstraef607772010-05-18 10:50:41 +02003011 * event completes.
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003012 */
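/*
 * Concretely (sketch): a writer takes the handle in perf_output_begin(),
 * so data->nest is at least 1 for the duration of its write.  An NMI that
 * interrupts it nests to 2, writes its own record, and its
 * perf_output_put_handle() merely drops the count back to 1 without
 * publishing.  The interrupted writer's put_handle() then drops it to 0,
 * publishes data_head, and the "goto again" below re-checks for any head
 * advance it may have raced with.
 */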
Peter Zijlstraef607772010-05-18 10:50:41 +02003013static void perf_output_get_handle(struct perf_output_handle *handle)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003014{
3015 struct perf_mmap_data *data = handle->data;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003016
Peter Zijlstraef607772010-05-18 10:50:41 +02003017 preempt_disable();
Peter Zijlstrafa588152010-05-18 10:54:20 +02003018 local_inc(&data->nest);
Peter Zijlstra6d1acfd2010-05-18 11:12:48 +02003019 handle->wakeup = local_read(&data->wakeup);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003020}
3021
Peter Zijlstraef607772010-05-18 10:50:41 +02003022static void perf_output_put_handle(struct perf_output_handle *handle)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003023{
3024 struct perf_mmap_data *data = handle->data;
3025 unsigned long head;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003026
3027again:
Peter Zijlstrafa588152010-05-18 10:54:20 +02003028 head = local_read(&data->head);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003029
3030 /*
Peter Zijlstraef607772010-05-18 10:50:41 +02003031 * IRQ/NMI can happen here, which means we can miss a head update.
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003032 */
3033
Peter Zijlstrafa588152010-05-18 10:54:20 +02003034 if (!local_dec_and_test(&data->nest))
Frederic Weisbeckeracd35a42010-05-20 21:28:34 +02003035 goto out;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003036
3037 /*
Peter Zijlstraef607772010-05-18 10:50:41 +02003038 * Publish the known good head. Rely on the full barrier implied
3039	 * by local_dec_and_test() to order the data->head read and this
3040 * write.
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003041 */
Peter Zijlstraef607772010-05-18 10:50:41 +02003042 data->user_page->data_head = head;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003043
Peter Zijlstraef607772010-05-18 10:50:41 +02003044 /*
3045 * Now check if we missed an update, rely on the (compiler)
3046	 * barrier in local_dec_and_test() to re-read data->head.
3047 */
Peter Zijlstrafa588152010-05-18 10:54:20 +02003048 if (unlikely(head != local_read(&data->head))) {
3049 local_inc(&data->nest);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003050 goto again;
3051 }
3052
Peter Zijlstra6d1acfd2010-05-18 11:12:48 +02003053 if (handle->wakeup != local_read(&data->wakeup))
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003054 perf_output_wakeup(handle);
Peter Zijlstraef607772010-05-18 10:50:41 +02003055
Frederic Weisbeckeracd35a42010-05-20 21:28:34 +02003056 out:
Peter Zijlstraef607772010-05-18 10:50:41 +02003057 preempt_enable();
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003058}
3059
Peter Zijlstraa94ffaa2010-05-20 19:50:07 +02003060__always_inline void perf_output_copy(struct perf_output_handle *handle,
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003061 const void *buf, unsigned int len)
3062{
Peter Zijlstra5d967a82010-05-20 16:46:39 +02003063 do {
Peter Zijlstraa94ffaa2010-05-20 19:50:07 +02003064 unsigned long size = min_t(unsigned long, handle->size, len);
Peter Zijlstra5d967a82010-05-20 16:46:39 +02003065
3066 memcpy(handle->addr, buf, size);
3067
3068 len -= size;
3069 handle->addr += size;
Frederic Weisbecker74048f82010-05-27 21:34:58 +02003070 buf += size;
Peter Zijlstra5d967a82010-05-20 16:46:39 +02003071 handle->size -= size;
3072 if (!handle->size) {
Peter Zijlstra3cafa9f2010-05-20 19:07:56 +02003073 struct perf_mmap_data *data = handle->data;
3074
Peter Zijlstra5d967a82010-05-20 16:46:39 +02003075 handle->page++;
Peter Zijlstra3cafa9f2010-05-20 19:07:56 +02003076 handle->page &= data->nr_pages - 1;
3077 handle->addr = data->data_pages[handle->page];
3078 handle->size = PAGE_SIZE << page_order(data);
Peter Zijlstra5d967a82010-05-20 16:46:39 +02003079 }
3080 } while (len);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003081}
3082
3083int perf_output_begin(struct perf_output_handle *handle,
3084 struct perf_event *event, unsigned int size,
3085 int nmi, int sample)
3086{
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003087 struct perf_mmap_data *data;
3088 unsigned long tail, offset, head;
3089 int have_lost;
3090 struct {
3091 struct perf_event_header header;
3092 u64 id;
3093 u64 lost;
3094 } lost_event;
3095
3096 rcu_read_lock();
3097 /*
3098 * For inherited events we send all the output towards the parent.
3099 */
3100 if (event->parent)
3101 event = event->parent;
3102
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003103 data = rcu_dereference(event->data);
3104 if (!data)
3105 goto out;
3106
3107 handle->data = data;
3108 handle->event = event;
3109 handle->nmi = nmi;
3110 handle->sample = sample;
3111
3112 if (!data->nr_pages)
Stephane Eranian00d1d0b2010-05-17 12:46:01 +02003113 goto out;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003114
Peter Zijlstrafa588152010-05-18 10:54:20 +02003115 have_lost = local_read(&data->lost);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003116 if (have_lost)
3117 size += sizeof(lost_event);
3118
Peter Zijlstraef607772010-05-18 10:50:41 +02003119 perf_output_get_handle(handle);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003120
3121 do {
3122 /*
3123 * Userspace could choose to issue a mb() before updating the
3124	 * tail pointer, so that all reads will be completed before the
3125 * write is issued.
3126 */
3127 tail = ACCESS_ONCE(data->user_page->data_tail);
3128 smp_rmb();
Peter Zijlstrafa588152010-05-18 10:54:20 +02003129 offset = head = local_read(&data->head);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003130 head += size;
3131 if (unlikely(!perf_output_space(data, tail, offset, head)))
3132 goto fail;
Peter Zijlstrafa588152010-05-18 10:54:20 +02003133 } while (local_cmpxchg(&data->head, offset, head) != offset);
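	/*
	 * The matching user-space consumer side, for a buffer mapped
	 * read-write (sketch only): read data_head, order the reads, consume
	 * the records, then publish data_tail so the check above never lets
	 * the kernel overwrite unread data:
	 *
	 *	head = pc->data_head;
	 *	rmb();
	 *	... consume records in [tail, head) ...
	 *	mb();
	 *	pc->data_tail = head;
	 */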
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003134
Peter Zijlstraadb8e112010-05-20 16:21:55 +02003135 if (head - local_read(&data->wakeup) > data->watermark)
3136 local_add(data->watermark, &data->wakeup);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003137
Peter Zijlstraa94ffaa2010-05-20 19:50:07 +02003138 handle->page = offset >> (PAGE_SHIFT + page_order(data));
Peter Zijlstra5d967a82010-05-20 16:46:39 +02003139 handle->page &= data->nr_pages - 1;
Peter Zijlstraa94ffaa2010-05-20 19:50:07 +02003140 handle->size = offset & ((PAGE_SIZE << page_order(data)) - 1);
Peter Zijlstra5d967a82010-05-20 16:46:39 +02003141 handle->addr = data->data_pages[handle->page];
3142 handle->addr += handle->size;
Peter Zijlstra3cafa9f2010-05-20 19:07:56 +02003143 handle->size = (PAGE_SIZE << page_order(data)) - handle->size;
Peter Zijlstra5d967a82010-05-20 16:46:39 +02003144
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003145 if (have_lost) {
3146 lost_event.header.type = PERF_RECORD_LOST;
3147 lost_event.header.misc = 0;
3148 lost_event.header.size = sizeof(lost_event);
3149 lost_event.id = event->id;
Peter Zijlstrafa588152010-05-18 10:54:20 +02003150 lost_event.lost = local_xchg(&data->lost, 0);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003151
3152 perf_output_put(handle, lost_event);
3153 }
3154
3155 return 0;
3156
3157fail:
Peter Zijlstrafa588152010-05-18 10:54:20 +02003158 local_inc(&data->lost);
Peter Zijlstraef607772010-05-18 10:50:41 +02003159 perf_output_put_handle(handle);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003160out:
3161 rcu_read_unlock();
3162
3163 return -ENOSPC;
3164}
3165
3166void perf_output_end(struct perf_output_handle *handle)
3167{
3168 struct perf_event *event = handle->event;
3169 struct perf_mmap_data *data = handle->data;
3170
3171 int wakeup_events = event->attr.wakeup_events;
3172
3173 if (handle->sample && wakeup_events) {
Peter Zijlstrafa588152010-05-18 10:54:20 +02003174 int events = local_inc_return(&data->events);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003175 if (events >= wakeup_events) {
Peter Zijlstrafa588152010-05-18 10:54:20 +02003176 local_sub(wakeup_events, &data->events);
3177 local_inc(&data->wakeup);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003178 }
3179 }
3180
Peter Zijlstraef607772010-05-18 10:50:41 +02003181 perf_output_put_handle(handle);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003182 rcu_read_unlock();
3183}
3184
3185static u32 perf_event_pid(struct perf_event *event, struct task_struct *p)
3186{
3187 /*
3188 * only top level events have the pid namespace they were created in
3189 */
3190 if (event->parent)
3191 event = event->parent;
3192
3193 return task_tgid_nr_ns(p, event->ns);
3194}
3195
3196static u32 perf_event_tid(struct perf_event *event, struct task_struct *p)
3197{
3198 /*
3199 * only top level events have the pid namespace they were created in
3200 */
3201 if (event->parent)
3202 event = event->parent;
3203
3204 return task_pid_nr_ns(p, event->ns);
3205}
3206
3207static void perf_output_read_one(struct perf_output_handle *handle,
3208 struct perf_event *event)
3209{
3210 u64 read_format = event->attr.read_format;
3211 u64 values[4];
3212 int n = 0;
3213
3214 values[n++] = atomic64_read(&event->count);
3215 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
3216 values[n++] = event->total_time_enabled +
3217 atomic64_read(&event->child_total_time_enabled);
3218 }
3219 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
3220 values[n++] = event->total_time_running +
3221 atomic64_read(&event->child_total_time_running);
3222 }
3223 if (read_format & PERF_FORMAT_ID)
3224 values[n++] = primary_event_id(event);
3225
3226 perf_output_copy(handle, values, n * sizeof(u64));
3227}
3228
3229/*
3230 * XXX PERF_FORMAT_GROUP vs inherited events seems difficult.
3231 */
3232static void perf_output_read_group(struct perf_output_handle *handle,
3233 struct perf_event *event)
3234{
3235 struct perf_event *leader = event->group_leader, *sub;
3236 u64 read_format = event->attr.read_format;
3237 u64 values[5];
3238 int n = 0;
3239
3240 values[n++] = 1 + leader->nr_siblings;
3241
3242 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
3243 values[n++] = leader->total_time_enabled;
3244
3245 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
3246 values[n++] = leader->total_time_running;
3247
3248 if (leader != event)
3249 leader->pmu->read(leader);
3250
3251 values[n++] = atomic64_read(&leader->count);
3252 if (read_format & PERF_FORMAT_ID)
3253 values[n++] = primary_event_id(leader);
3254
3255 perf_output_copy(handle, values, n * sizeof(u64));
3256
3257 list_for_each_entry(sub, &leader->sibling_list, group_entry) {
3258 n = 0;
3259
3260 if (sub != event)
3261 sub->pmu->read(sub);
3262
3263 values[n++] = atomic64_read(&sub->count);
3264 if (read_format & PERF_FORMAT_ID)
3265 values[n++] = primary_event_id(sub);
3266
3267 perf_output_copy(handle, values, n * sizeof(u64));
3268 }
3269}
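/*
 * Record layout sketch for the group read emitted above (derived from the
 * code; optional fields depend on the read_format bits):
 *
 *	u64 nr;			// 1 + leader->nr_siblings
 *	u64 time_enabled;	// PERF_FORMAT_TOTAL_TIME_ENABLED only
 *	u64 time_running;	// PERF_FORMAT_TOTAL_TIME_RUNNING only
 *	u64 value;		// leader count
 *	u64 id;			// PERF_FORMAT_ID only
 *	...			// { value[, id] } once per sibling
 */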
3270
3271static void perf_output_read(struct perf_output_handle *handle,
3272 struct perf_event *event)
3273{
3274 if (event->attr.read_format & PERF_FORMAT_GROUP)
3275 perf_output_read_group(handle, event);
3276 else
3277 perf_output_read_one(handle, event);
3278}
3279
3280void perf_output_sample(struct perf_output_handle *handle,
3281 struct perf_event_header *header,
3282 struct perf_sample_data *data,
3283 struct perf_event *event)
3284{
3285 u64 sample_type = data->type;
3286
3287 perf_output_put(handle, *header);
3288
3289 if (sample_type & PERF_SAMPLE_IP)
3290 perf_output_put(handle, data->ip);
3291
3292 if (sample_type & PERF_SAMPLE_TID)
3293 perf_output_put(handle, data->tid_entry);
3294
3295 if (sample_type & PERF_SAMPLE_TIME)
3296 perf_output_put(handle, data->time);
3297
3298 if (sample_type & PERF_SAMPLE_ADDR)
3299 perf_output_put(handle, data->addr);
3300
3301 if (sample_type & PERF_SAMPLE_ID)
3302 perf_output_put(handle, data->id);
3303
3304 if (sample_type & PERF_SAMPLE_STREAM_ID)
3305 perf_output_put(handle, data->stream_id);
3306
3307 if (sample_type & PERF_SAMPLE_CPU)
3308 perf_output_put(handle, data->cpu_entry);
3309
3310 if (sample_type & PERF_SAMPLE_PERIOD)
3311 perf_output_put(handle, data->period);
3312
3313 if (sample_type & PERF_SAMPLE_READ)
3314 perf_output_read(handle, event);
3315
3316 if (sample_type & PERF_SAMPLE_CALLCHAIN) {
3317 if (data->callchain) {
3318 int size = 1 + data->callchain->nr;
3322
3323 size *= sizeof(u64);
3324
3325 perf_output_copy(handle, data->callchain, size);
3326 } else {
3327 u64 nr = 0;
3328 perf_output_put(handle, nr);
3329 }
3330 }
3331
3332 if (sample_type & PERF_SAMPLE_RAW) {
3333 if (data->raw) {
3334 perf_output_put(handle, data->raw->size);
3335 perf_output_copy(handle, data->raw->data,
3336 data->raw->size);
3337 } else {
3338 struct {
3339 u32 size;
3340 u32 data;
3341 } raw = {
3342 .size = sizeof(u32),
3343 .data = 0,
3344 };
3345 perf_output_put(handle, raw);
3346 }
3347 }
3348}
3349
3350void perf_prepare_sample(struct perf_event_header *header,
3351 struct perf_sample_data *data,
3352 struct perf_event *event,
3353 struct pt_regs *regs)
3354{
3355 u64 sample_type = event->attr.sample_type;
3356
3357 data->type = sample_type;
3358
3359 header->type = PERF_RECORD_SAMPLE;
3360 header->size = sizeof(*header);
3361
3362 header->misc = 0;
3363 header->misc |= perf_misc_flags(regs);
3364
3365 if (sample_type & PERF_SAMPLE_IP) {
3366 data->ip = perf_instruction_pointer(regs);
3367
3368 header->size += sizeof(data->ip);
3369 }
3370
3371 if (sample_type & PERF_SAMPLE_TID) {
3372 /* resolve pid/tid in the namespace the event was created in */
3373 data->tid_entry.pid = perf_event_pid(event, current);
3374 data->tid_entry.tid = perf_event_tid(event, current);
3375
3376 header->size += sizeof(data->tid_entry);
3377 }
3378
3379 if (sample_type & PERF_SAMPLE_TIME) {
3380 data->time = perf_clock();
3381
3382 header->size += sizeof(data->time);
3383 }
3384
3385 if (sample_type & PERF_SAMPLE_ADDR)
3386 header->size += sizeof(data->addr);
3387
3388 if (sample_type & PERF_SAMPLE_ID) {
3389 data->id = primary_event_id(event);
3390
3391 header->size += sizeof(data->id);
3392 }
3393
3394 if (sample_type & PERF_SAMPLE_STREAM_ID) {
3395 data->stream_id = event->id;
3396
3397 header->size += sizeof(data->stream_id);
3398 }
3399
3400 if (sample_type & PERF_SAMPLE_CPU) {
3401 data->cpu_entry.cpu = raw_smp_processor_id();
3402 data->cpu_entry.reserved = 0;
3403
3404 header->size += sizeof(data->cpu_entry);
3405 }
3406
3407 if (sample_type & PERF_SAMPLE_PERIOD)
3408 header->size += sizeof(data->period);
3409
3410 if (sample_type & PERF_SAMPLE_READ)
3411 header->size += perf_event_read_size(event);
3412
3413 if (sample_type & PERF_SAMPLE_CALLCHAIN) {
3414 int size = 1;
3415
3416 data->callchain = perf_callchain(regs);
3417
3418 if (data->callchain)
3419 size += data->callchain->nr;
3420
3421 header->size += size * sizeof(u64);
3422 }
3423
3424 if (sample_type & PERF_SAMPLE_RAW) {
3425 int size = sizeof(u32);
3426
3427 if (data->raw)
3428 size += data->raw->size;
3429 else
3430 size += sizeof(u32);
3431
3432 WARN_ON_ONCE(size & (sizeof(u64)-1));
3433 header->size += size;
3434 }
3435}
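/*
 * Illustrative sizing sketch (not part of the original source): for
 * sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_TIME |
 * PERF_SAMPLE_PERIOD, perf_prepare_sample() accumulates
 *
 *	header->size = sizeof(struct perf_event_header)	//  8 bytes
 *		     + sizeof(data->ip)			//  8 bytes (u64)
 *		     + sizeof(data->tid_entry)		//  8 bytes (2 x u32)
 *		     + sizeof(data->time)		//  8 bytes (u64)
 *		     + sizeof(data->period);		//  8 bytes (u64)
 *							// = 40 bytes total
 *
 * which is exactly the number of bytes perf_output_sample() emits for the
 * same sample_type, keeping the two functions in lock-step.
 */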
3436
3437static void perf_event_output(struct perf_event *event, int nmi,
3438 struct perf_sample_data *data,
3439 struct pt_regs *regs)
3440{
3441 struct perf_output_handle handle;
3442 struct perf_event_header header;
3443
3444 perf_prepare_sample(&header, data, event, regs);
3445
3446 if (perf_output_begin(&handle, event, header.size, nmi, 1))
3447 return;
3448
3449 perf_output_sample(&handle, &header, data, event);
3450
3451 perf_output_end(&handle);
3452}
3453
3454/*
3455 * read event_id
3456 */
3457
3458struct perf_read_event {
3459 struct perf_event_header header;
3460
3461 u32 pid;
3462 u32 tid;
3463};
3464
3465static void
3466perf_event_read_event(struct perf_event *event,
3467 struct task_struct *task)
3468{
3469 struct perf_output_handle handle;
3470 struct perf_read_event read_event = {
3471 .header = {
3472 .type = PERF_RECORD_READ,
3473 .misc = 0,
3474 .size = sizeof(read_event) + perf_event_read_size(event),
3475 },
3476 .pid = perf_event_pid(event, task),
3477 .tid = perf_event_tid(event, task),
3478 };
3479 int ret;
3480
3481 ret = perf_output_begin(&handle, event, read_event.header.size, 0, 0);
3482 if (ret)
3483 return;
3484
3485 perf_output_put(&handle, read_event);
3486 perf_output_read(&handle, event);
3487
3488 perf_output_end(&handle);
3489}
3490
3491/*
3492 * task tracking -- fork/exit
3493 *
3494 * enabled by: attr.comm | attr.mmap | attr.task
3495 */
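/*
 * Minimal usage sketch (illustrative only, not in the original file):
 * user space requests these side-band records by setting the matching
 * attribute bits before perf_event_open(), e.g.:
 *
 *	struct perf_event_attr attr = {
 *		.type	= PERF_TYPE_SOFTWARE,
 *		.config	= PERF_COUNT_SW_TASK_CLOCK,
 *		.size	= sizeof(attr),
 *		.comm	= 1,	// PERF_RECORD_COMM records
 *		.mmap	= 1,	// PERF_RECORD_MMAP records
 *		.task	= 1,	// PERF_RECORD_FORK / PERF_RECORD_EXIT records
 *	};
 */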
3496
3497struct perf_task_event {
3498 struct task_struct *task;
3499 struct perf_event_context *task_ctx;
3500
3501 struct {
3502 struct perf_event_header header;
3503
3504 u32 pid;
3505 u32 ppid;
3506 u32 tid;
3507 u32 ptid;
3508 u64 time;
3509 } event_id;
3510};
3511
3512static void perf_event_task_output(struct perf_event *event,
3513 struct perf_task_event *task_event)
3514{
3515 struct perf_output_handle handle;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003516 struct task_struct *task = task_event->task;
Mike Galbraith8bb39f92010-03-26 11:11:33 +01003517 int size, ret;
3518
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003519 size = task_event->event_id.header.size;
3520 ret = perf_output_begin(&handle, event, size, 0, 0);
3521
Peter Zijlstraef607772010-05-18 10:50:41 +02003522 if (ret)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003523 return;
3524
3525 task_event->event_id.pid = perf_event_pid(event, task);
3526 task_event->event_id.ppid = perf_event_pid(event, current);
3527
3528 task_event->event_id.tid = perf_event_tid(event, task);
3529 task_event->event_id.ptid = perf_event_tid(event, current);
3530
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003531 perf_output_put(&handle, task_event->event_id);
3532
3533 perf_output_end(&handle);
3534}
3535
3536static int perf_event_task_match(struct perf_event *event)
3537{
Peter Zijlstra6f93d0a2010-02-14 11:12:04 +01003538 if (event->state < PERF_EVENT_STATE_INACTIVE)
Peter Zijlstra22e19082010-01-18 09:12:32 +01003539 return 0;
3540
Peter Zijlstra5d27c232009-12-17 13:16:32 +01003541 if (event->cpu != -1 && event->cpu != smp_processor_id())
3542 return 0;
3543
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003544 if (event->attr.comm || event->attr.mmap || event->attr.task)
3545 return 1;
3546
3547 return 0;
3548}
3549
3550static void perf_event_task_ctx(struct perf_event_context *ctx,
3551 struct perf_task_event *task_event)
3552{
3553 struct perf_event *event;
3554
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003555 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
3556 if (perf_event_task_match(event))
3557 perf_event_task_output(event, task_event);
3558 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003559}
3560
3561static void perf_event_task_event(struct perf_task_event *task_event)
3562{
3563 struct perf_cpu_context *cpuctx;
3564 struct perf_event_context *ctx = task_event->task_ctx;
3565
Peter Zijlstrad6ff86c2009-11-20 22:19:46 +01003566 rcu_read_lock();
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003567 cpuctx = &get_cpu_var(perf_cpu_context);
3568 perf_event_task_ctx(&cpuctx->ctx, task_event);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003569 if (!ctx)
Peter Zijlstra6f93d0a2010-02-14 11:12:04 +01003570 ctx = rcu_dereference(current->perf_event_ctxp);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003571 if (ctx)
3572 perf_event_task_ctx(ctx, task_event);
Peter Zijlstra5d27c232009-12-17 13:16:32 +01003573 put_cpu_var(perf_cpu_context);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003574 rcu_read_unlock();
3575}
3576
3577static void perf_event_task(struct task_struct *task,
3578 struct perf_event_context *task_ctx,
3579 int new)
3580{
3581 struct perf_task_event task_event;
3582
3583 if (!atomic_read(&nr_comm_events) &&
3584 !atomic_read(&nr_mmap_events) &&
3585 !atomic_read(&nr_task_events))
3586 return;
3587
3588 task_event = (struct perf_task_event){
3589 .task = task,
3590 .task_ctx = task_ctx,
3591 .event_id = {
3592 .header = {
3593 .type = new ? PERF_RECORD_FORK : PERF_RECORD_EXIT,
3594 .misc = 0,
3595 .size = sizeof(task_event.event_id),
3596 },
3597 /* .pid */
3598 /* .ppid */
3599 /* .tid */
3600 /* .ptid */
Peter Zijlstra6f93d0a2010-02-14 11:12:04 +01003601 .time = perf_clock(),
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003602 },
3603 };
3604
3605 perf_event_task_event(&task_event);
3606}
3607
3608void perf_event_fork(struct task_struct *task)
3609{
3610 perf_event_task(task, NULL, 1);
3611}
3612
3613/*
3614 * comm tracking
3615 */
3616
3617struct perf_comm_event {
3618 struct task_struct *task;
3619 char *comm;
3620 int comm_size;
3621
3622 struct {
3623 struct perf_event_header header;
3624
3625 u32 pid;
3626 u32 tid;
3627 } event_id;
3628};
3629
3630static void perf_event_comm_output(struct perf_event *event,
3631 struct perf_comm_event *comm_event)
3632{
3633 struct perf_output_handle handle;
3634 int size = comm_event->event_id.header.size;
3635 int ret = perf_output_begin(&handle, event, size, 0, 0);
3636
3637 if (ret)
3638 return;
3639
3640 comm_event->event_id.pid = perf_event_pid(event, comm_event->task);
3641 comm_event->event_id.tid = perf_event_tid(event, comm_event->task);
3642
3643 perf_output_put(&handle, comm_event->event_id);
3644 perf_output_copy(&handle, comm_event->comm,
3645 comm_event->comm_size);
3646 perf_output_end(&handle);
3647}
3648
3649static int perf_event_comm_match(struct perf_event *event)
3650{
Peter Zijlstra6f93d0a2010-02-14 11:12:04 +01003651 if (event->state < PERF_EVENT_STATE_INACTIVE)
Peter Zijlstra22e19082010-01-18 09:12:32 +01003652 return 0;
3653
Peter Zijlstra5d27c232009-12-17 13:16:32 +01003654 if (event->cpu != -1 && event->cpu != smp_processor_id())
3655 return 0;
3656
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003657 if (event->attr.comm)
3658 return 1;
3659
3660 return 0;
3661}
3662
3663static void perf_event_comm_ctx(struct perf_event_context *ctx,
3664 struct perf_comm_event *comm_event)
3665{
3666 struct perf_event *event;
3667
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003668 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
3669 if (perf_event_comm_match(event))
3670 perf_event_comm_output(event, comm_event);
3671 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003672}
3673
3674static void perf_event_comm_event(struct perf_comm_event *comm_event)
3675{
3676 struct perf_cpu_context *cpuctx;
3677 struct perf_event_context *ctx;
3678 unsigned int size;
3679 char comm[TASK_COMM_LEN];
3680
3681 memset(comm, 0, sizeof(comm));
Márton Németh96b02d72009-11-21 23:10:15 +01003682 strlcpy(comm, comm_event->task->comm, sizeof(comm));
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003683 size = ALIGN(strlen(comm)+1, sizeof(u64));
3684
3685 comm_event->comm = comm;
3686 comm_event->comm_size = size;
3687
3688 comm_event->event_id.header.size = sizeof(comm_event->event_id) + size;
3689
Peter Zijlstraf6595f32009-11-20 22:19:47 +01003690 rcu_read_lock();
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003691 cpuctx = &get_cpu_var(perf_cpu_context);
3692 perf_event_comm_ctx(&cpuctx->ctx, comm_event);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003693 ctx = rcu_dereference(current->perf_event_ctxp);
3694 if (ctx)
3695 perf_event_comm_ctx(ctx, comm_event);
Peter Zijlstra5d27c232009-12-17 13:16:32 +01003696 put_cpu_var(perf_cpu_context);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003697 rcu_read_unlock();
3698}
3699
3700void perf_event_comm(struct task_struct *task)
3701{
3702 struct perf_comm_event comm_event;
3703
3704 if (task->perf_event_ctxp)
3705 perf_event_enable_on_exec(task);
3706
3707 if (!atomic_read(&nr_comm_events))
3708 return;
3709
3710 comm_event = (struct perf_comm_event){
3711 .task = task,
3712 /* .comm */
3713 /* .comm_size */
3714 .event_id = {
3715 .header = {
3716 .type = PERF_RECORD_COMM,
3717 .misc = 0,
3718 /* .size */
3719 },
3720 /* .pid */
3721 /* .tid */
3722 },
3723 };
3724
3725 perf_event_comm_event(&comm_event);
3726}
3727
3728/*
3729 * mmap tracking
3730 */
3731
3732struct perf_mmap_event {
3733 struct vm_area_struct *vma;
3734
3735 const char *file_name;
3736 int file_size;
3737
3738 struct {
3739 struct perf_event_header header;
3740
3741 u32 pid;
3742 u32 tid;
3743 u64 start;
3744 u64 len;
3745 u64 pgoff;
3746 } event_id;
3747};
3748
3749static void perf_event_mmap_output(struct perf_event *event,
3750 struct perf_mmap_event *mmap_event)
3751{
3752 struct perf_output_handle handle;
3753 int size = mmap_event->event_id.header.size;
3754 int ret = perf_output_begin(&handle, event, size, 0, 0);
3755
3756 if (ret)
3757 return;
3758
3759 mmap_event->event_id.pid = perf_event_pid(event, current);
3760 mmap_event->event_id.tid = perf_event_tid(event, current);
3761
3762 perf_output_put(&handle, mmap_event->event_id);
3763 perf_output_copy(&handle, mmap_event->file_name,
3764 mmap_event->file_size);
3765 perf_output_end(&handle);
3766}
3767
3768static int perf_event_mmap_match(struct perf_event *event,
3769 struct perf_mmap_event *mmap_event)
3770{
Peter Zijlstra6f93d0a2010-02-14 11:12:04 +01003771 if (event->state < PERF_EVENT_STATE_INACTIVE)
Peter Zijlstra22e19082010-01-18 09:12:32 +01003772 return 0;
3773
Peter Zijlstra5d27c232009-12-17 13:16:32 +01003774 if (event->cpu != -1 && event->cpu != smp_processor_id())
3775 return 0;
3776
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003777 if (event->attr.mmap)
3778 return 1;
3779
3780 return 0;
3781}
3782
3783static void perf_event_mmap_ctx(struct perf_event_context *ctx,
3784 struct perf_mmap_event *mmap_event)
3785{
3786 struct perf_event *event;
3787
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003788 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
3789 if (perf_event_mmap_match(event, mmap_event))
3790 perf_event_mmap_output(event, mmap_event);
3791 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003792}
3793
3794static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
3795{
3796 struct perf_cpu_context *cpuctx;
3797 struct perf_event_context *ctx;
3798 struct vm_area_struct *vma = mmap_event->vma;
3799 struct file *file = vma->vm_file;
3800 unsigned int size;
3801 char tmp[16];
3802 char *buf = NULL;
3803 const char *name;
3804
3805 memset(tmp, 0, sizeof(tmp));
3806
3807 if (file) {
3808 /*
3809 * d_path works from the end of the buffer backwards, so we
3810 * need to add enough zero bytes after the string to handle
3811 * the 64bit alignment we do later.
3812 */
3813 buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
3814 if (!buf) {
3815 name = strncpy(tmp, "//enomem", sizeof(tmp));
3816 goto got_name;
3817 }
3818 name = d_path(&file->f_path, buf, PATH_MAX);
3819 if (IS_ERR(name)) {
3820 name = strncpy(tmp, "//toolong", sizeof(tmp));
3821 goto got_name;
3822 }
3823 } else {
3824 if (arch_vma_name(mmap_event->vma)) {
3825 name = strncpy(tmp, arch_vma_name(mmap_event->vma),
3826 sizeof(tmp));
3827 goto got_name;
3828 }
3829
3830 if (!vma->vm_mm) {
3831 name = strncpy(tmp, "[vdso]", sizeof(tmp));
3832 goto got_name;
3833 }
3834
3835 name = strncpy(tmp, "//anon", sizeof(tmp));
3836 goto got_name;
3837 }
3838
3839got_name:
3840 size = ALIGN(strlen(name)+1, sizeof(u64));
3841
3842 mmap_event->file_name = name;
3843 mmap_event->file_size = size;
3844
3845 mmap_event->event_id.header.size = sizeof(mmap_event->event_id) + size;
3846
Peter Zijlstraf6d9dd22009-11-20 22:19:48 +01003847 rcu_read_lock();
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003848 cpuctx = &get_cpu_var(perf_cpu_context);
3849 perf_event_mmap_ctx(&cpuctx->ctx, mmap_event);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003850 ctx = rcu_dereference(current->perf_event_ctxp);
3851 if (ctx)
3852 perf_event_mmap_ctx(ctx, mmap_event);
Peter Zijlstra5d27c232009-12-17 13:16:32 +01003853 put_cpu_var(perf_cpu_context);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003854 rcu_read_unlock();
3855
3856 kfree(buf);
3857}
3858
3859void __perf_event_mmap(struct vm_area_struct *vma)
3860{
3861 struct perf_mmap_event mmap_event;
3862
3863 if (!atomic_read(&nr_mmap_events))
3864 return;
3865
3866 mmap_event = (struct perf_mmap_event){
3867 .vma = vma,
3868 /* .file_name */
3869 /* .file_size */
3870 .event_id = {
3871 .header = {
3872 .type = PERF_RECORD_MMAP,
Zhang, Yanmin39447b32010-04-19 13:32:41 +08003873 .misc = PERF_RECORD_MISC_USER,
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003874 /* .size */
3875 },
3876 /* .pid */
3877 /* .tid */
3878 .start = vma->vm_start,
3879 .len = vma->vm_end - vma->vm_start,
Peter Zijlstra3a0304e2010-02-26 10:33:41 +01003880 .pgoff = (u64)vma->vm_pgoff << PAGE_SHIFT,
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003881 },
3882 };
3883
3884 perf_event_mmap_event(&mmap_event);
3885}
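/*
 * Call-site sketch (an assumption about the perf_event.h of this era, not
 * part of this file): mmap() paths do not call __perf_event_mmap()
 * directly but go through an inline wrapper that filters for executable
 * mappings, roughly:
 *
 *	static inline void perf_event_mmap(struct vm_area_struct *vma)
 *	{
 *		if (vma->vm_flags & VM_EXEC)
 *			__perf_event_mmap(vma);
 *	}
 */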
3886
3887/*
3888 * IRQ throttle logging
3889 */
3890
3891static void perf_log_throttle(struct perf_event *event, int enable)
3892{
3893 struct perf_output_handle handle;
3894 int ret;
3895
3896 struct {
3897 struct perf_event_header header;
3898 u64 time;
3899 u64 id;
3900 u64 stream_id;
3901 } throttle_event = {
3902 .header = {
3903 .type = PERF_RECORD_THROTTLE,
3904 .misc = 0,
3905 .size = sizeof(throttle_event),
3906 },
3907 .time = perf_clock(),
3908 .id = primary_event_id(event),
3909 .stream_id = event->id,
3910 };
3911
3912 if (enable)
3913 throttle_event.header.type = PERF_RECORD_UNTHROTTLE;
3914
3915 ret = perf_output_begin(&handle, event, sizeof(throttle_event), 1, 0);
3916 if (ret)
3917 return;
3918
3919 perf_output_put(&handle, throttle_event);
3920 perf_output_end(&handle);
3921}
3922
3923/*
3924 * Generic event overflow handling, sampling.
3925 */
3926
3927static int __perf_event_overflow(struct perf_event *event, int nmi,
3928 int throttle, struct perf_sample_data *data,
3929 struct pt_regs *regs)
3930{
3931 int events = atomic_read(&event->event_limit);
3932 struct hw_perf_event *hwc = &event->hw;
3933 int ret = 0;
3934
3935 throttle = (throttle && event->pmu->unthrottle != NULL);
3936
3937 if (!throttle) {
3938 hwc->interrupts++;
3939 } else {
3940 if (hwc->interrupts != MAX_INTERRUPTS) {
3941 hwc->interrupts++;
3942 if (HZ * hwc->interrupts >
3943 (u64)sysctl_perf_event_sample_rate) {
3944 hwc->interrupts = MAX_INTERRUPTS;
3945 perf_log_throttle(event, 0);
3946 ret = 1;
3947 }
3948 } else {
3949 /*
3950 * Keep re-disabling the event even though we already disabled
3951 * it on the previous pass - just in case we raced with a
3952 * sched-in and it got enabled again:
3953 */
3954 ret = 1;
3955 }
3956 }
3957
3958 if (event->attr.freq) {
3959 u64 now = perf_clock();
Peter Zijlstraabd50712010-01-26 18:50:16 +01003960 s64 delta = now - hwc->freq_time_stamp;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003961
Peter Zijlstraabd50712010-01-26 18:50:16 +01003962 hwc->freq_time_stamp = now;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003963
Peter Zijlstraabd50712010-01-26 18:50:16 +01003964 if (delta > 0 && delta < 2*TICK_NSEC)
3965 perf_adjust_period(event, delta, hwc->last_period);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003966 }
3967
3968 /*
3969 * XXX event_limit might not quite work as expected on inherited
3970 * events
3971 */
3972
3973 event->pending_kill = POLL_IN;
3974 if (events && atomic_dec_and_test(&event->event_limit)) {
3975 ret = 1;
3976 event->pending_kill = POLL_HUP;
3977 if (nmi) {
3978 event->pending_disable = 1;
3979 perf_pending_queue(&event->pending,
3980 perf_pending_event);
3981 } else
3982 perf_event_disable(event);
3983 }
3984
Peter Zijlstra453f19e2009-11-20 22:19:43 +01003985 if (event->overflow_handler)
3986 event->overflow_handler(event, nmi, data, regs);
3987 else
3988 perf_event_output(event, nmi, data, regs);
3989
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003990 return ret;
3991}
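/*
 * Worked throttling example (HZ = 1000 is an assumed configuration; the
 * default sysctl_perf_event_sample_rate of 100000 is from this file): the
 * check "HZ * hwc->interrupts > sysctl_perf_event_sample_rate" above
 * throttles an event once it has taken more than
 *
 *	100000 / 1000 = 100
 *
 * overflow interrupts in its current accounting window; hwc->interrupts is
 * then pinned at MAX_INTERRUPTS and a PERF_RECORD_THROTTLE record is
 * written, with perf_log_throttle(event, 1) later emitting the matching
 * PERF_RECORD_UNTHROTTLE.
 */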
3992
3993int perf_event_overflow(struct perf_event *event, int nmi,
3994 struct perf_sample_data *data,
3995 struct pt_regs *regs)
3996{
3997 return __perf_event_overflow(event, nmi, 1, data, regs);
3998}
3999
4000/*
4001 * Generic software event infrastructure
4002 */
4003
4004/*
4005 * We directly increment event->count and keep a second value in
4006 * event->hw.period_left to count intervals. This period value
4007 * is kept in the range [-sample_period, 0] so that we can use the
4008 * sign as trigger.
4009 */
4010
4011static u64 perf_swevent_set_period(struct perf_event *event)
4012{
4013 struct hw_perf_event *hwc = &event->hw;
4014 u64 period = hwc->last_period;
4015 u64 nr, offset;
4016 s64 old, val;
4017
4018 hwc->last_period = hwc->sample_period;
4019
4020again:
4021 old = val = atomic64_read(&hwc->period_left);
4022 if (val < 0)
4023 return 0;
4024
4025 nr = div64_u64(period + val, period);
4026 offset = nr * period;
4027 val -= offset;
4028 if (atomic64_cmpxchg(&hwc->period_left, old, val) != old)
4029 goto again;
4030
4031 return nr;
4032}
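/*
 * Worked example of the period bookkeeping above (illustrative numbers):
 * with sample_period = 100, period_left starts out at -100.  Adding 40
 * events leaves it at -60: still negative, so no sample.  Adding another
 * 70 takes it to +10, the overflow path runs, and perf_swevent_set_period()
 * computes
 *
 *	nr  = (period + val) / period = (100 + 10) / 100 = 1 overflow
 *	val = val - nr * period       = 10 - 100         = -90
 *
 * so one sample is generated and the next is due after 90 more events,
 * preserving the overall 1-in-100 sampling rate.
 */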
4033
Peter Zijlstra0cff7842009-11-20 22:19:44 +01004034static void perf_swevent_overflow(struct perf_event *event, u64 overflow,
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004035 int nmi, struct perf_sample_data *data,
4036 struct pt_regs *regs)
4037{
4038 struct hw_perf_event *hwc = &event->hw;
4039 int throttle = 0;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004040
4041 data->period = event->hw.last_period;
Peter Zijlstra0cff7842009-11-20 22:19:44 +01004042 if (!overflow)
4043 overflow = perf_swevent_set_period(event);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004044
4045 if (hwc->interrupts == MAX_INTERRUPTS)
4046 return;
4047
4048 for (; overflow; overflow--) {
4049 if (__perf_event_overflow(event, nmi, throttle,
4050 data, regs)) {
4051 /*
4052 * We inhibit the overflow from happening when
4053 * hwc->interrupts == MAX_INTERRUPTS.
4054 */
4055 break;
4056 }
4057 throttle = 1;
4058 }
4059}
4060
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004061static void perf_swevent_add(struct perf_event *event, u64 nr,
4062 int nmi, struct perf_sample_data *data,
4063 struct pt_regs *regs)
4064{
4065 struct hw_perf_event *hwc = &event->hw;
4066
4067 atomic64_add(nr, &event->count);
4068
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004069 if (!regs)
4070 return;
4071
Peter Zijlstra0cff7842009-11-20 22:19:44 +01004072 if (!hwc->sample_period)
4073 return;
4074
4075 if (nr == 1 && hwc->sample_period == 1 && !event->attr.freq)
4076 return perf_swevent_overflow(event, 1, nmi, data, regs);
4077
4078 if (atomic64_add_negative(nr, &hwc->period_left))
4079 return;
4080
4081 perf_swevent_overflow(event, 0, nmi, data, regs);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004082}
4083
Frederic Weisbeckerf5ffe022009-11-23 15:42:34 +01004084static int perf_exclude_event(struct perf_event *event,
4085 struct pt_regs *regs)
4086{
4087 if (regs) {
4088 if (event->attr.exclude_user && user_mode(regs))
4089 return 1;
4090
4091 if (event->attr.exclude_kernel && !user_mode(regs))
4092 return 1;
4093 }
4094
4095 return 0;
4096}
4097
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004098static int perf_swevent_match(struct perf_event *event,
4099 enum perf_type_id type,
Li Zefan6fb29152009-10-15 11:21:42 +08004100 u32 event_id,
4101 struct perf_sample_data *data,
4102 struct pt_regs *regs)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004103{
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004104 if (event->attr.type != type)
4105 return 0;
Frederic Weisbeckerf5ffe022009-11-23 15:42:34 +01004106
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004107 if (event->attr.config != event_id)
4108 return 0;
4109
Frederic Weisbeckerf5ffe022009-11-23 15:42:34 +01004110 if (perf_exclude_event(event, regs))
4111 return 0;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004112
4113 return 1;
4114}
4115
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02004116static inline u64 swevent_hash(u64 type, u32 event_id)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004117{
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02004118 u64 val = event_id | (type << 32);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004119
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02004120 return hash_64(val, SWEVENT_HLIST_BITS);
4121}
4122
Frederic Weisbecker49f135e2010-05-20 10:17:46 +02004123static inline struct hlist_head *
4124__find_swevent_head(struct swevent_hlist *hlist, u64 type, u32 event_id)
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02004125{
Frederic Weisbecker49f135e2010-05-20 10:17:46 +02004126 u64 hash = swevent_hash(type, event_id);
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02004127
Frederic Weisbecker49f135e2010-05-20 10:17:46 +02004128 return &hlist->heads[hash];
4129}
4130
4131/* For the read side: events when they trigger */
4132static inline struct hlist_head *
4133find_swevent_head_rcu(struct perf_cpu_context *ctx, u64 type, u32 event_id)
4134{
4135 struct swevent_hlist *hlist;
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02004136
4137 hlist = rcu_dereference(ctx->swevent_hlist);
4138 if (!hlist)
4139 return NULL;
4140
Frederic Weisbecker49f135e2010-05-20 10:17:46 +02004141 return __find_swevent_head(hlist, type, event_id);
4142}
4143
4144/* For the event head insertion and removal in the hlist */
4145static inline struct hlist_head *
4146find_swevent_head(struct perf_cpu_context *ctx, struct perf_event *event)
4147{
4148 struct swevent_hlist *hlist;
4149 u32 event_id = event->attr.config;
4150 u64 type = event->attr.type;
4151
4152 /*
4153 * Event scheduling is always serialized against hlist allocation
4154 * and release (the context lock guarantees that), which makes
4155 * the protected dereference suitable here.
4156 */
4157 hlist = rcu_dereference_protected(ctx->swevent_hlist,
4158 lockdep_is_held(&event->ctx->lock));
4159 if (!hlist)
4160 return NULL;
4161
4162 return __find_swevent_head(hlist, type, event_id);
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02004163}
4164
4165static void do_perf_sw_event(enum perf_type_id type, u32 event_id,
4166 u64 nr, int nmi,
4167 struct perf_sample_data *data,
4168 struct pt_regs *regs)
4169{
4170 struct perf_cpu_context *cpuctx;
4171 struct perf_event *event;
4172 struct hlist_node *node;
4173 struct hlist_head *head;
4174
4175 cpuctx = &__get_cpu_var(perf_cpu_context);
4176
4177 rcu_read_lock();
4178
Frederic Weisbecker49f135e2010-05-20 10:17:46 +02004179 head = find_swevent_head_rcu(cpuctx, type, event_id);
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02004180
4181 if (!head)
4182 goto end;
4183
4184 hlist_for_each_entry_rcu(event, node, head, hlist_entry) {
Li Zefan6fb29152009-10-15 11:21:42 +08004185 if (perf_swevent_match(event, type, event_id, data, regs))
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004186 perf_swevent_add(event, nr, nmi, data, regs);
4187 }
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02004188end:
4189 rcu_read_unlock();
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004190}
4191
Peter Zijlstra4ed7c922009-11-23 11:37:29 +01004192int perf_swevent_get_recursion_context(void)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004193{
Peter Zijlstra1c024eca2010-05-19 14:02:22 +02004194 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
Peter Zijlstra4ed7c922009-11-23 11:37:29 +01004195 int rctx;
Frederic Weisbeckerce71b9d2009-11-22 05:26:55 +01004196
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004197 if (in_nmi())
Peter Zijlstra4ed7c922009-11-23 11:37:29 +01004198 rctx = 3;
Frederic Weisbeckerce71b9d2009-11-22 05:26:55 +01004199 else if (in_irq())
Peter Zijlstra4ed7c922009-11-23 11:37:29 +01004200 rctx = 2;
Frederic Weisbeckerce71b9d2009-11-22 05:26:55 +01004201 else if (in_softirq())
Peter Zijlstra4ed7c922009-11-23 11:37:29 +01004202 rctx = 1;
Frederic Weisbeckerce71b9d2009-11-22 05:26:55 +01004203 else
Peter Zijlstra4ed7c922009-11-23 11:37:29 +01004204 rctx = 0;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004205
Peter Zijlstra1c024eca2010-05-19 14:02:22 +02004206 if (cpuctx->recursion[rctx])
Frederic Weisbeckerce71b9d2009-11-22 05:26:55 +01004207 return -1;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004208
Peter Zijlstra4ed7c922009-11-23 11:37:29 +01004209 cpuctx->recursion[rctx]++;
4210 barrier();
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004211
Peter Zijlstra4ed7c922009-11-23 11:37:29 +01004212 return rctx;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004213}
Ingo Molnar645e8cc2009-11-22 12:20:19 +01004214EXPORT_SYMBOL_GPL(perf_swevent_get_recursion_context);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004215
Peter Zijlstra4ed7c922009-11-23 11:37:29 +01004216void perf_swevent_put_recursion_context(int rctx)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004217{
Peter Zijlstra4ed7c922009-11-23 11:37:29 +01004218 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
4219 barrier();
Frederic Weisbeckerfe612672009-11-24 20:38:22 +01004220 cpuctx->recursion[rctx]--;
Frederic Weisbeckerce71b9d2009-11-22 05:26:55 +01004221}
Ingo Molnar645e8cc2009-11-22 12:20:19 +01004222EXPORT_SYMBOL_GPL(perf_swevent_put_recursion_context);
Frederic Weisbeckerce71b9d2009-11-22 05:26:55 +01004223
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004224
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004225void __perf_sw_event(u32 event_id, u64 nr, int nmi,
4226 struct pt_regs *regs, u64 addr)
4227{
Ingo Molnara4234bf2009-11-23 10:57:59 +01004228 struct perf_sample_data data;
Peter Zijlstra4ed7c922009-11-23 11:37:29 +01004229 int rctx;
4230
Peter Zijlstra1c024eca2010-05-19 14:02:22 +02004231 preempt_disable_notrace();
Peter Zijlstra4ed7c922009-11-23 11:37:29 +01004232 rctx = perf_swevent_get_recursion_context();
4233 if (rctx < 0)
4234 return;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004235
Peter Zijlstradc1d6282010-03-03 15:55:04 +01004236 perf_sample_data_init(&data, addr);
Ingo Molnara4234bf2009-11-23 10:57:59 +01004237
4238 do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, nmi, &data, regs);
Peter Zijlstra4ed7c922009-11-23 11:37:29 +01004239
4240 perf_swevent_put_recursion_context(rctx);
Peter Zijlstra1c024eca2010-05-19 14:02:22 +02004241 preempt_enable_notrace();
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004242}
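/*
 * Call-site sketch (an assumption about code outside this file): callers
 * normally use the perf_sw_event() inline from perf_event.h, which checks
 * the perf_swevent_enabled[] counters maintained below before taking this
 * slow path, e.g. from a fault handler:
 *
 *	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
 *
 * Only when at least one such software event exists does the call reach
 * __perf_sw_event() and do_perf_sw_event().
 */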
4243
4244static void perf_swevent_read(struct perf_event *event)
4245{
4246}
4247
4248static int perf_swevent_enable(struct perf_event *event)
4249{
4250 struct hw_perf_event *hwc = &event->hw;
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02004251 struct perf_cpu_context *cpuctx;
4252 struct hlist_head *head;
4253
4254 cpuctx = &__get_cpu_var(perf_cpu_context);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004255
4256 if (hwc->sample_period) {
4257 hwc->last_period = hwc->sample_period;
4258 perf_swevent_set_period(event);
4259 }
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02004260
Frederic Weisbecker49f135e2010-05-20 10:17:46 +02004261 head = find_swevent_head(cpuctx, event);
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02004262 if (WARN_ON_ONCE(!head))
4263 return -EINVAL;
4264
4265 hlist_add_head_rcu(&event->hlist_entry, head);
4266
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004267 return 0;
4268}
4269
4270static void perf_swevent_disable(struct perf_event *event)
4271{
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02004272 hlist_del_rcu(&event->hlist_entry);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004273}
4274
Peter Zijlstrac6df8d52010-06-03 11:21:20 +02004275static void perf_swevent_void(struct perf_event *event)
4276{
4277}
4278
4279static int perf_swevent_int(struct perf_event *event)
4280{
4281 return 0;
4282}
4283
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004284static const struct pmu perf_ops_generic = {
4285 .enable = perf_swevent_enable,
4286 .disable = perf_swevent_disable,
Peter Zijlstrac6df8d52010-06-03 11:21:20 +02004287 .start = perf_swevent_int,
4288 .stop = perf_swevent_void,
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004289 .read = perf_swevent_read,
Peter Zijlstrac6df8d52010-06-03 11:21:20 +02004290 .unthrottle = perf_swevent_void, /* hwc->interrupts already reset */
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004291};
4292
4293/*
4294 * hrtimer based swevent callback
4295 */
4296
4297static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
4298{
4299 enum hrtimer_restart ret = HRTIMER_RESTART;
4300 struct perf_sample_data data;
4301 struct pt_regs *regs;
4302 struct perf_event *event;
4303 u64 period;
4304
Peter Zijlstradc1d6282010-03-03 15:55:04 +01004305 event = container_of(hrtimer, struct perf_event, hw.hrtimer);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004306 event->pmu->read(event);
4307
Peter Zijlstradc1d6282010-03-03 15:55:04 +01004308 perf_sample_data_init(&data, 0);
Xiao Guangrong59d069e2009-12-01 17:30:08 +08004309 data.period = event->hw.last_period;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004310 regs = get_irq_regs();
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004311
Frederic Weisbeckerdf8290b2010-04-09 00:28:14 +02004312 if (regs && !perf_exclude_event(event, regs)) {
Soeren Sandmann54f44072009-10-22 18:34:08 +02004313 if (!(event->attr.exclude_idle && current->pid == 0))
4314 if (perf_event_overflow(event, 0, &data, regs))
4315 ret = HRTIMER_NORESTART;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004316 }
4317
4318 period = max_t(u64, 10000, event->hw.sample_period);
4319 hrtimer_forward_now(hrtimer, ns_to_ktime(period));
4320
4321 return ret;
4322}
4323
Soeren Sandmann721a6692009-09-15 14:33:08 +02004324static void perf_swevent_start_hrtimer(struct perf_event *event)
4325{
4326 struct hw_perf_event *hwc = &event->hw;
4327
4328 hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
4329 hwc->hrtimer.function = perf_swevent_hrtimer;
4330 if (hwc->sample_period) {
4331 u64 period;
4332
4333 if (hwc->remaining) {
4334 if (hwc->remaining < 0)
4335 period = 10000;
4336 else
4337 period = hwc->remaining;
4338 hwc->remaining = 0;
4339 } else {
4340 period = max_t(u64, 10000, hwc->sample_period);
4341 }
4342 __hrtimer_start_range_ns(&hwc->hrtimer,
4343 ns_to_ktime(period), 0,
4344 HRTIMER_MODE_REL, 0);
4345 }
4346}
4347
4348static void perf_swevent_cancel_hrtimer(struct perf_event *event)
4349{
4350 struct hw_perf_event *hwc = &event->hw;
4351
4352 if (hwc->sample_period) {
4353 ktime_t remaining = hrtimer_get_remaining(&hwc->hrtimer);
4354 hwc->remaining = ktime_to_ns(remaining);
4355
4356 hrtimer_cancel(&hwc->hrtimer);
4357 }
4358}
4359
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004360/*
4361 * Software event: cpu wall time clock
4362 */
4363
4364static void cpu_clock_perf_event_update(struct perf_event *event)
4365{
4366 int cpu = raw_smp_processor_id();
4367 s64 prev;
4368 u64 now;
4369
4370 now = cpu_clock(cpu);
Xiao Guangrongec89a06f2009-12-09 11:30:36 +08004371 prev = atomic64_xchg(&event->hw.prev_count, now);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004372 atomic64_add(now - prev, &event->count);
4373}
4374
4375static int cpu_clock_perf_event_enable(struct perf_event *event)
4376{
4377 struct hw_perf_event *hwc = &event->hw;
4378 int cpu = raw_smp_processor_id();
4379
4380 atomic64_set(&hwc->prev_count, cpu_clock(cpu));
Soeren Sandmann721a6692009-09-15 14:33:08 +02004381 perf_swevent_start_hrtimer(event);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004382
4383 return 0;
4384}
4385
4386static void cpu_clock_perf_event_disable(struct perf_event *event)
4387{
Soeren Sandmann721a6692009-09-15 14:33:08 +02004388 perf_swevent_cancel_hrtimer(event);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004389 cpu_clock_perf_event_update(event);
4390}
4391
4392static void cpu_clock_perf_event_read(struct perf_event *event)
4393{
4394 cpu_clock_perf_event_update(event);
4395}
4396
4397static const struct pmu perf_ops_cpu_clock = {
4398 .enable = cpu_clock_perf_event_enable,
4399 .disable = cpu_clock_perf_event_disable,
4400 .read = cpu_clock_perf_event_read,
4401};
4402
4403/*
4404 * Software event: task time clock
4405 */
4406
4407static void task_clock_perf_event_update(struct perf_event *event, u64 now)
4408{
4409 u64 prev;
4410 s64 delta;
4411
4412 prev = atomic64_xchg(&event->hw.prev_count, now);
4413 delta = now - prev;
4414 atomic64_add(delta, &event->count);
4415}
4416
4417static int task_clock_perf_event_enable(struct perf_event *event)
4418{
4419 struct hw_perf_event *hwc = &event->hw;
4420 u64 now;
4421
4422 now = event->ctx->time;
4423
4424 atomic64_set(&hwc->prev_count, now);
Soeren Sandmann721a6692009-09-15 14:33:08 +02004425
4426 perf_swevent_start_hrtimer(event);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004427
4428 return 0;
4429}
4430
4431static void task_clock_perf_event_disable(struct perf_event *event)
4432{
Soeren Sandmann721a6692009-09-15 14:33:08 +02004433 perf_swevent_cancel_hrtimer(event);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004434 task_clock_perf_event_update(event, event->ctx->time);
4435
4436}
4437
4438static void task_clock_perf_event_read(struct perf_event *event)
4439{
4440 u64 time;
4441
4442 if (!in_nmi()) {
4443 update_context_time(event->ctx);
4444 time = event->ctx->time;
4445 } else {
4446 u64 now = perf_clock();
4447 u64 delta = now - event->ctx->timestamp;
4448 time = event->ctx->time + delta;
4449 }
4450
4451 task_clock_perf_event_update(event, time);
4452}
4453
4454static const struct pmu perf_ops_task_clock = {
4455 .enable = task_clock_perf_event_enable,
4456 .disable = task_clock_perf_event_disable,
4457 .read = task_clock_perf_event_read,
4458};
4459
Frederic Weisbecker49f135e2010-05-20 10:17:46 +02004460/* Deref the hlist from the update side */
4461static inline struct swevent_hlist *
4462swevent_hlist_deref(struct perf_cpu_context *cpuctx)
4463{
4464 return rcu_dereference_protected(cpuctx->swevent_hlist,
4465 lockdep_is_held(&cpuctx->hlist_mutex));
4466}
4467
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02004468static void swevent_hlist_release_rcu(struct rcu_head *rcu_head)
4469{
4470 struct swevent_hlist *hlist;
4471
4472 hlist = container_of(rcu_head, struct swevent_hlist, rcu_head);
4473 kfree(hlist);
4474}
4475
4476static void swevent_hlist_release(struct perf_cpu_context *cpuctx)
4477{
Frederic Weisbecker49f135e2010-05-20 10:17:46 +02004478 struct swevent_hlist *hlist = swevent_hlist_deref(cpuctx);
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02004479
Frederic Weisbecker49f135e2010-05-20 10:17:46 +02004480 if (!hlist)
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02004481 return;
4482
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02004483 rcu_assign_pointer(cpuctx->swevent_hlist, NULL);
4484 call_rcu(&hlist->rcu_head, swevent_hlist_release_rcu);
4485}
4486
4487static void swevent_hlist_put_cpu(struct perf_event *event, int cpu)
4488{
4489 struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
4490
4491 mutex_lock(&cpuctx->hlist_mutex);
4492
4493 if (!--cpuctx->hlist_refcount)
4494 swevent_hlist_release(cpuctx);
4495
4496 mutex_unlock(&cpuctx->hlist_mutex);
4497}
4498
4499static void swevent_hlist_put(struct perf_event *event)
4500{
4501 int cpu;
4502
4503 if (event->cpu != -1) {
4504 swevent_hlist_put_cpu(event, event->cpu);
4505 return;
4506 }
4507
4508 for_each_possible_cpu(cpu)
4509 swevent_hlist_put_cpu(event, cpu);
4510}
4511
4512static int swevent_hlist_get_cpu(struct perf_event *event, int cpu)
4513{
4514 struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
4515 int err = 0;
4516
4517 mutex_lock(&cpuctx->hlist_mutex);
4518
Frederic Weisbecker49f135e2010-05-20 10:17:46 +02004519 if (!swevent_hlist_deref(cpuctx) && cpu_online(cpu)) {
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02004520 struct swevent_hlist *hlist;
4521
4522 hlist = kzalloc(sizeof(*hlist), GFP_KERNEL);
4523 if (!hlist) {
4524 err = -ENOMEM;
4525 goto exit;
4526 }
4527 rcu_assign_pointer(cpuctx->swevent_hlist, hlist);
4528 }
4529 cpuctx->hlist_refcount++;
4530 exit:
4531 mutex_unlock(&cpuctx->hlist_mutex);
4532
4533 return err;
4534}
4535
4536static int swevent_hlist_get(struct perf_event *event)
4537{
4538 int err;
4539 int cpu, failed_cpu;
4540
4541 if (event->cpu != -1)
4542 return swevent_hlist_get_cpu(event, event->cpu);
4543
4544 get_online_cpus();
4545 for_each_possible_cpu(cpu) {
4546 err = swevent_hlist_get_cpu(event, cpu);
4547 if (err) {
4548 failed_cpu = cpu;
4549 goto fail;
4550 }
4551 }
4552 put_online_cpus();
4553
4554 return 0;
4555 fail:
4556 for_each_possible_cpu(cpu) {
4557 if (cpu == failed_cpu)
4558 break;
4559 swevent_hlist_put_cpu(event, cpu);
4560 }
4561
4562 put_online_cpus();
4563 return err;
4564}
4565
Frederic Weisbecker95476b62010-04-14 23:42:18 +02004566#ifdef CONFIG_EVENT_TRACING
4567
Peter Zijlstra1c024eca2010-05-19 14:02:22 +02004568static const struct pmu perf_ops_tracepoint = {
4569 .enable = perf_trace_enable,
4570 .disable = perf_trace_disable,
Peter Zijlstrac6df8d52010-06-03 11:21:20 +02004571 .start = perf_swevent_int,
4572 .stop = perf_swevent_void,
Peter Zijlstra1c024eca2010-05-19 14:02:22 +02004573 .read = perf_swevent_read,
Peter Zijlstrac6df8d52010-06-03 11:21:20 +02004574 .unthrottle = perf_swevent_void,
Peter Zijlstra1c024eca2010-05-19 14:02:22 +02004575};
Frederic Weisbecker95476b62010-04-14 23:42:18 +02004576
Peter Zijlstra1c024eca2010-05-19 14:02:22 +02004577static int perf_tp_filter_match(struct perf_event *event,
Frederic Weisbecker95476b62010-04-14 23:42:18 +02004578 struct perf_sample_data *data)
4579{
4580 void *record = data->raw->data;
4581
4582 if (likely(!event->filter) || filter_match_preds(event->filter, record))
4583 return 1;
4584 return 0;
4585}
4586
Peter Zijlstra1c024eca2010-05-19 14:02:22 +02004587static int perf_tp_event_match(struct perf_event *event,
4588 struct perf_sample_data *data,
4589 struct pt_regs *regs)
4590{
Peter Zijlstra580d6072010-05-20 20:54:31 +02004591 /*
4592 * All tracepoints are from kernel-space.
4593 */
4594 if (event->attr.exclude_kernel)
Peter Zijlstra1c024eca2010-05-19 14:02:22 +02004595 return 0;
4596
4597 if (!perf_tp_filter_match(event, data))
4598 return 0;
4599
4600 return 1;
4601}
4602
4603void perf_tp_event(u64 addr, u64 count, void *record, int entry_size,
4604 struct pt_regs *regs, struct hlist_head *head)
4605{
4606 struct perf_sample_data data;
4607 struct perf_event *event;
4608 struct hlist_node *node;
4609
4610 struct perf_raw_record raw = {
4611 .size = entry_size,
4612 .data = record,
4613 };
4614
4615 perf_sample_data_init(&data, addr);
4616 data.raw = &raw;
4617
4618 rcu_read_lock();
4619 hlist_for_each_entry_rcu(event, node, head, hlist_entry) {
4620 if (perf_tp_event_match(event, &data, regs))
4621 perf_swevent_add(event, count, 1, &data, regs);
4622 }
4623 rcu_read_unlock();
4624}
4625EXPORT_SYMBOL_GPL(perf_tp_event);
4626
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004627static void tp_perf_event_destroy(struct perf_event *event)
4628{
Peter Zijlstra1c024eca2010-05-19 14:02:22 +02004629 perf_trace_destroy(event);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004630}
4631
4632static const struct pmu *tp_perf_event_init(struct perf_event *event)
4633{
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02004634 int err;
4635
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004636 /*
4637 * Raw tracepoint data is a severe data leak; only allow root to
4638 * have these.
4639 */
4640 if ((event->attr.sample_type & PERF_SAMPLE_RAW) &&
4641 perf_paranoid_tracepoint_raw() &&
4642 !capable(CAP_SYS_ADMIN))
4643 return ERR_PTR(-EPERM);
4644
Peter Zijlstra1c024eca2010-05-19 14:02:22 +02004645 err = perf_trace_init(event);
4646 if (err)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004647 return NULL;
4648
4649 event->destroy = tp_perf_event_destroy;
4650
Peter Zijlstra1c024eca2010-05-19 14:02:22 +02004651 return &perf_ops_tracepoint;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004652}
Li Zefan6fb29152009-10-15 11:21:42 +08004653
4654static int perf_event_set_filter(struct perf_event *event, void __user *arg)
4655{
4656 char *filter_str;
4657 int ret;
4658
4659 if (event->attr.type != PERF_TYPE_TRACEPOINT)
4660 return -EINVAL;
4661
4662 filter_str = strndup_user(arg, PAGE_SIZE);
4663 if (IS_ERR(filter_str))
4664 return PTR_ERR(filter_str);
4665
4666 ret = ftrace_profile_set_filter(event, event->attr.config, filter_str);
4667
4668 kfree(filter_str);
4669 return ret;
4670}
4671
4672static void perf_event_free_filter(struct perf_event *event)
4673{
4674 ftrace_profile_free_filter(event);
4675}
4676
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004677#else
Li Zefan6fb29152009-10-15 11:21:42 +08004678
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004679static const struct pmu *tp_perf_event_init(struct perf_event *event)
4680{
4681 return NULL;
4682}
Li Zefan6fb29152009-10-15 11:21:42 +08004683
4684static int perf_event_set_filter(struct perf_event *event, void __user *arg)
4685{
4686 return -ENOENT;
4687}
4688
4689static void perf_event_free_filter(struct perf_event *event)
4690{
4691}
4692
Li Zefan07b139c2009-12-21 14:27:35 +08004693#endif /* CONFIG_EVENT_TRACING */
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004694
Frederic Weisbecker24f1e32c2009-09-09 19:22:48 +02004695#ifdef CONFIG_HAVE_HW_BREAKPOINT
4696static void bp_perf_event_destroy(struct perf_event *event)
4697{
4698 release_bp_slot(event);
4699}
4700
4701static const struct pmu *bp_perf_event_init(struct perf_event *bp)
4702{
4703 int err;
Frederic Weisbeckerb326e952009-12-05 09:44:31 +01004704
4705 err = register_perf_hw_breakpoint(bp);
Frederic Weisbecker24f1e32c2009-09-09 19:22:48 +02004706 if (err)
4707 return ERR_PTR(err);
4708
4709 bp->destroy = bp_perf_event_destroy;
4710
4711 return &perf_ops_bp;
4712}
4713
Frederic Weisbeckerf5ffe022009-11-23 15:42:34 +01004714void perf_bp_event(struct perf_event *bp, void *data)
Frederic Weisbecker24f1e32c2009-09-09 19:22:48 +02004715{
Frederic Weisbeckerf5ffe022009-11-23 15:42:34 +01004716 struct perf_sample_data sample;
4717 struct pt_regs *regs = data;
4718
Peter Zijlstradc1d6282010-03-03 15:55:04 +01004719 perf_sample_data_init(&sample, bp->attr.bp_addr);
Frederic Weisbeckerf5ffe022009-11-23 15:42:34 +01004720
4721 if (!perf_exclude_event(bp, regs))
4722 perf_swevent_add(bp, 1, 1, &sample, regs);
Frederic Weisbecker24f1e32c2009-09-09 19:22:48 +02004723}
4724#else
Frederic Weisbecker24f1e32c2009-09-09 19:22:48 +02004725static const struct pmu *bp_perf_event_init(struct perf_event *bp)
4726{
4727 return NULL;
4728}
4729
4730void perf_bp_event(struct perf_event *bp, void *regs)
4731{
4732}
4733#endif
4734
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004735atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX];
4736
4737static void sw_perf_event_destroy(struct perf_event *event)
4738{
4739 u64 event_id = event->attr.config;
4740
4741 WARN_ON(event->parent);
4742
4743 atomic_dec(&perf_swevent_enabled[event_id]);
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02004744 swevent_hlist_put(event);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004745}
4746
4747static const struct pmu *sw_perf_event_init(struct perf_event *event)
4748{
4749 const struct pmu *pmu = NULL;
4750 u64 event_id = event->attr.config;
4751
4752 /*
4753 * Software events (currently) can't in general distinguish
4754 * between user, kernel and hypervisor events.
4755 * However, context switches and cpu migrations are considered
4756 * to be kernel events, and page faults are never hypervisor
4757 * events.
4758 */
4759 switch (event_id) {
4760 case PERF_COUNT_SW_CPU_CLOCK:
4761 pmu = &perf_ops_cpu_clock;
4762
4763 break;
4764 case PERF_COUNT_SW_TASK_CLOCK:
4765 /*
4766 * If the user instantiates this as a per-cpu event,
4767 * use the cpu_clock event instead.
4768 */
4769 if (event->ctx->task)
4770 pmu = &perf_ops_task_clock;
4771 else
4772 pmu = &perf_ops_cpu_clock;
4773
4774 break;
4775 case PERF_COUNT_SW_PAGE_FAULTS:
4776 case PERF_COUNT_SW_PAGE_FAULTS_MIN:
4777 case PERF_COUNT_SW_PAGE_FAULTS_MAJ:
4778 case PERF_COUNT_SW_CONTEXT_SWITCHES:
4779 case PERF_COUNT_SW_CPU_MIGRATIONS:
Anton Blanchardf7d79862009-10-18 01:09:29 +00004780 case PERF_COUNT_SW_ALIGNMENT_FAULTS:
4781 case PERF_COUNT_SW_EMULATION_FAULTS:
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004782 if (!event->parent) {
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02004783 int err;
4784
4785 err = swevent_hlist_get(event);
4786 if (err)
4787 return ERR_PTR(err);
4788
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004789 atomic_inc(&perf_swevent_enabled[event_id]);
4790 event->destroy = sw_perf_event_destroy;
4791 }
4792 pmu = &perf_ops_generic;
4793 break;
4794 }
4795
4796 return pmu;
4797}
4798
4799/*
4800 * Allocate and initialize an event structure
4801 */
4802static struct perf_event *
4803perf_event_alloc(struct perf_event_attr *attr,
4804 int cpu,
4805 struct perf_event_context *ctx,
4806 struct perf_event *group_leader,
4807 struct perf_event *parent_event,
Frederic Weisbeckerb326e952009-12-05 09:44:31 +01004808 perf_overflow_handler_t overflow_handler,
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004809 gfp_t gfpflags)
4810{
4811 const struct pmu *pmu;
4812 struct perf_event *event;
4813 struct hw_perf_event *hwc;
4814 long err;
4815
4816 event = kzalloc(sizeof(*event), gfpflags);
4817 if (!event)
4818 return ERR_PTR(-ENOMEM);
4819
4820 /*
4821 * Single events are their own group leaders, with an
4822 * empty sibling list:
4823 */
4824 if (!group_leader)
4825 group_leader = event;
4826
4827 mutex_init(&event->child_mutex);
4828 INIT_LIST_HEAD(&event->child_list);
4829
4830 INIT_LIST_HEAD(&event->group_entry);
4831 INIT_LIST_HEAD(&event->event_entry);
4832 INIT_LIST_HEAD(&event->sibling_list);
4833 init_waitqueue_head(&event->waitq);
4834
4835 mutex_init(&event->mmap_mutex);
4836
4837 event->cpu = cpu;
4838 event->attr = *attr;
4839 event->group_leader = group_leader;
4840 event->pmu = NULL;
4841 event->ctx = ctx;
4842 event->oncpu = -1;
4843
4844 event->parent = parent_event;
4845
4846 event->ns = get_pid_ns(current->nsproxy->pid_ns);
4847 event->id = atomic64_inc_return(&perf_event_id);
4848
4849 event->state = PERF_EVENT_STATE_INACTIVE;
4850
Frederic Weisbeckerb326e952009-12-05 09:44:31 +01004851 if (!overflow_handler && parent_event)
4852 overflow_handler = parent_event->overflow_handler;
Frederic Weisbecker97eaf532009-10-18 15:33:50 +02004853
Frederic Weisbeckerb326e952009-12-05 09:44:31 +01004854 event->overflow_handler = overflow_handler;
Frederic Weisbecker97eaf532009-10-18 15:33:50 +02004855
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004856 if (attr->disabled)
4857 event->state = PERF_EVENT_STATE_OFF;
4858
4859 pmu = NULL;
4860
4861 hwc = &event->hw;
4862 hwc->sample_period = attr->sample_period;
4863 if (attr->freq && attr->sample_freq)
4864 hwc->sample_period = 1;
4865 hwc->last_period = hwc->sample_period;
4866
4867 atomic64_set(&hwc->period_left, hwc->sample_period);
4868
4869 /*
4870 * we currently do not support PERF_FORMAT_GROUP on inherited events
4871 */
4872 if (attr->inherit && (attr->read_format & PERF_FORMAT_GROUP))
4873 goto done;
4874
4875 switch (attr->type) {
4876 case PERF_TYPE_RAW:
4877 case PERF_TYPE_HARDWARE:
4878 case PERF_TYPE_HW_CACHE:
4879 pmu = hw_perf_event_init(event);
4880 break;
4881
4882 case PERF_TYPE_SOFTWARE:
4883 pmu = sw_perf_event_init(event);
4884 break;
4885
4886 case PERF_TYPE_TRACEPOINT:
4887 pmu = tp_perf_event_init(event);
4888 break;
4889
Frederic Weisbecker24f1e32c2009-09-09 19:22:48 +02004890 case PERF_TYPE_BREAKPOINT:
4891 pmu = bp_perf_event_init(event);
4892 break;
4893
4894
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004895 default:
4896 break;
4897 }
4898done:
4899 err = 0;
4900 if (!pmu)
4901 err = -EINVAL;
4902 else if (IS_ERR(pmu))
4903 err = PTR_ERR(pmu);
4904
4905 if (err) {
4906 if (event->ns)
4907 put_pid_ns(event->ns);
4908 kfree(event);
4909 return ERR_PTR(err);
4910 }
4911
4912 event->pmu = pmu;
4913
4914 if (!event->parent) {
4915 atomic_inc(&nr_events);
4916 if (event->attr.mmap)
4917 atomic_inc(&nr_mmap_events);
4918 if (event->attr.comm)
4919 atomic_inc(&nr_comm_events);
4920 if (event->attr.task)
4921 atomic_inc(&nr_task_events);
4922 }
4923
4924 return event;
4925}
4926
4927static int perf_copy_attr(struct perf_event_attr __user *uattr,
4928 struct perf_event_attr *attr)
4929{
4930 u32 size;
4931 int ret;
4932
4933 if (!access_ok(VERIFY_WRITE, uattr, PERF_ATTR_SIZE_VER0))
4934 return -EFAULT;
4935
4936 /*
4937 * zero the full structure, so that a short copy from user space leaves the rest zeroed.
4938 */
4939 memset(attr, 0, sizeof(*attr));
4940
4941 ret = get_user(size, &uattr->size);
4942 if (ret)
4943 return ret;
4944
4945 if (size > PAGE_SIZE) /* silly large */
4946 goto err_size;
4947
4948 if (!size) /* abi compat */
4949 size = PERF_ATTR_SIZE_VER0;
4950
4951 if (size < PERF_ATTR_SIZE_VER0)
4952 goto err_size;
4953
4954 /*
4955 * If we're handed a bigger struct than we know of,
4956 * ensure all the unknown bits are 0 - i.e. new
4957 * user-space does not rely on any kernel feature
4958 * extensions we don't know about yet.
4959 */
4960 if (size > sizeof(*attr)) {
4961 unsigned char __user *addr;
4962 unsigned char __user *end;
4963 unsigned char val;
4964
4965 addr = (void __user *)uattr + sizeof(*attr);
4966 end = (void __user *)uattr + size;
4967
4968 for (; addr < end; addr++) {
4969 ret = get_user(val, addr);
4970 if (ret)
4971 return ret;
4972 if (val)
4973 goto err_size;
4974 }
4975 size = sizeof(*attr);
4976 }
4977
4978 ret = copy_from_user(attr, uattr, size);
4979 if (ret)
4980 return -EFAULT;
4981
4982 /*
4983 * If the type exists, the type-specific init path will verify
4984 * the attr->config.
4985 */
4986 if (attr->type >= PERF_TYPE_MAX)
4987 return -EINVAL;
4988
Mahesh Salgaonkarcd757642010-01-30 10:25:18 +05304989 if (attr->__reserved_1)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004990 return -EINVAL;
4991
4992 if (attr->sample_type & ~(PERF_SAMPLE_MAX-1))
4993 return -EINVAL;
4994
4995 if (attr->read_format & ~(PERF_FORMAT_MAX-1))
4996 return -EINVAL;
4997
4998out:
4999 return ret;
5000
5001err_size:
5002 put_user(sizeof(*attr), &uattr->size);
5003 ret = -E2BIG;
5004 goto out;
5005}
5006
Peter Zijlstraac9721f2010-05-27 12:54:41 +02005007static int
5008perf_event_set_output(struct perf_event *event, struct perf_event *output_event)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005009{
Peter Zijlstraac9721f2010-05-27 12:54:41 +02005010 struct perf_mmap_data *data = NULL, *old_data = NULL;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005011 int ret = -EINVAL;
5012
Peter Zijlstraac9721f2010-05-27 12:54:41 +02005013 if (!output_event)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005014 goto set;
5015
Peter Zijlstraac9721f2010-05-27 12:54:41 +02005016 /* don't allow circular references */
5017 if (event == output_event)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005018 goto out;
5019
Peter Zijlstra0f139302010-05-20 14:35:15 +02005020 /*
5021 * Don't allow cross-cpu buffers
5022 */
5023 if (output_event->cpu != event->cpu)
5024 goto out;
5025
5026 /*
5027 * If it's not a per-cpu buffer, it must be the same task.
5028 */
5029 if (output_event->cpu == -1 && output_event->ctx != event->ctx)
5030 goto out;
5031
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005032set:
5033 mutex_lock(&event->mmap_mutex);
Peter Zijlstraac9721f2010-05-27 12:54:41 +02005034 /* Can't redirect output if we've got an active mmap() */
5035 if (atomic_read(&event->mmap_count))
5036 goto unlock;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005037
Peter Zijlstraac9721f2010-05-27 12:54:41 +02005038 if (output_event) {
5039 /* get the buffer we want to redirect to */
5040 data = perf_mmap_data_get(output_event);
5041 if (!data)
5042 goto unlock;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005043 }
5044
Peter Zijlstraac9721f2010-05-27 12:54:41 +02005045 old_data = event->data;
5046 rcu_assign_pointer(event->data, data);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005047 ret = 0;
Peter Zijlstraac9721f2010-05-27 12:54:41 +02005048unlock:
5049 mutex_unlock(&event->mmap_mutex);
5050
5051 if (old_data)
5052 perf_mmap_data_put(old_data);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005053out:
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005054 return ret;
5055}
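/*
 * Illustrative userspace sketch (an assumption, not part of this file):
 * redirecting a second event's samples into the mmap buffer of the first,
 * so only one ring buffer needs to be mapped.  Both events must be on the
 * same CPU, or in the same task context, as checked above.  attr1, attr2
 * and page_size are assumed to be set up by the caller (page_size from
 * sysconf(_SC_PAGESIZE)); the mmap length is 1 control page plus 2^n data
 * pages.
 *
 *	int fd1 = syscall(__NR_perf_event_open, &attr1, 0, -1, -1, 0);
 *	int fd2 = syscall(__NR_perf_event_open, &attr2, 0, -1, -1, 0);
 *	void *base = mmap(NULL, (1 + 8) * page_size,
 *			  PROT_READ | PROT_WRITE, MAP_SHARED, fd1, 0);
 *
 *	ioctl(fd2, PERF_EVENT_IOC_SET_OUTPUT, fd1);
 */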
5056
5057/**
5058 * sys_perf_event_open - open a performance event, associate it to a task/cpu
5059 *
5060 * @attr_uptr: event_id type attributes for monitoring/sampling
5061 * @pid: target pid
5062 * @cpu: target cpu
5063 * @group_fd: group leader event fd
5064 */
5065SYSCALL_DEFINE5(perf_event_open,
5066 struct perf_event_attr __user *, attr_uptr,
5067 pid_t, pid, int, cpu, int, group_fd, unsigned long, flags)
5068{
Peter Zijlstraac9721f2010-05-27 12:54:41 +02005069 struct perf_event *event, *group_leader = NULL, *output_event = NULL;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005070 struct perf_event_attr attr;
5071 struct perf_event_context *ctx;
5072 struct file *event_file = NULL;
5073 struct file *group_file = NULL;
Al Viroea635c62010-05-26 17:40:29 -04005074 int event_fd;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005075 int fput_needed = 0;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005076 int err;
5077
5078 /* for future expandability... */
5079 if (flags & ~(PERF_FLAG_FD_NO_GROUP | PERF_FLAG_FD_OUTPUT))
5080 return -EINVAL;
5081
5082 err = perf_copy_attr(attr_uptr, &attr);
5083 if (err)
5084 return err;
5085
5086 if (!attr.exclude_kernel) {
5087 if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
5088 return -EACCES;
5089 }
5090
5091 if (attr.freq) {
5092 if (attr.sample_freq > sysctl_perf_event_sample_rate)
5093 return -EINVAL;
5094 }
5095
Al Viroea635c62010-05-26 17:40:29 -04005096 event_fd = get_unused_fd_flags(O_RDWR);
5097 if (event_fd < 0)
5098 return event_fd;
5099
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005100 /*
5101 * Get the target context (task or percpu):
5102 */
5103 ctx = find_get_context(pid, cpu);
Al Viroea635c62010-05-26 17:40:29 -04005104 if (IS_ERR(ctx)) {
5105 err = PTR_ERR(ctx);
5106 goto err_fd;
5107 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005108
Peter Zijlstraac9721f2010-05-27 12:54:41 +02005109 if (group_fd != -1) {
5110 group_leader = perf_fget_light(group_fd, &fput_needed);
5111 if (IS_ERR(group_leader)) {
5112 err = PTR_ERR(group_leader);
5113 goto err_put_context;
5114 }
5115 group_file = group_leader->filp;
5116 if (flags & PERF_FLAG_FD_OUTPUT)
5117 output_event = group_leader;
5118 if (flags & PERF_FLAG_FD_NO_GROUP)
5119 group_leader = NULL;
5120 }
5121
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005122 /*
5123	 * Validate the group leader (we will attach this event to it):
5124 */
Peter Zijlstraac9721f2010-05-27 12:54:41 +02005125 if (group_leader) {
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005126 err = -EINVAL;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005127
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005128 /*
5129 * Do not allow a recursive hierarchy (this new sibling
5130 * becoming part of another group-sibling):
5131 */
5132 if (group_leader->group_leader != group_leader)
5133 goto err_put_context;
5134 /*
5135	 * Do not allow attaching to a group in a different
5136 * task or CPU context:
5137 */
5138 if (group_leader->ctx != ctx)
5139 goto err_put_context;
5140 /*
5141 * Only a group leader can be exclusive or pinned
5142 */
5143 if (attr.exclusive || attr.pinned)
5144 goto err_put_context;
5145 }
5146
5147 event = perf_event_alloc(&attr, cpu, ctx, group_leader,
Frederic Weisbecker97eaf532009-10-18 15:33:50 +02005148 NULL, NULL, GFP_KERNEL);
Peter Zijlstraac9721f2010-05-27 12:54:41 +02005149 if (IS_ERR(event)) {
5150 err = PTR_ERR(event);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005151 goto err_put_context;
Peter Zijlstraac9721f2010-05-27 12:54:41 +02005152 }
5153
5154 if (output_event) {
5155 err = perf_event_set_output(event, output_event);
5156 if (err)
5157 goto err_free_put_context;
5158 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005159
Al Viroea635c62010-05-26 17:40:29 -04005160 event_file = anon_inode_getfile("[perf_event]", &perf_fops, event, O_RDWR);
5161 if (IS_ERR(event_file)) {
5162 err = PTR_ERR(event_file);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005163 goto err_free_put_context;
Al Viroea635c62010-05-26 17:40:29 -04005164 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005165
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005166 event->filp = event_file;
5167 WARN_ON_ONCE(ctx->parent_ctx);
5168 mutex_lock(&ctx->mutex);
5169 perf_install_in_context(ctx, event, cpu);
5170 ++ctx->generation;
5171 mutex_unlock(&ctx->mutex);
5172
5173 event->owner = current;
5174 get_task_struct(current);
5175 mutex_lock(&current->perf_event_mutex);
5176 list_add_tail(&event->owner_entry, &current->perf_event_list);
5177 mutex_unlock(&current->perf_event_mutex);
5178
Peter Zijlstra8a495422010-05-27 15:47:49 +02005179 /*
5180	 * Drop the reference on the group leader's file after placing the
5181 * new event on the sibling_list. This ensures destruction
5182 * of the group leader will find the pointer to itself in
5183 * perf_group_detach().
5184 */
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005185 fput_light(group_file, fput_needed);
Al Viroea635c62010-05-26 17:40:29 -04005186 fd_install(event_fd, event_file);
5187 return event_fd;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005188
Al Viroea635c62010-05-26 17:40:29 -04005189err_free_put_context:
5190 free_event(event);
5191err_put_context:
5192 fput_light(group_file, fput_needed);
5193 put_ctx(ctx);
5194err_fd:
5195 put_unused_fd(event_fd);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005196 return err;
5197}
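/*
 * Illustrative userspace sketch (an assumption, not part of this file):
 * opening a hardware cycle counter for the calling task on any CPU via
 * the raw syscall (there is no libc wrapper), then reading its value.
 * The names fd, attr and count are local to the example.
 *
 *	struct perf_event_attr attr = {
 *		.type		= PERF_TYPE_HARDWARE,
 *		.size		= sizeof(attr),
 *		.config		= PERF_COUNT_HW_CPU_CYCLES,
 *		.disabled	= 1,
 *	};
 *	unsigned long long count;
 *	int fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
 *
 *	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
 *	... run the workload ...
 *	read(fd, &count, sizeof(count));
 */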
5198
Arjan van de Venfb0459d2009-09-25 12:25:56 +02005199/**
5200 * perf_event_create_kernel_counter
5201 *
5202 * @attr: attributes of the counter to create
5203 * @cpu: cpu on which the counter is bound
5204 * @pid: task to profile
 * @overflow_handler: callback invoked when the counter overflows
5205 */
5206struct perf_event *
5207perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
Frederic Weisbeckerb326e952009-12-05 09:44:31 +01005208 pid_t pid,
5209 perf_overflow_handler_t overflow_handler)
Arjan van de Venfb0459d2009-09-25 12:25:56 +02005210{
5211 struct perf_event *event;
5212 struct perf_event_context *ctx;
5213 int err;
5214
5215 /*
5216 * Get the target context (task or percpu):
5217 */
5218
5219 ctx = find_get_context(pid, cpu);
Frederic Weisbeckerc6567f62009-11-26 05:35:41 +01005220 if (IS_ERR(ctx)) {
5221 err = PTR_ERR(ctx);
5222 goto err_exit;
5223 }
Arjan van de Venfb0459d2009-09-25 12:25:56 +02005224
5225 event = perf_event_alloc(attr, cpu, ctx, NULL,
Frederic Weisbeckerb326e952009-12-05 09:44:31 +01005226 NULL, overflow_handler, GFP_KERNEL);
Frederic Weisbeckerc6567f62009-11-26 05:35:41 +01005227 if (IS_ERR(event)) {
5228 err = PTR_ERR(event);
Arjan van de Venfb0459d2009-09-25 12:25:56 +02005229 goto err_put_context;
Frederic Weisbeckerc6567f62009-11-26 05:35:41 +01005230 }
Arjan van de Venfb0459d2009-09-25 12:25:56 +02005231
5232 event->filp = NULL;
5233 WARN_ON_ONCE(ctx->parent_ctx);
5234 mutex_lock(&ctx->mutex);
5235 perf_install_in_context(ctx, event, cpu);
5236 ++ctx->generation;
5237 mutex_unlock(&ctx->mutex);
5238
5239 event->owner = current;
5240 get_task_struct(current);
5241 mutex_lock(&current->perf_event_mutex);
5242 list_add_tail(&event->owner_entry, &current->perf_event_list);
5243 mutex_unlock(&current->perf_event_mutex);
5244
5245 return event;
5246
Frederic Weisbeckerc6567f62009-11-26 05:35:41 +01005247 err_put_context:
5248 put_ctx(ctx);
5249 err_exit:
5250 return ERR_PTR(err);
Arjan van de Venfb0459d2009-09-25 12:25:56 +02005251}
5252EXPORT_SYMBOL_GPL(perf_event_create_kernel_counter);
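/*
 * Illustrative in-kernel sketch (an assumption, not part of this file):
 * a hypothetical module pinning a cycle counter to CPU 0 with its own
 * overflow callback, along the lines of what the hw_breakpoint code does.
 * my_overflow is a made-up name; the handler signature follows
 * perf_overflow_handler_t.
 *
 *	static void my_overflow(struct perf_event *event, int nmi,
 *				struct perf_sample_data *data,
 *				struct pt_regs *regs)
 *	{
 *		... react to the counter overflow ...
 *	}
 *
 *	struct perf_event_attr attr = {
 *		.type		= PERF_TYPE_HARDWARE,
 *		.config		= PERF_COUNT_HW_CPU_CYCLES,
 *		.size		= sizeof(attr),
 *		.sample_period	= 1000000,
 *	};
 *	struct perf_event *ev;
 *
 *	ev = perf_event_create_kernel_counter(&attr, 0, -1, my_overflow);
 *	if (IS_ERR(ev))
 *		return PTR_ERR(ev);
 */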
5253
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005254/*
5255 * inherit an event from parent task to child task:
5256 */
5257static struct perf_event *
5258inherit_event(struct perf_event *parent_event,
5259 struct task_struct *parent,
5260 struct perf_event_context *parent_ctx,
5261 struct task_struct *child,
5262 struct perf_event *group_leader,
5263 struct perf_event_context *child_ctx)
5264{
5265 struct perf_event *child_event;
5266
5267 /*
5268 * Instead of creating recursive hierarchies of events,
5269 * we link inherited events back to the original parent,
5270	 * which is guaranteed to have a filp that we use as the
5271	 * reference count:
5272 */
5273 if (parent_event->parent)
5274 parent_event = parent_event->parent;
5275
5276 child_event = perf_event_alloc(&parent_event->attr,
5277 parent_event->cpu, child_ctx,
5278 group_leader, parent_event,
Frederic Weisbecker97eaf532009-10-18 15:33:50 +02005279 NULL, GFP_KERNEL);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005280 if (IS_ERR(child_event))
5281 return child_event;
5282 get_ctx(child_ctx);
5283
5284 /*
5285 * Make the child state follow the state of the parent event,
5286 * not its attr.disabled bit. We hold the parent's mutex,
5287 * so we won't race with perf_event_{en, dis}able_family.
5288 */
5289 if (parent_event->state >= PERF_EVENT_STATE_INACTIVE)
5290 child_event->state = PERF_EVENT_STATE_INACTIVE;
5291 else
5292 child_event->state = PERF_EVENT_STATE_OFF;
5293
Peter Zijlstra75c9f322010-01-29 09:04:26 +01005294 if (parent_event->attr.freq) {
5295 u64 sample_period = parent_event->hw.sample_period;
5296 struct hw_perf_event *hwc = &child_event->hw;
5297
5298 hwc->sample_period = sample_period;
5299 hwc->last_period = sample_period;
5300
5301 atomic64_set(&hwc->period_left, sample_period);
5302 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005303
Peter Zijlstra453f19e2009-11-20 22:19:43 +01005304 child_event->overflow_handler = parent_event->overflow_handler;
5305
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005306 /*
5307 * Link it up in the child's context:
5308 */
5309 add_event_to_ctx(child_event, child_ctx);
5310
5311 /*
5312 * Get a reference to the parent filp - we will fput it
5313 * when the child event exits. This is safe to do because
5314 * we are in the parent and we know that the filp still
5315 * exists and has a nonzero count:
5316 */
5317 atomic_long_inc(&parent_event->filp->f_count);
5318
5319 /*
5320 * Link this into the parent event's child list
5321 */
5322 WARN_ON_ONCE(parent_event->ctx->parent_ctx);
5323 mutex_lock(&parent_event->child_mutex);
5324 list_add_tail(&child_event->child_list, &parent_event->child_list);
5325 mutex_unlock(&parent_event->child_mutex);
5326
5327 return child_event;
5328}
5329
5330static int inherit_group(struct perf_event *parent_event,
5331 struct task_struct *parent,
5332 struct perf_event_context *parent_ctx,
5333 struct task_struct *child,
5334 struct perf_event_context *child_ctx)
5335{
5336 struct perf_event *leader;
5337 struct perf_event *sub;
5338 struct perf_event *child_ctr;
5339
5340 leader = inherit_event(parent_event, parent, parent_ctx,
5341 child, NULL, child_ctx);
5342 if (IS_ERR(leader))
5343 return PTR_ERR(leader);
5344 list_for_each_entry(sub, &parent_event->sibling_list, group_entry) {
5345 child_ctr = inherit_event(sub, parent, parent_ctx,
5346 child, leader, child_ctx);
5347 if (IS_ERR(child_ctr))
5348 return PTR_ERR(child_ctr);
5349 }
5350 return 0;
5351}
5352
5353static void sync_child_event(struct perf_event *child_event,
5354 struct task_struct *child)
5355{
5356 struct perf_event *parent_event = child_event->parent;
5357 u64 child_val;
5358
5359 if (child_event->attr.inherit_stat)
5360 perf_event_read_event(child_event, child);
5361
5362 child_val = atomic64_read(&child_event->count);
5363
5364 /*
5365 * Add back the child's count to the parent's count:
5366 */
5367 atomic64_add(child_val, &parent_event->count);
5368 atomic64_add(child_event->total_time_enabled,
5369 &parent_event->child_total_time_enabled);
5370 atomic64_add(child_event->total_time_running,
5371 &parent_event->child_total_time_running);
5372
5373 /*
5374 * Remove this event from the parent's list
5375 */
5376 WARN_ON_ONCE(parent_event->ctx->parent_ctx);
5377 mutex_lock(&parent_event->child_mutex);
5378 list_del_init(&child_event->child_list);
5379 mutex_unlock(&parent_event->child_mutex);
5380
5381 /*
5382 * Release the parent event, if this was the last
5383 * reference to it.
5384 */
5385 fput(parent_event->filp);
5386}
5387
5388static void
5389__perf_event_exit_task(struct perf_event *child_event,
5390 struct perf_event_context *child_ctx,
5391 struct task_struct *child)
5392{
5393 struct perf_event *parent_event;
5394
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005395 perf_event_remove_from_context(child_event);
5396
5397 parent_event = child_event->parent;
5398 /*
5399	 * It can happen that the parent exits first, and has events
5400 * that are still around due to the child reference. These
5401 * events need to be zapped - but otherwise linger.
5402 */
5403 if (parent_event) {
5404 sync_child_event(child_event, child);
5405 free_event(child_event);
5406 }
5407}
5408
5409/*
5410 * When a child task exits, feed back event values to parent events.
5411 */
5412void perf_event_exit_task(struct task_struct *child)
5413{
5414 struct perf_event *child_event, *tmp;
5415 struct perf_event_context *child_ctx;
5416 unsigned long flags;
5417
5418 if (likely(!child->perf_event_ctxp)) {
5419 perf_event_task(child, NULL, 0);
5420 return;
5421 }
5422
5423 local_irq_save(flags);
5424 /*
5425 * We can't reschedule here because interrupts are disabled,
5426 * and either child is current or it is a task that can't be
5427 * scheduled, so we are now safe from rescheduling changing
5428 * our context.
5429 */
5430 child_ctx = child->perf_event_ctxp;
5431 __perf_event_task_sched_out(child_ctx);
5432
5433 /*
5434 * Take the context lock here so that if find_get_context is
5435 * reading child->perf_event_ctxp, we wait until it has
5436 * incremented the context's refcount before we do put_ctx below.
5437 */
Thomas Gleixnere625cce12009-11-17 18:02:06 +01005438 raw_spin_lock(&child_ctx->lock);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005439 child->perf_event_ctxp = NULL;
5440 /*
5441 * If this context is a clone; unclone it so it can't get
5442 * swapped to another process while we're removing all
5443 * the events from it.
5444 */
5445 unclone_ctx(child_ctx);
Peter Zijlstra5e942bb2009-11-23 11:37:26 +01005446 update_context_time(child_ctx);
Thomas Gleixnere625cce12009-11-17 18:02:06 +01005447 raw_spin_unlock_irqrestore(&child_ctx->lock, flags);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005448
5449 /*
5450 * Report the task dead after unscheduling the events so that we
5451 * won't get any samples after PERF_RECORD_EXIT. We can however still
5452 * get a few PERF_RECORD_READ events.
5453 */
5454 perf_event_task(child, child_ctx, 0);
5455
5456 /*
5457 * We can recurse on the same lock type through:
5458 *
5459 * __perf_event_exit_task()
5460 * sync_child_event()
5461 * fput(parent_event->filp)
5462 * perf_release()
5463 * mutex_lock(&ctx->mutex)
5464 *
5465	 * But since it's the parent context, it won't be the same instance.
5466 */
Peter Zijlstraa0507c82010-05-06 15:42:53 +02005467 mutex_lock(&child_ctx->mutex);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005468
5469again:
Frederic Weisbecker889ff012010-01-09 20:04:47 +01005470 list_for_each_entry_safe(child_event, tmp, &child_ctx->pinned_groups,
5471 group_entry)
5472 __perf_event_exit_task(child_event, child_ctx, child);
5473
5474 list_for_each_entry_safe(child_event, tmp, &child_ctx->flexible_groups,
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005475 group_entry)
5476 __perf_event_exit_task(child_event, child_ctx, child);
5477
5478 /*
5479 * If the last event was a group event, it will have appended all
5480	 * its siblings to the list, but we obtained 'tmp' before that, so it
5481	 * will still point to the list head terminating the iteration.
5482 */
Frederic Weisbecker889ff012010-01-09 20:04:47 +01005483 if (!list_empty(&child_ctx->pinned_groups) ||
5484 !list_empty(&child_ctx->flexible_groups))
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005485 goto again;
5486
5487 mutex_unlock(&child_ctx->mutex);
5488
5489 put_ctx(child_ctx);
5490}
5491
Frederic Weisbecker889ff012010-01-09 20:04:47 +01005492static void perf_free_event(struct perf_event *event,
5493 struct perf_event_context *ctx)
5494{
5495 struct perf_event *parent = event->parent;
5496
5497 if (WARN_ON_ONCE(!parent))
5498 return;
5499
5500 mutex_lock(&parent->child_mutex);
5501 list_del_init(&event->child_list);
5502 mutex_unlock(&parent->child_mutex);
5503
5504 fput(parent->filp);
5505
Peter Zijlstra8a495422010-05-27 15:47:49 +02005506 perf_group_detach(event);
Frederic Weisbecker889ff012010-01-09 20:04:47 +01005507 list_del_event(event, ctx);
5508 free_event(event);
5509}
5510
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005511/*
5512 * free an unexposed, unused context, as created by inheritance via
5513 * perf_event_init_task() below; used by fork() in case of failure.
5514 */
5515void perf_event_free_task(struct task_struct *task)
5516{
5517 struct perf_event_context *ctx = task->perf_event_ctxp;
5518 struct perf_event *event, *tmp;
5519
5520 if (!ctx)
5521 return;
5522
5523 mutex_lock(&ctx->mutex);
5524again:
Frederic Weisbecker889ff012010-01-09 20:04:47 +01005525 list_for_each_entry_safe(event, tmp, &ctx->pinned_groups, group_entry)
5526 perf_free_event(event, ctx);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005527
Frederic Weisbecker889ff012010-01-09 20:04:47 +01005528 list_for_each_entry_safe(event, tmp, &ctx->flexible_groups,
5529 group_entry)
5530 perf_free_event(event, ctx);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005531
Frederic Weisbecker889ff012010-01-09 20:04:47 +01005532 if (!list_empty(&ctx->pinned_groups) ||
5533 !list_empty(&ctx->flexible_groups))
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005534 goto again;
5535
5536 mutex_unlock(&ctx->mutex);
5537
5538 put_ctx(ctx);
5539}
5540
Frederic Weisbecker889ff012010-01-09 20:04:47 +01005541static int
5542inherit_task_group(struct perf_event *event, struct task_struct *parent,
5543 struct perf_event_context *parent_ctx,
5544 struct task_struct *child,
5545 int *inherited_all)
5546{
5547 int ret;
5548 struct perf_event_context *child_ctx = child->perf_event_ctxp;
5549
5550 if (!event->attr.inherit) {
5551 *inherited_all = 0;
5552 return 0;
5553 }
5554
5555 if (!child_ctx) {
5556 /*
5557 * This is executed from the parent task context, so
5558 * inherit events that have been marked for cloning.
5559 * First allocate and initialize a context for the
5560 * child.
5561 */
5562
5563 child_ctx = kzalloc(sizeof(struct perf_event_context),
5564 GFP_KERNEL);
5565 if (!child_ctx)
5566 return -ENOMEM;
5567
5568 __perf_event_init_context(child_ctx, child);
5569 child->perf_event_ctxp = child_ctx;
5570 get_task_struct(child);
5571 }
5572
5573 ret = inherit_group(event, parent, parent_ctx,
5574 child, child_ctx);
5575
5576 if (ret)
5577 *inherited_all = 0;
5578
5579 return ret;
5580}
5581
5582
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005583/*
5584 * Initialize the perf_event context in task_struct
5585 */
5586int perf_event_init_task(struct task_struct *child)
5587{
Frederic Weisbecker889ff012010-01-09 20:04:47 +01005588 struct perf_event_context *child_ctx, *parent_ctx;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005589 struct perf_event_context *cloned_ctx;
5590 struct perf_event *event;
5591 struct task_struct *parent = current;
5592 int inherited_all = 1;
5593 int ret = 0;
5594
5595 child->perf_event_ctxp = NULL;
5596
5597 mutex_init(&child->perf_event_mutex);
5598 INIT_LIST_HEAD(&child->perf_event_list);
5599
5600 if (likely(!parent->perf_event_ctxp))
5601 return 0;
5602
5603 /*
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005604 * If the parent's context is a clone, pin it so it won't get
5605 * swapped under us.
5606 */
5607 parent_ctx = perf_pin_task_context(parent);
5608
5609 /*
5610 * No need to check if parent_ctx != NULL here; since we saw
5611 * it non-NULL earlier, the only reason for it to become NULL
5612 * is if we exit, and since we're currently in the middle of
5613 * a fork we can't be exiting at the same time.
5614 */
5615
5616 /*
5617 * Lock the parent list. No need to lock the child - not PID
5618 * hashed yet and not running, so nobody can access it.
5619 */
5620 mutex_lock(&parent_ctx->mutex);
5621
5622 /*
5623	 * We don't have to disable NMIs - we are only looking at
5624 * the list, not manipulating it:
5625 */
Frederic Weisbecker889ff012010-01-09 20:04:47 +01005626 list_for_each_entry(event, &parent_ctx->pinned_groups, group_entry) {
5627 ret = inherit_task_group(event, parent, parent_ctx, child,
5628 &inherited_all);
5629 if (ret)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005630 break;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005631 }
5632
Frederic Weisbecker889ff012010-01-09 20:04:47 +01005633 list_for_each_entry(event, &parent_ctx->flexible_groups, group_entry) {
5634 ret = inherit_task_group(event, parent, parent_ctx, child,
5635 &inherited_all);
5636 if (ret)
5637 break;
5638 }
5639
5640 child_ctx = child->perf_event_ctxp;
5641
Peter Zijlstra05cbaa22009-12-30 16:00:35 +01005642 if (child_ctx && inherited_all) {
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005643 /*
5644 * Mark the child context as a clone of the parent
5645 * context, or of whatever the parent is a clone of.
5646 * Note that if the parent is a clone, it could get
5647 * uncloned at any point, but that doesn't matter
5648 * because the list of events and the generation
5649 * count can't have changed since we took the mutex.
5650 */
5651 cloned_ctx = rcu_dereference(parent_ctx->parent_ctx);
5652 if (cloned_ctx) {
5653 child_ctx->parent_ctx = cloned_ctx;
5654 child_ctx->parent_gen = parent_ctx->parent_gen;
5655 } else {
5656 child_ctx->parent_ctx = parent_ctx;
5657 child_ctx->parent_gen = parent_ctx->generation;
5658 }
5659 get_ctx(child_ctx->parent_ctx);
5660 }
5661
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005662 mutex_unlock(&parent_ctx->mutex);
5663
5664 perf_unpin_context(parent_ctx);
5665
5666 return ret;
5667}
5668
Paul Mackerras220b1402010-03-10 20:45:52 +11005669static void __init perf_event_init_all_cpus(void)
5670{
5671 int cpu;
5672 struct perf_cpu_context *cpuctx;
5673
5674 for_each_possible_cpu(cpu) {
5675 cpuctx = &per_cpu(perf_cpu_context, cpu);
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02005676 mutex_init(&cpuctx->hlist_mutex);
Paul Mackerras220b1402010-03-10 20:45:52 +11005677 __perf_event_init_context(&cpuctx->ctx, NULL);
5678 }
5679}
5680
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005681static void __cpuinit perf_event_init_cpu(int cpu)
5682{
5683 struct perf_cpu_context *cpuctx;
5684
5685 cpuctx = &per_cpu(perf_cpu_context, cpu);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005686
5687 spin_lock(&perf_resource_lock);
5688 cpuctx->max_pertask = perf_max_events - perf_reserved_percpu;
5689 spin_unlock(&perf_resource_lock);
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02005690
5691 mutex_lock(&cpuctx->hlist_mutex);
5692 if (cpuctx->hlist_refcount > 0) {
5693 struct swevent_hlist *hlist;
5694
5695 hlist = kzalloc(sizeof(*hlist), GFP_KERNEL);
5696 WARN_ON_ONCE(!hlist);
5697 rcu_assign_pointer(cpuctx->swevent_hlist, hlist);
5698 }
5699 mutex_unlock(&cpuctx->hlist_mutex);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005700}
5701
5702#ifdef CONFIG_HOTPLUG_CPU
5703static void __perf_event_exit_cpu(void *info)
5704{
5705 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
5706 struct perf_event_context *ctx = &cpuctx->ctx;
5707 struct perf_event *event, *tmp;
5708
Frederic Weisbecker889ff012010-01-09 20:04:47 +01005709 list_for_each_entry_safe(event, tmp, &ctx->pinned_groups, group_entry)
5710 __perf_event_remove_from_context(event);
5711 list_for_each_entry_safe(event, tmp, &ctx->flexible_groups, group_entry)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005712 __perf_event_remove_from_context(event);
5713}
5714static void perf_event_exit_cpu(int cpu)
5715{
5716 struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
5717 struct perf_event_context *ctx = &cpuctx->ctx;
5718
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02005719 mutex_lock(&cpuctx->hlist_mutex);
5720 swevent_hlist_release(cpuctx);
5721 mutex_unlock(&cpuctx->hlist_mutex);
5722
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005723 mutex_lock(&ctx->mutex);
5724 smp_call_function_single(cpu, __perf_event_exit_cpu, NULL, 1);
5725 mutex_unlock(&ctx->mutex);
5726}
5727#else
5728static inline void perf_event_exit_cpu(int cpu) { }
5729#endif
5730
5731static int __cpuinit
5732perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
5733{
5734 unsigned int cpu = (long)hcpu;
5735
5736 switch (action) {
5737
5738 case CPU_UP_PREPARE:
5739 case CPU_UP_PREPARE_FROZEN:
5740 perf_event_init_cpu(cpu);
5741 break;
5742
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005743 case CPU_DOWN_PREPARE:
5744 case CPU_DOWN_PREPARE_FROZEN:
5745 perf_event_exit_cpu(cpu);
5746 break;
5747
5748 default:
5749 break;
5750 }
5751
5752 return NOTIFY_OK;
5753}
5754
5755/*
5756 * This has to have a higher priority than migration_notifier in sched.c.
5757 */
5758static struct notifier_block __cpuinitdata perf_cpu_nb = {
5759 .notifier_call = perf_cpu_notify,
5760 .priority = 20,
5761};
5762
5763void __init perf_event_init(void)
5764{
Paul Mackerras220b1402010-03-10 20:45:52 +11005765 perf_event_init_all_cpus();
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005766 perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_UP_PREPARE,
5767 (void *)(long)smp_processor_id());
5768 perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_ONLINE,
5769 (void *)(long)smp_processor_id());
5770 register_cpu_notifier(&perf_cpu_nb);
5771}
5772
Andi Kleenc9be0a32010-01-05 12:47:58 +01005773static ssize_t perf_show_reserve_percpu(struct sysdev_class *class,
5774 struct sysdev_class_attribute *attr,
5775 char *buf)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005776{
5777 return sprintf(buf, "%d\n", perf_reserved_percpu);
5778}
5779
5780static ssize_t
5781perf_set_reserve_percpu(struct sysdev_class *class,
Andi Kleenc9be0a32010-01-05 12:47:58 +01005782 struct sysdev_class_attribute *attr,
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005783 const char *buf,
5784 size_t count)
5785{
5786 struct perf_cpu_context *cpuctx;
5787 unsigned long val;
5788 int err, cpu, mpt;
5789
5790 err = strict_strtoul(buf, 10, &val);
5791 if (err)
5792 return err;
5793 if (val > perf_max_events)
5794 return -EINVAL;
5795
5796 spin_lock(&perf_resource_lock);
5797 perf_reserved_percpu = val;
5798 for_each_online_cpu(cpu) {
5799 cpuctx = &per_cpu(perf_cpu_context, cpu);
Thomas Gleixnere625cce12009-11-17 18:02:06 +01005800 raw_spin_lock_irq(&cpuctx->ctx.lock);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005801 mpt = min(perf_max_events - cpuctx->ctx.nr_events,
5802 perf_max_events - perf_reserved_percpu);
5803 cpuctx->max_pertask = mpt;
Thomas Gleixnere625cce12009-11-17 18:02:06 +01005804 raw_spin_unlock_irq(&cpuctx->ctx.lock);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005805 }
5806 spin_unlock(&perf_resource_lock);
5807
5808 return count;
5809}
5810
Andi Kleenc9be0a32010-01-05 12:47:58 +01005811static ssize_t perf_show_overcommit(struct sysdev_class *class,
5812 struct sysdev_class_attribute *attr,
5813 char *buf)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005814{
5815 return sprintf(buf, "%d\n", perf_overcommit);
5816}
5817
5818static ssize_t
Andi Kleenc9be0a32010-01-05 12:47:58 +01005819perf_set_overcommit(struct sysdev_class *class,
5820 struct sysdev_class_attribute *attr,
5821 const char *buf, size_t count)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005822{
5823 unsigned long val;
5824 int err;
5825
5826 err = strict_strtoul(buf, 10, &val);
5827 if (err)
5828 return err;
5829 if (val > 1)
5830 return -EINVAL;
5831
5832 spin_lock(&perf_resource_lock);
5833 perf_overcommit = val;
5834 spin_unlock(&perf_resource_lock);
5835
5836 return count;
5837}
5838
5839static SYSDEV_CLASS_ATTR(
5840 reserve_percpu,
5841 0644,
5842 perf_show_reserve_percpu,
5843 perf_set_reserve_percpu
5844 );
5845
5846static SYSDEV_CLASS_ATTR(
5847 overcommit,
5848 0644,
5849 perf_show_overcommit,
5850 perf_set_overcommit
5851 );
5852
5853static struct attribute *perfclass_attrs[] = {
5854 &attr_reserve_percpu.attr,
5855 &attr_overcommit.attr,
5856 NULL
5857};
5858
5859static struct attribute_group perfclass_attr_group = {
5860 .attrs = perfclass_attrs,
5861 .name = "perf_events",
5862};
5863
5864static int __init perf_event_sysfs_init(void)
5865{
5866 return sysfs_create_group(&cpu_sysdev_class.kset.kobj,
5867 &perfclass_attr_group);
5868}
5869device_initcall(perf_event_sysfs_init);
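/*
 * Illustrative admin-side note (an assumption, not part of this file):
 * the two attributes registered above live in the cpu sysdev class, so
 * with sysfs mounted they can be read and tuned at run time, e.g.:
 *
 *	cat      /sys/devices/system/cpu/perf_events/reserve_percpu
 *	echo 2 > /sys/devices/system/cpu/perf_events/reserve_percpu
 *	echo 0 > /sys/devices/system/cpu/perf_events/overcommit
 */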