/*
 * Performance events core code:
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra
 *  Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * For licensing details see kernel-base/COPYING
 */

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/idr.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/tick.h>
#include <linux/sysfs.h>
#include <linux/dcache.h>
#include <linux/percpu.h>
#include <linux/ptrace.h>
#include <linux/reboot.h>
#include <linux/vmstat.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/vmalloc.h>
#include <linux/hardirq.h>
#include <linux/rculist.h>
#include <linux/uaccess.h>
#include <linux/syscalls.h>
#include <linux/anon_inodes.h>
#include <linux/kernel_stat.h>
#include <linux/cgroup.h>
#include <linux/perf_event.h>
#include <linux/trace_events.h>
#include <linux/hw_breakpoint.h>
#include <linux/mm_types.h>
#include <linux/module.h>
#include <linux/mman.h>
#include <linux/compat.h>
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/namei.h>
#include <linux/parser.h>
#include <linux/sched/clock.h>
#include <linux/sched/mm.h>
#include <linux/proc_ns.h>
#include <linux/mount.h>

#include "internal.h"

#include <asm/irq_regs.h>

typedef int (*remote_function_f)(void *);

struct remote_function_call {
	struct task_struct	*p;
	remote_function_f	func;
	void			*info;
	int			ret;
};

static void remote_function(void *data)
{
	struct remote_function_call *tfc = data;
	struct task_struct *p = tfc->p;

	if (p) {
		/* -EAGAIN */
		if (task_cpu(p) != smp_processor_id())
			return;

		/*
		 * Now that we're on the right CPU with IRQs disabled, we can
		 * test whether we hit the right task without races.
		 */

		tfc->ret = -ESRCH; /* No such (running) process */
		if (p != current)
			return;
	}

	tfc->ret = tfc->func(tfc->info);
}

/**
 * task_function_call - call a function on the cpu on which a task runs
 * @p:		the task to evaluate
 * @func:	the function to be called
 * @info:	the function call argument
 *
 * Calls the function @func when the task is currently running. This might
 * be on the current CPU, in which case the function is called directly.
 *
 * returns: @func return value, or
 *	    -ESRCH  - when the process isn't running
 *	    -EAGAIN - when the process moved away
 */
static int
task_function_call(struct task_struct *p, remote_function_f func, void *info)
{
	struct remote_function_call data = {
		.p	= p,
		.func	= func,
		.info	= info,
		.ret	= -EAGAIN,
	};
	int ret;

	do {
		ret = smp_call_function_single(task_cpu(p), remote_function, &data, 1);
		if (!ret)
			ret = data.ret;
	} while (ret == -EAGAIN);

	return ret;
}

/**
 * cpu_function_call - call a function on a given cpu
 * @cpu:	the cpu on which to run the function
 * @func:	the function to be called
 * @info:	the function call argument
 *
 * Calls the function @func on the remote cpu.
 *
 * returns: @func return value or -ENXIO when the cpu is offline
 */
static int cpu_function_call(int cpu, remote_function_f func, void *info)
{
	struct remote_function_call data = {
		.p	= NULL,
		.func	= func,
		.info	= info,
		.ret	= -ENXIO, /* No such CPU */
	};

	smp_call_function_single(cpu, remote_function, &data, 1);

	return data.ret;
}

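/*
 * Illustrative sketch, not part of the original file: how these helpers
 * are typically used.  The callback runs with IRQs disabled on the target
 * CPU and returns an int; "example_query" and "struct example_arg" are
 * hypothetical names.
 *
 *	struct example_arg { u64 value; };
 *
 *	static int example_query(void *info)
 *	{
 *		struct example_arg *arg = info;
 *
 *		arg->value = smp_processor_id();
 *		return 0;
 *	}
 *
 *	err = task_function_call(p, example_query, &arg);
 *		- runs example_query() on the CPU where @p is running
 *	err = cpu_function_call(cpu, example_query, &arg);
 *		- runs example_query() on the given CPU
 */
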
static inline struct perf_cpu_context *
__get_cpu_context(struct perf_event_context *ctx)
{
	return this_cpu_ptr(ctx->pmu->pmu_cpu_context);
}

static void perf_ctx_lock(struct perf_cpu_context *cpuctx,
			  struct perf_event_context *ctx)
{
	raw_spin_lock(&cpuctx->ctx.lock);
	if (ctx)
		raw_spin_lock(&ctx->lock);
}

static void perf_ctx_unlock(struct perf_cpu_context *cpuctx,
			    struct perf_event_context *ctx)
{
	if (ctx)
		raw_spin_unlock(&ctx->lock);
	raw_spin_unlock(&cpuctx->ctx.lock);
}

#define TASK_TOMBSTONE ((void *)-1L)

static bool is_kernel_event(struct perf_event *event)
{
	return READ_ONCE(event->owner) == TASK_TOMBSTONE;
}

/*
 * On task ctx scheduling...
 *
 * When !ctx->nr_events a task context will not be scheduled. This means
 * we can disable the scheduler hooks (for performance) without leaving
 * pending task ctx state.
 *
 * This however results in two special cases:
 *
 *  - removing the last event from a task ctx; this is relatively
 *    straightforward and is done in __perf_remove_from_context.
 *
 *  - adding the first event to a task ctx; this is tricky because we cannot
 *    rely on ctx->is_active and therefore cannot use event_function_call().
 *    See perf_install_in_context().
 *
 * If ctx->nr_events, then ctx->is_active and cpuctx->task_ctx are set.
 */

typedef void (*event_f)(struct perf_event *, struct perf_cpu_context *,
			struct perf_event_context *, void *);

struct event_function_struct {
	struct perf_event *event;
	event_f func;
	void *data;
};

static int event_function(void *info)
{
	struct event_function_struct *efs = info;
	struct perf_event *event = efs->event;
	struct perf_event_context *ctx = event->ctx;
	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
	struct perf_event_context *task_ctx = cpuctx->task_ctx;
	int ret = 0;

	lockdep_assert_irqs_disabled();

	perf_ctx_lock(cpuctx, task_ctx);
	/*
	 * Since we do the IPI call without holding ctx->lock things can have
	 * changed, double check we hit the task we set out to hit.
	 */
	if (ctx->task) {
		if (ctx->task != current) {
			ret = -ESRCH;
			goto unlock;
		}

		/*
		 * We only use event_function_call() on established contexts,
		 * and event_function() is only ever called when active (or
		 * rather, we'll have bailed in task_function_call() or the
		 * above ctx->task != current test), therefore we must have
		 * ctx->is_active here.
		 */
		WARN_ON_ONCE(!ctx->is_active);
		/*
		 * And since we have ctx->is_active, cpuctx->task_ctx must
		 * match.
		 */
		WARN_ON_ONCE(task_ctx != ctx);
	} else {
		WARN_ON_ONCE(&cpuctx->ctx != ctx);
	}

	efs->func(event, cpuctx, ctx, efs->data);
unlock:
	perf_ctx_unlock(cpuctx, task_ctx);

	return ret;
}

static void event_function_call(struct perf_event *event, event_f func, void *data)
{
	struct perf_event_context *ctx = event->ctx;
	struct task_struct *task = READ_ONCE(ctx->task); /* verified in event_function */
	struct event_function_struct efs = {
		.event = event,
		.func = func,
		.data = data,
	};

	if (!event->parent) {
		/*
		 * If this is a !child event, we must hold ctx::mutex to
		 * stabilize the event->ctx relation. See
		 * perf_event_ctx_lock().
		 */
		lockdep_assert_held(&ctx->mutex);
	}

	if (!task) {
		cpu_function_call(event->cpu, event_function, &efs);
		return;
	}

	if (task == TASK_TOMBSTONE)
		return;

again:
	if (!task_function_call(task, event_function, &efs))
		return;

	raw_spin_lock_irq(&ctx->lock);
	/*
	 * Reload the task pointer, it might have been changed by
	 * a concurrent perf_event_context_sched_out().
	 */
	task = ctx->task;
	if (task == TASK_TOMBSTONE) {
		raw_spin_unlock_irq(&ctx->lock);
		return;
	}
	if (ctx->is_active) {
		raw_spin_unlock_irq(&ctx->lock);
		goto again;
	}
	func(event, NULL, ctx, data);
	raw_spin_unlock_irq(&ctx->lock);
}

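/*
 * Illustrative sketch, not part of the original file: callers package an
 * operation as an event_f and let event_function_call() route it to the
 * right CPU/task.  "__example_disable" is a hypothetical name; the real
 * callers are functions like __perf_event_disable().
 *
 *	static void __example_disable(struct perf_event *event,
 *				      struct perf_cpu_context *cpuctx,
 *				      struct perf_event_context *ctx,
 *				      void *info)
 *	{
 *		// runs with ctx->lock held, on the right CPU
 *	}
 *
 *	event_function_call(event, __example_disable, NULL);
 */
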
/*
 * Similar to event_function_call() + event_function(), but hard assumes IRQs
 * are already disabled and we're on the right CPU.
 */
static void event_function_local(struct perf_event *event, event_f func, void *data)
{
	struct perf_event_context *ctx = event->ctx;
	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
	struct task_struct *task = READ_ONCE(ctx->task);
	struct perf_event_context *task_ctx = NULL;

	lockdep_assert_irqs_disabled();

	if (task) {
		if (task == TASK_TOMBSTONE)
			return;

		task_ctx = ctx;
	}

	perf_ctx_lock(cpuctx, task_ctx);

	task = ctx->task;
	if (task == TASK_TOMBSTONE)
		goto unlock;

	if (task) {
		/*
		 * We must be either inactive or active and the right task,
		 * otherwise we're screwed, since we cannot IPI to somewhere
		 * else.
		 */
		if (ctx->is_active) {
			if (WARN_ON_ONCE(task != current))
				goto unlock;

			if (WARN_ON_ONCE(cpuctx->task_ctx != ctx))
				goto unlock;
		}
	} else {
		WARN_ON_ONCE(&cpuctx->ctx != ctx);
	}

	func(event, cpuctx, ctx, data);
unlock:
	perf_ctx_unlock(cpuctx, task_ctx);
}

#define PERF_FLAG_ALL (PERF_FLAG_FD_NO_GROUP |\
		       PERF_FLAG_FD_OUTPUT  |\
		       PERF_FLAG_PID_CGROUP |\
		       PERF_FLAG_FD_CLOEXEC)

/*
 * branch priv levels that need permission checks
 */
#define PERF_SAMPLE_BRANCH_PERM_PLM \
	(PERF_SAMPLE_BRANCH_KERNEL |\
	 PERF_SAMPLE_BRANCH_HV)

enum event_type_t {
	EVENT_FLEXIBLE = 0x1,
	EVENT_PINNED = 0x2,
	EVENT_TIME = 0x4,
	/* see ctx_resched() for details */
	EVENT_CPU = 0x8,
	EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED,
};

/*
 * perf_sched_events : >0 events exist
 * perf_cgroup_events: >0 per-cpu cgroup events exist on this cpu
 */

static void perf_sched_delayed(struct work_struct *work);
DEFINE_STATIC_KEY_FALSE(perf_sched_events);
static DECLARE_DELAYED_WORK(perf_sched_work, perf_sched_delayed);
static DEFINE_MUTEX(perf_sched_mutex);
static atomic_t perf_sched_count;

static DEFINE_PER_CPU(atomic_t, perf_cgroup_events);
static DEFINE_PER_CPU(int, perf_sched_cb_usages);
static DEFINE_PER_CPU(struct pmu_event_list, pmu_sb_events);

static atomic_t nr_mmap_events __read_mostly;
static atomic_t nr_comm_events __read_mostly;
static atomic_t nr_namespaces_events __read_mostly;
static atomic_t nr_task_events __read_mostly;
static atomic_t nr_freq_events __read_mostly;
static atomic_t nr_switch_events __read_mostly;

static LIST_HEAD(pmus);
static DEFINE_MUTEX(pmus_lock);
static struct srcu_struct pmus_srcu;
static cpumask_var_t perf_online_mask;

/*
 * perf event paranoia level:
 *  -1 - not paranoid at all
 *   0 - disallow raw tracepoint access for unpriv
 *   1 - disallow cpu events for unpriv
 *   2 - disallow kernel profiling for unpriv
 */
int sysctl_perf_event_paranoid __read_mostly = 2;

/* Minimum for 512 kiB + 1 user control page */
int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024); /* 'free' kiB per user */

/*
 * max perf event sample rate
 */
#define DEFAULT_MAX_SAMPLE_RATE		100000
#define DEFAULT_SAMPLE_PERIOD_NS	(NSEC_PER_SEC / DEFAULT_MAX_SAMPLE_RATE)
#define DEFAULT_CPU_TIME_MAX_PERCENT	25

int sysctl_perf_event_sample_rate __read_mostly = DEFAULT_MAX_SAMPLE_RATE;

static int max_samples_per_tick __read_mostly = DIV_ROUND_UP(DEFAULT_MAX_SAMPLE_RATE, HZ);
static int perf_sample_period_ns __read_mostly = DEFAULT_SAMPLE_PERIOD_NS;

static int perf_sample_allowed_ns __read_mostly =
	DEFAULT_SAMPLE_PERIOD_NS * DEFAULT_CPU_TIME_MAX_PERCENT / 100;

static void update_perf_cpu_limits(void)
{
	u64 tmp = perf_sample_period_ns;

	tmp *= sysctl_perf_cpu_time_max_percent;
	tmp = div_u64(tmp, 100);
	if (!tmp)
		tmp = 1;

	WRITE_ONCE(perf_sample_allowed_ns, tmp);
}

static int perf_rotate_context(struct perf_cpu_context *cpuctx);

int perf_proc_update_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos)
{
	int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);

	if (ret || !write)
		return ret;

	/*
	 * If throttling is disabled don't allow the write:
	 */
	if (sysctl_perf_cpu_time_max_percent == 100 ||
	    sysctl_perf_cpu_time_max_percent == 0)
		return -EINVAL;

	max_samples_per_tick = DIV_ROUND_UP(sysctl_perf_event_sample_rate, HZ);
	perf_sample_period_ns = NSEC_PER_SEC / sysctl_perf_event_sample_rate;
	update_perf_cpu_limits();

	return 0;
}

int sysctl_perf_cpu_time_max_percent __read_mostly = DEFAULT_CPU_TIME_MAX_PERCENT;

int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
				void __user *buffer, size_t *lenp,
				loff_t *ppos)
{
	int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);

	if (ret || !write)
		return ret;

	if (sysctl_perf_cpu_time_max_percent == 100 ||
	    sysctl_perf_cpu_time_max_percent == 0) {
		printk(KERN_WARNING
		       "perf: Dynamic interrupt throttling disabled, can hang your system!\n");
		WRITE_ONCE(perf_sample_allowed_ns, 0);
	} else {
		update_perf_cpu_limits();
	}

	return 0;
}

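/*
 * Illustrative note, not part of the original file: both knobs above are
 * exposed via sysctl, e.g. (shell, run as root):
 *
 *	sysctl kernel.perf_event_max_sample_rate=50000
 *	sysctl kernel.perf_cpu_time_max_percent=25
 *
 * Writing either one funnels through the handlers above, which recompute
 * max_samples_per_tick, perf_sample_period_ns and perf_sample_allowed_ns.
 */
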
/*
 * perf samples are done in some very critical code paths (NMIs).
 * If they take too much CPU time, the system can lock up and not
 * get any real work done.  This will drop the sample rate when
 * we detect that events are taking too long.
 */
#define NR_ACCUMULATED_SAMPLES 128
static DEFINE_PER_CPU(u64, running_sample_length);

static u64 __report_avg;
static u64 __report_allowed;

static void perf_duration_warn(struct irq_work *w)
{
	printk_ratelimited(KERN_INFO
		"perf: interrupt took too long (%lld > %lld), lowering "
		"kernel.perf_event_max_sample_rate to %d\n",
		__report_avg, __report_allowed,
		sysctl_perf_event_sample_rate);
}

static DEFINE_IRQ_WORK(perf_duration_work, perf_duration_warn);

void perf_sample_event_took(u64 sample_len_ns)
{
	u64 max_len = READ_ONCE(perf_sample_allowed_ns);
	u64 running_len;
	u64 avg_len;
	u32 max;

	if (max_len == 0)
		return;

	/* Decay the counter by 1 average sample. */
	running_len = __this_cpu_read(running_sample_length);
	running_len -= running_len/NR_ACCUMULATED_SAMPLES;
	running_len += sample_len_ns;
	__this_cpu_write(running_sample_length, running_len);

	/*
	 * Note: this will be biased artificially low until we have
	 * seen NR_ACCUMULATED_SAMPLES. Doing it this way keeps us
	 * from having to maintain a count.
	 */
	avg_len = running_len/NR_ACCUMULATED_SAMPLES;
	if (avg_len <= max_len)
		return;

	__report_avg = avg_len;
	__report_allowed = max_len;

	/*
	 * Compute a throttle threshold 25% below the current duration.
	 */
	avg_len += avg_len / 4;
	max = (TICK_NSEC / 100) * sysctl_perf_cpu_time_max_percent;
	if (avg_len < max)
		max /= (u32)avg_len;
	else
		max = 1;

	WRITE_ONCE(perf_sample_allowed_ns, avg_len);
	WRITE_ONCE(max_samples_per_tick, max);

	sysctl_perf_event_sample_rate = max * HZ;
	perf_sample_period_ns = NSEC_PER_SEC / sysctl_perf_event_sample_rate;

	if (!irq_work_queue(&perf_duration_work)) {
		early_printk("perf: interrupt took too long (%lld > %lld), lowering "
			     "kernel.perf_event_max_sample_rate to %d\n",
			     __report_avg, __report_allowed,
			     sysctl_perf_event_sample_rate);
	}
}

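/*
 * Illustrative worked example, not from the original file (the numbers
 * are assumptions): with HZ=1000 (TICK_NSEC = 1,000,000ns) and
 * perf_cpu_time_max_percent=25, the per-tick NMI budget is
 * (1,000,000 / 100) * 25 = 250,000ns.  If the decayed average sample
 * length comes out at 10,000ns, the new allowed sample length becomes
 * 10,000 + 10,000/4 = 12,500ns, max = 250,000 / 12,500 = 20 samples per
 * tick, and the sample rate is lowered to 20 * HZ = 20,000 Hz.
 */
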
static atomic64_t perf_event_id;

static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
			      enum event_type_t event_type);

static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
			     enum event_type_t event_type,
			     struct task_struct *task);

static void update_context_time(struct perf_event_context *ctx);
static u64 perf_event_time(struct perf_event *event);

void __weak perf_event_print_debug(void) { }

extern __weak const char *perf_pmu_name(void)
{
	return "pmu";
}

static inline u64 perf_clock(void)
{
	return local_clock();
}

static inline u64 perf_event_clock(struct perf_event *event)
{
	return event->clock();
}

/*
 * State based event timekeeping...
 *
 * The basic idea is to use event->state to determine which (if any) time
 * fields to increment with the current delta. This means we only need to
 * update timestamps when we change state or when they are explicitly requested
 * (read).
 *
 * Event groups make things a little more complicated, but not terribly so. The
 * rules for a group are that if the group leader is OFF the entire group is
 * OFF, irrespective of what the group member states are. This results in
 * __perf_effective_state().
 *
 * A further ramification is that when a group leader flips between OFF and
 * !OFF, we need to update all group member times.
 *
 *
 * NOTE: perf_event_time() is based on the (cgroup) context time, and thus we
 * need to make sure the relevant context time is updated before we try and
 * update our timestamps.
 */

static __always_inline enum perf_event_state
__perf_effective_state(struct perf_event *event)
{
	struct perf_event *leader = event->group_leader;

	if (leader->state <= PERF_EVENT_STATE_OFF)
		return leader->state;

	return event->state;
}

static __always_inline void
__perf_update_times(struct perf_event *event, u64 now, u64 *enabled, u64 *running)
{
	enum perf_event_state state = __perf_effective_state(event);
	u64 delta = now - event->tstamp;

	*enabled = event->total_time_enabled;
	if (state >= PERF_EVENT_STATE_INACTIVE)
		*enabled += delta;

	*running = event->total_time_running;
	if (state >= PERF_EVENT_STATE_ACTIVE)
		*running += delta;
}

static void perf_event_update_time(struct perf_event *event)
{
	u64 now = perf_event_time(event);

	__perf_update_times(event, now, &event->total_time_enabled,
					&event->total_time_running);
	event->tstamp = now;
}

static void perf_event_update_sibling_time(struct perf_event *leader)
{
	struct perf_event *sibling;

	list_for_each_entry(sibling, &leader->sibling_list, group_entry)
		perf_event_update_time(sibling);
}

static void
perf_event_set_state(struct perf_event *event, enum perf_event_state state)
{
	if (event->state == state)
		return;

	perf_event_update_time(event);
	/*
	 * If a group leader gets enabled/disabled all its siblings
	 * are affected too.
	 */
	if ((event->state < 0) ^ (state < 0))
		perf_event_update_sibling_time(event);

	WRITE_ONCE(event->state, state);
}

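/*
 * Illustrative worked example, not from the original file: suppose an
 * event spends 10ms ACTIVE, is then scheduled out (INACTIVE) for 5ms, and
 * is then read.  At the ACTIVE->INACTIVE transition
 * perf_event_update_time() folds the 10ms delta into both
 * total_time_enabled and total_time_running (the state was ACTIVE).  At
 * the read, the further 5ms delta is added to total_time_enabled only
 * (the state is INACTIVE), giving enabled=15ms, running=10ms.
 */
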
#ifdef CONFIG_CGROUP_PERF

static inline bool
perf_cgroup_match(struct perf_event *event)
{
	struct perf_event_context *ctx = event->ctx;
	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);

	/* @event doesn't care about cgroup */
	if (!event->cgrp)
		return true;

	/* wants specific cgroup scope but @cpuctx isn't associated with any */
	if (!cpuctx->cgrp)
		return false;

	/*
	 * Cgroup scoping is recursive.  An event enabled for a cgroup is
	 * also enabled for all its descendant cgroups.  If @cpuctx's
	 * cgroup is a descendant of @event's (the test covers identity
	 * case), it's a match.
	 */
	return cgroup_is_descendant(cpuctx->cgrp->css.cgroup,
				    event->cgrp->css.cgroup);
}

static inline void perf_detach_cgroup(struct perf_event *event)
{
	css_put(&event->cgrp->css);
	event->cgrp = NULL;
}

static inline int is_cgroup_event(struct perf_event *event)
{
	return event->cgrp != NULL;
}

static inline u64 perf_cgroup_event_time(struct perf_event *event)
{
	struct perf_cgroup_info *t;

	t = per_cpu_ptr(event->cgrp->info, event->cpu);
	return t->time;
}

static inline void __update_cgrp_time(struct perf_cgroup *cgrp)
{
	struct perf_cgroup_info *info;
	u64 now;

	now = perf_clock();

	info = this_cpu_ptr(cgrp->info);

	info->time += now - info->timestamp;
	info->timestamp = now;
}

static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx)
{
	struct perf_cgroup *cgrp_out = cpuctx->cgrp;

	if (cgrp_out)
		__update_cgrp_time(cgrp_out);
}

static inline void update_cgrp_time_from_event(struct perf_event *event)
{
	struct perf_cgroup *cgrp;

	/*
	 * ensure we access cgroup data only when needed and
	 * when we know the cgroup is pinned (css_get)
	 */
	if (!is_cgroup_event(event))
		return;

	cgrp = perf_cgroup_from_task(current, event->ctx);
	/*
	 * Do not update time when cgroup is not active
	 */
	if (cgroup_is_descendant(cgrp->css.cgroup, event->cgrp->css.cgroup))
		__update_cgrp_time(event->cgrp);
}

static inline void
perf_cgroup_set_timestamp(struct task_struct *task,
			  struct perf_event_context *ctx)
{
	struct perf_cgroup *cgrp;
	struct perf_cgroup_info *info;

	/*
	 * ctx->lock held by caller
	 * ensure we do not access cgroup data
	 * unless we have the cgroup pinned (css_get)
	 */
	if (!task || !ctx->nr_cgroups)
		return;

	cgrp = perf_cgroup_from_task(task, ctx);
	info = this_cpu_ptr(cgrp->info);
	info->timestamp = ctx->timestamp;
}

static DEFINE_PER_CPU(struct list_head, cgrp_cpuctx_list);

#define PERF_CGROUP_SWOUT	0x1 /* cgroup switch out every event */
#define PERF_CGROUP_SWIN	0x2 /* cgroup switch in events based on task */

/*
 * reschedule events based on the cgroup constraint of task.
 *
 * mode SWOUT : schedule out everything
 * mode SWIN : schedule in based on cgroup for next
 */
static void perf_cgroup_switch(struct task_struct *task, int mode)
{
	struct perf_cpu_context *cpuctx;
	struct list_head *list;
	unsigned long flags;

	/*
	 * Disable interrupts and preemption to keep this CPU's
	 * cgrp_cpuctx_entry from changing under us.
	 */
	local_irq_save(flags);

	list = this_cpu_ptr(&cgrp_cpuctx_list);
	list_for_each_entry(cpuctx, list, cgrp_cpuctx_entry) {
		WARN_ON_ONCE(cpuctx->ctx.nr_cgroups == 0);

		perf_ctx_lock(cpuctx, cpuctx->task_ctx);
		perf_pmu_disable(cpuctx->ctx.pmu);

		if (mode & PERF_CGROUP_SWOUT) {
			cpu_ctx_sched_out(cpuctx, EVENT_ALL);
			/*
			 * must not be done before ctxswout due
			 * to event_filter_match() in event_sched_out()
			 */
			cpuctx->cgrp = NULL;
		}

		if (mode & PERF_CGROUP_SWIN) {
			WARN_ON_ONCE(cpuctx->cgrp);
			/*
			 * set cgrp before ctxsw in to allow
			 * event_filter_match() to not have to pass
			 * task around
			 * we pass the cpuctx->ctx to perf_cgroup_from_task()
			 * because cgroup events are only per-cpu
			 */
			cpuctx->cgrp = perf_cgroup_from_task(task,
							     &cpuctx->ctx);
			cpu_ctx_sched_in(cpuctx, EVENT_ALL, task);
		}
		perf_pmu_enable(cpuctx->ctx.pmu);
		perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
	}

	local_irq_restore(flags);
}

static inline void perf_cgroup_sched_out(struct task_struct *task,
					 struct task_struct *next)
{
	struct perf_cgroup *cgrp1;
	struct perf_cgroup *cgrp2 = NULL;

	rcu_read_lock();
	/*
	 * we come here when we know perf_cgroup_events > 0
	 * we do not need to pass the ctx here because we know
	 * we are holding the rcu lock
	 */
	cgrp1 = perf_cgroup_from_task(task, NULL);
	cgrp2 = perf_cgroup_from_task(next, NULL);

	/*
	 * only schedule out current cgroup events if we know
	 * that we are switching to a different cgroup. Otherwise,
	 * do not touch the cgroup events.
	 */
	if (cgrp1 != cgrp2)
		perf_cgroup_switch(task, PERF_CGROUP_SWOUT);

	rcu_read_unlock();
}

static inline void perf_cgroup_sched_in(struct task_struct *prev,
					struct task_struct *task)
{
	struct perf_cgroup *cgrp1;
	struct perf_cgroup *cgrp2 = NULL;

	rcu_read_lock();
	/*
	 * we come here when we know perf_cgroup_events > 0
	 * we do not need to pass the ctx here because we know
	 * we are holding the rcu lock
	 */
	cgrp1 = perf_cgroup_from_task(task, NULL);
	cgrp2 = perf_cgroup_from_task(prev, NULL);

	/*
	 * only need to schedule in cgroup events if we are changing
	 * cgroup during ctxsw. Cgroup events were not scheduled out
	 * during ctxsw-out if that was not the case.
	 */
	if (cgrp1 != cgrp2)
		perf_cgroup_switch(task, PERF_CGROUP_SWIN);

	rcu_read_unlock();
}

static inline int perf_cgroup_connect(int fd, struct perf_event *event,
				      struct perf_event_attr *attr,
				      struct perf_event *group_leader)
{
	struct perf_cgroup *cgrp;
	struct cgroup_subsys_state *css;
	struct fd f = fdget(fd);
	int ret = 0;

	if (!f.file)
		return -EBADF;

	css = css_tryget_online_from_dir(f.file->f_path.dentry,
					 &perf_event_cgrp_subsys);
	if (IS_ERR(css)) {
		ret = PTR_ERR(css);
		goto out;
	}

	cgrp = container_of(css, struct perf_cgroup, css);
	event->cgrp = cgrp;

	/*
	 * all events in a group must monitor
	 * the same cgroup because a task belongs
	 * to only one perf cgroup at a time
	 */
	if (group_leader && group_leader->cgrp != cgrp) {
		perf_detach_cgroup(event);
		ret = -EINVAL;
	}
out:
	fdput(f);
	return ret;
}

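/*
 * Illustrative userspace sketch, not part of the original file: cgroup
 * events reach perf_cgroup_connect() via perf_event_open() with
 * PERF_FLAG_PID_CGROUP, where the "pid" argument is a file descriptor for
 * the cgroup directory (the path below is an example) and cpu must be a
 * valid CPU number:
 *
 *	int cgrp_fd = open("/sys/fs/cgroup/perf_event/mygroup",
 *			   O_RDONLY | O_DIRECTORY);
 *	int ev_fd = syscall(__NR_perf_event_open, &attr, cgrp_fd,
 *			    cpu, -1, PERF_FLAG_PID_CGROUP);
 */
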
static inline void
perf_cgroup_set_shadow_time(struct perf_event *event, u64 now)
{
	struct perf_cgroup_info *t;

	t = per_cpu_ptr(event->cgrp->info, event->cpu);
	event->shadow_ctx_time = now - t->timestamp;
}

/*
 * Update cpuctx->cgrp so that it is set when first cgroup event is added and
 * cleared when last cgroup event is removed.
 */
static inline void
list_update_cgroup_event(struct perf_event *event,
			 struct perf_event_context *ctx, bool add)
{
	struct perf_cpu_context *cpuctx;
	struct list_head *cpuctx_entry;

	if (!is_cgroup_event(event))
		return;

	if (add && ctx->nr_cgroups++)
		return;
	else if (!add && --ctx->nr_cgroups)
		return;
	/*
	 * Because cgroup events are always per-cpu events,
	 * this will always be called from the right CPU.
	 */
	cpuctx = __get_cpu_context(ctx);
	cpuctx_entry = &cpuctx->cgrp_cpuctx_entry;
	/* cpuctx->cgrp is NULL unless a cgroup event is active on this CPU. */
	if (add) {
		struct perf_cgroup *cgrp = perf_cgroup_from_task(current, ctx);

		list_add(cpuctx_entry, this_cpu_ptr(&cgrp_cpuctx_list));
		if (cgroup_is_descendant(cgrp->css.cgroup, event->cgrp->css.cgroup))
			cpuctx->cgrp = cgrp;
	} else {
		list_del(cpuctx_entry);
		cpuctx->cgrp = NULL;
	}
}

#else /* !CONFIG_CGROUP_PERF */

static inline bool
perf_cgroup_match(struct perf_event *event)
{
	return true;
}

static inline void perf_detach_cgroup(struct perf_event *event)
{}

static inline int is_cgroup_event(struct perf_event *event)
{
	return 0;
}

static inline void update_cgrp_time_from_event(struct perf_event *event)
{
}

static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx)
{
}

static inline void perf_cgroup_sched_out(struct task_struct *task,
					 struct task_struct *next)
{
}

static inline void perf_cgroup_sched_in(struct task_struct *prev,
					struct task_struct *task)
{
}

static inline int perf_cgroup_connect(pid_t pid, struct perf_event *event,
				      struct perf_event_attr *attr,
				      struct perf_event *group_leader)
{
	return -EINVAL;
}

static inline void
perf_cgroup_set_timestamp(struct task_struct *task,
			  struct perf_event_context *ctx)
{
}

void
perf_cgroup_switch(struct task_struct *task, struct task_struct *next)
{
}

static inline void
perf_cgroup_set_shadow_time(struct perf_event *event, u64 now)
{
}

static inline u64 perf_cgroup_event_time(struct perf_event *event)
{
	return 0;
}

static inline void
list_update_cgroup_event(struct perf_event *event,
			 struct perf_event_context *ctx, bool add)
{
}

#endif

/*
 * set default to be dependent on timer tick just
 * like original code
 */
#define PERF_CPU_HRTIMER (1000 / HZ)
/*
 * function must be called with interrupts disabled
 */
static enum hrtimer_restart perf_mux_hrtimer_handler(struct hrtimer *hr)
{
	struct perf_cpu_context *cpuctx;
	int rotations = 0;

	lockdep_assert_irqs_disabled();

	cpuctx = container_of(hr, struct perf_cpu_context, hrtimer);
	rotations = perf_rotate_context(cpuctx);

	raw_spin_lock(&cpuctx->hrtimer_lock);
	if (rotations)
		hrtimer_forward_now(hr, cpuctx->hrtimer_interval);
	else
		cpuctx->hrtimer_active = 0;
	raw_spin_unlock(&cpuctx->hrtimer_lock);

	return rotations ? HRTIMER_RESTART : HRTIMER_NORESTART;
}

static void __perf_mux_hrtimer_init(struct perf_cpu_context *cpuctx, int cpu)
{
	struct hrtimer *timer = &cpuctx->hrtimer;
	struct pmu *pmu = cpuctx->ctx.pmu;
	u64 interval;

	/* no multiplexing needed for SW PMU */
	if (pmu->task_ctx_nr == perf_sw_context)
		return;

	/*
	 * check default is sane, if not set then force to
	 * default interval (1/tick)
	 */
	interval = pmu->hrtimer_interval_ms;
	if (interval < 1)
		interval = pmu->hrtimer_interval_ms = PERF_CPU_HRTIMER;

	cpuctx->hrtimer_interval = ns_to_ktime(NSEC_PER_MSEC * interval);

	raw_spin_lock_init(&cpuctx->hrtimer_lock);
	hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
	timer->function = perf_mux_hrtimer_handler;
}

static int perf_mux_hrtimer_restart(struct perf_cpu_context *cpuctx)
{
	struct hrtimer *timer = &cpuctx->hrtimer;
	struct pmu *pmu = cpuctx->ctx.pmu;
	unsigned long flags;

	/* not for SW PMU */
	if (pmu->task_ctx_nr == perf_sw_context)
		return 0;

	raw_spin_lock_irqsave(&cpuctx->hrtimer_lock, flags);
	if (!cpuctx->hrtimer_active) {
		cpuctx->hrtimer_active = 1;
		hrtimer_forward_now(timer, cpuctx->hrtimer_interval);
		hrtimer_start_expires(timer, HRTIMER_MODE_ABS_PINNED);
	}
	raw_spin_unlock_irqrestore(&cpuctx->hrtimer_lock, flags);

	return 0;
}

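/*
 * Illustrative note, not part of the original file: hrtimer_interval_ms
 * is the per-PMU multiplexing interval. It is also tunable from userspace
 * through each PMU's sysfs attribute, e.g. (shell, PMU name is an
 * example):
 *
 *	echo 10 > /sys/devices/cpu/perf_event_mux_interval_ms
 *
 * When left unset, __perf_mux_hrtimer_init() above forces it to the
 * PERF_CPU_HRTIMER default of 1000/HZ milliseconds.
 */
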
void perf_pmu_disable(struct pmu *pmu)
{
	int *count = this_cpu_ptr(pmu->pmu_disable_count);
	if (!(*count)++)
		pmu->pmu_disable(pmu);
}

void perf_pmu_enable(struct pmu *pmu)
{
	int *count = this_cpu_ptr(pmu->pmu_disable_count);
	if (!--(*count))
		pmu->pmu_enable(pmu);
}

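/*
 * Illustrative sketch, not part of the original file: the per-CPU disable
 * count makes these calls nestable, so only the outermost pair actually
 * touches the hardware:
 *
 *	perf_pmu_disable(pmu);	// count 0 -> 1: calls pmu->pmu_disable()
 *	perf_pmu_disable(pmu);	// count 1 -> 2: no-op
 *	...
 *	perf_pmu_enable(pmu);	// count 2 -> 1: no-op
 *	perf_pmu_enable(pmu);	// count 1 -> 0: calls pmu->pmu_enable()
 */
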
static DEFINE_PER_CPU(struct list_head, active_ctx_list);

/*
 * perf_event_ctx_activate(), perf_event_ctx_deactivate(), and
 * perf_event_task_tick() are fully serialized because they're strictly cpu
 * affine and perf_event_ctx{activate,deactivate} are called with IRQs
 * disabled, while perf_event_task_tick is called from IRQ context.
 */
static void perf_event_ctx_activate(struct perf_event_context *ctx)
{
	struct list_head *head = this_cpu_ptr(&active_ctx_list);

	lockdep_assert_irqs_disabled();

	WARN_ON(!list_empty(&ctx->active_ctx_list));

	list_add(&ctx->active_ctx_list, head);
}

static void perf_event_ctx_deactivate(struct perf_event_context *ctx)
{
	lockdep_assert_irqs_disabled();

	WARN_ON(list_empty(&ctx->active_ctx_list));

	list_del_init(&ctx->active_ctx_list);
}

static void get_ctx(struct perf_event_context *ctx)
{
	WARN_ON(!atomic_inc_not_zero(&ctx->refcount));
}

static void free_ctx(struct rcu_head *head)
{
	struct perf_event_context *ctx;

	ctx = container_of(head, struct perf_event_context, rcu_head);
	kfree(ctx->task_ctx_data);
	kfree(ctx);
}

static void put_ctx(struct perf_event_context *ctx)
{
	if (atomic_dec_and_test(&ctx->refcount)) {
		if (ctx->parent_ctx)
			put_ctx(ctx->parent_ctx);
		if (ctx->task && ctx->task != TASK_TOMBSTONE)
			put_task_struct(ctx->task);
		call_rcu(&ctx->rcu_head, free_ctx);
	}
}

/*
 * Because of perf_event::ctx migration in sys_perf_event_open::move_group and
 * perf_pmu_migrate_context() we need some magic.
 *
 * Those places that change perf_event::ctx will hold both
 * perf_event_ctx::mutex of the 'old' and 'new' ctx value.
 *
 * Lock ordering is by mutex address. There are two other sites where
 * perf_event_context::mutex nests and those are:
 *
 *  - perf_event_exit_task_context()	[ child , 0 ]
 *      perf_event_exit_event()
 *        put_event()			[ parent, 1 ]
 *
 *  - perf_event_init_context()		[ parent, 0 ]
 *      inherit_task_group()
 *        inherit_group()
 *          inherit_event()
 *            perf_event_alloc()
 *              perf_init_event()
 *                perf_try_init_event()	[ child , 1 ]
 *
 * While it appears there is an obvious deadlock here -- the parent and child
 * nesting levels are inverted between the two. This is in fact safe because
 * life-time rules separate them. That is an exiting task cannot fork, and a
 * spawning task cannot (yet) exit.
 *
 * But remember that these are parent<->child context relations, and
 * migration does not affect children, therefore these two orderings should not
 * interact.
 *
 * The change in perf_event::ctx does not affect children (as claimed above)
 * because the sys_perf_event_open() case will install a new event and break
 * the ctx parent<->child relation, and perf_pmu_migrate_context() is only
 * concerned with cpuctx and that doesn't have children.
 *
 * The places that change perf_event::ctx will issue:
 *
 *   perf_remove_from_context();
 *   synchronize_rcu();
 *   perf_install_in_context();
 *
 * to affect the change. The remove_from_context() + synchronize_rcu() should
 * quiesce the event, after which we can install it in the new location. This
 * means that only external vectors (perf_fops, prctl) can perturb the event
 * while in transit. Therefore all such accessors should also acquire
 * perf_event_context::mutex to serialize against this.
 *
 * However; because event->ctx can change while we're waiting to acquire
 * ctx->mutex we must be careful and use the below perf_event_ctx_lock()
 * function.
 *
 * Lock order:
 *    cred_guard_mutex
 *	task_struct::perf_event_mutex
 *	  perf_event_context::mutex
 *	    perf_event::child_mutex;
 *	      perf_event_context::lock
 *	    perf_event::mmap_mutex
 *	    mmap_sem
 *
 *    cpu_hotplug_lock
 *      pmus_lock
 *	  cpuctx->mutex / perf_event_context::mutex
 */
static struct perf_event_context *
perf_event_ctx_lock_nested(struct perf_event *event, int nesting)
{
	struct perf_event_context *ctx;

again:
	rcu_read_lock();
	ctx = READ_ONCE(event->ctx);
	if (!atomic_inc_not_zero(&ctx->refcount)) {
		rcu_read_unlock();
		goto again;
	}
	rcu_read_unlock();

	mutex_lock_nested(&ctx->mutex, nesting);
	if (event->ctx != ctx) {
		mutex_unlock(&ctx->mutex);
		put_ctx(ctx);
		goto again;
	}

	return ctx;
}

static inline struct perf_event_context *
perf_event_ctx_lock(struct perf_event *event)
{
	return perf_event_ctx_lock_nested(event, 0);
}

static void perf_event_ctx_unlock(struct perf_event *event,
				  struct perf_event_context *ctx)
{
	mutex_unlock(&ctx->mutex);
	put_ctx(ctx);
}

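/*
 * Illustrative sketch, not part of the original file: external accessors
 * (e.g. the perf_fops ioctl path) bracket their work with the helpers
 * above so that a concurrent ctx migration simply makes them retry:
 *
 *	ctx = perf_event_ctx_lock(event);
 *	... operate on event with a stable event->ctx ...
 *	perf_event_ctx_unlock(event, ctx);
 */
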
/*
 * This must be done under the ctx->lock, such as to serialize against
 * context_equiv(), therefore we cannot call put_ctx() since that might end up
 * calling scheduler related locks and ctx->lock nests inside those.
 */
static __must_check struct perf_event_context *
unclone_ctx(struct perf_event_context *ctx)
{
	struct perf_event_context *parent_ctx = ctx->parent_ctx;

	lockdep_assert_held(&ctx->lock);

	if (parent_ctx)
		ctx->parent_ctx = NULL;
	ctx->generation++;

	return parent_ctx;
}

static u32 perf_event_pid_type(struct perf_event *event, struct task_struct *p,
			       enum pid_type type)
{
	u32 nr;
	/*
	 * only top level events have the pid namespace they were created in
	 */
	if (event->parent)
		event = event->parent;

	nr = __task_pid_nr_ns(p, type, event->ns);
	/* avoid -1 if it is idle thread or runs in another ns */
	if (!nr && !pid_alive(p))
		nr = -1;
	return nr;
}

static u32 perf_event_pid(struct perf_event *event, struct task_struct *p)
{
	return perf_event_pid_type(event, p, __PIDTYPE_TGID);
}

static u32 perf_event_tid(struct perf_event *event, struct task_struct *p)
{
	return perf_event_pid_type(event, p, PIDTYPE_PID);
}

/*
 * If we inherit events we want to return the parent event id
 * to userspace.
 */
static u64 primary_event_id(struct perf_event *event)
{
	u64 id = event->id;

	if (event->parent)
		id = event->parent->id;

	return id;
}

/*
 * Get the perf_event_context for a task and lock it.
 *
 * This has to cope with the fact that until it is locked,
 * the context could get moved to another task.
 */
static struct perf_event_context *
perf_lock_task_context(struct task_struct *task, int ctxn, unsigned long *flags)
{
	struct perf_event_context *ctx;

retry:
	/*
	 * One of the few rules of preemptible RCU is that one cannot do
	 * rcu_read_unlock() while holding a scheduler (or nested) lock when
	 * part of the read side critical section was irqs-enabled -- see
	 * rcu_read_unlock_special().
	 *
	 * Since ctx->lock nests under rq->lock we must ensure the entire read
	 * side critical section has interrupts disabled.
	 */
	local_irq_save(*flags);
	rcu_read_lock();
	ctx = rcu_dereference(task->perf_event_ctxp[ctxn]);
	if (ctx) {
		/*
		 * If this context is a clone of another, it might
		 * get swapped for another underneath us by
		 * perf_event_task_sched_out, though the
		 * rcu_read_lock() protects us from any context
		 * getting freed.  Lock the context and check if it
		 * got swapped before we could get the lock, and retry
		 * if so.  If we locked the right context, then it
		 * can't get swapped on us any more.
		 */
		raw_spin_lock(&ctx->lock);
		if (ctx != rcu_dereference(task->perf_event_ctxp[ctxn])) {
			raw_spin_unlock(&ctx->lock);
			rcu_read_unlock();
			local_irq_restore(*flags);
			goto retry;
		}

		if (ctx->task == TASK_TOMBSTONE ||
		    !atomic_inc_not_zero(&ctx->refcount)) {
			raw_spin_unlock(&ctx->lock);
			ctx = NULL;
		} else {
			WARN_ON_ONCE(ctx->task != task);
		}
	}
	rcu_read_unlock();
	if (!ctx)
		local_irq_restore(*flags);
	return ctx;
}
1392
1393/*
1394 * Get the context for a task and increment its pin_count so it
1395 * can't get swapped to another task. This also increments its
1396 * reference count so that the context can't get freed.
1397 */
Peter Zijlstra8dc85d5472010-09-02 16:50:03 +02001398static struct perf_event_context *
1399perf_pin_task_context(struct task_struct *task, int ctxn)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001400{
1401 struct perf_event_context *ctx;
1402 unsigned long flags;
1403
Peter Zijlstra8dc85d5472010-09-02 16:50:03 +02001404 ctx = perf_lock_task_context(task, ctxn, &flags);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001405 if (ctx) {
1406 ++ctx->pin_count;
Thomas Gleixnere625cce12009-11-17 18:02:06 +01001407 raw_spin_unlock_irqrestore(&ctx->lock, flags);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001408 }
1409 return ctx;
1410}
1411
1412static void perf_unpin_context(struct perf_event_context *ctx)
1413{
1414 unsigned long flags;
1415
Thomas Gleixnere625cce12009-11-17 18:02:06 +01001416 raw_spin_lock_irqsave(&ctx->lock, flags);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001417 --ctx->pin_count;
Thomas Gleixnere625cce12009-11-17 18:02:06 +01001418 raw_spin_unlock_irqrestore(&ctx->lock, flags);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001419}
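
/*
 * Sketch of the intended pin/unpin usage (hypothetical caller, not from
 * the original file):
 *
 *	struct perf_event_context *ctx;
 *
 *	ctx = perf_pin_task_context(task, ctxn);
 *	if (ctx) {
 *		... ctx can neither be swapped to another task nor freed ...
 *		perf_unpin_context(ctx);
 *		put_ctx(ctx);	(drops the reference taken while locking)
 *	}
 */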
1420
Peter Zijlstraf67218c2009-11-23 11:37:27 +01001421/*
1422 * Update the record of the current time in a context.
1423 */
1424static void update_context_time(struct perf_event_context *ctx)
1425{
1426 u64 now = perf_clock();
1427
1428 ctx->time += now - ctx->timestamp;
1429 ctx->timestamp = now;
1430}
1431
Stephane Eranian41587552011-01-03 18:20:01 +02001432static u64 perf_event_time(struct perf_event *event)
1433{
1434 struct perf_event_context *ctx = event->ctx;
Stephane Eraniane5d13672011-02-14 11:20:01 +02001435
1436 if (is_cgroup_event(event))
1437 return perf_cgroup_event_time(event);
1438
Stephane Eranian41587552011-01-03 18:20:01 +02001439 return ctx ? ctx->time : 0;
1440}
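
/*
 * Worked example with made-up numbers: if ctx->timestamp was last taken at
 * t=1000ns with ctx->time=400ns accumulated, an update_context_time() call
 * at t=1300ns yields ctx->time = 400 + (1300 - 1000) = 700ns and resets
 * ctx->timestamp to 1300ns; perf_event_time() then reports that running
 * total (or the cgroup equivalent for cgroup events).
 */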
1441
Alexander Shishkin487f05e2017-01-19 18:43:30 +02001442static enum event_type_t get_event_type(struct perf_event *event)
1443{
1444 struct perf_event_context *ctx = event->ctx;
1445 enum event_type_t event_type;
1446
1447 lockdep_assert_held(&ctx->lock);
1448
Alexander Shishkin3bda69c2017-07-18 14:08:34 +03001449 /*
1450 * It's 'group type', really, because if our group leader is
1451 * pinned, so are we.
1452 */
1453 if (event->group_leader != event)
1454 event = event->group_leader;
1455
Alexander Shishkin487f05e2017-01-19 18:43:30 +02001456 event_type = event->attr.pinned ? EVENT_PINNED : EVENT_FLEXIBLE;
1457 if (!ctx->task)
1458 event_type |= EVENT_CPU;
1459
1460 return event_type;
1461}
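
/*
 * For example, a pinned event in a CPU context yields EVENT_PINNED |
 * EVENT_CPU, while a flexible sibling of a pinned task group leader yields
 * plain EVENT_PINNED, because the leader's pinning is inherited above.
 */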
1462
Frederic Weisbecker889ff012010-01-09 20:04:47 +01001463static struct list_head *
1464ctx_group_list(struct perf_event *event, struct perf_event_context *ctx)
1465{
1466 if (event->attr.pinned)
1467 return &ctx->pinned_groups;
1468 else
1469 return &ctx->flexible_groups;
1470}
1471
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001472/*
 1473 * Add an event to the lists for its context.
1474 * Must be called with ctx->mutex and ctx->lock held.
1475 */
1476static void
1477list_add_event(struct perf_event *event, struct perf_event_context *ctx)
1478{
Peter Zijlstrac994d612016-01-08 09:20:23 +01001479 lockdep_assert_held(&ctx->lock);
1480
Peter Zijlstra8a495422010-05-27 15:47:49 +02001481 WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT);
1482 event->attach_state |= PERF_ATTACH_CONTEXT;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001483
Peter Zijlstra0d3d73a2017-09-05 14:16:28 +02001484 event->tstamp = perf_event_time(event);
1485
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001486 /*
Peter Zijlstra8a495422010-05-27 15:47:49 +02001487 * If we're a stand-alone event or group leader, we go to the context
 1488 * list; group events are kept attached to the group so that
 1489 * perf_group_detach can, at all times, locate all siblings.
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001490 */
Peter Zijlstra8a495422010-05-27 15:47:49 +02001491 if (event->group_leader == event) {
Frederic Weisbecker889ff012010-01-09 20:04:47 +01001492 struct list_head *list;
1493
David Carrillo-Cisneros4ff6a8d2016-08-17 13:55:05 -07001494 event->group_caps = event->event_caps;
Frederic Weisbeckerd6f962b2010-01-10 01:25:51 +01001495
Frederic Weisbecker889ff012010-01-09 20:04:47 +01001496 list = ctx_group_list(event, ctx);
1497 list_add_tail(&event->group_entry, list);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001498 }
1499
David Carrillo-Cisnerosdb4a8352016-08-02 00:48:12 -07001500 list_update_cgroup_event(event, ctx, true);
Stephane Eraniane5d13672011-02-14 11:20:01 +02001501
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001502 list_add_rcu(&event->event_entry, &ctx->event_list);
1503 ctx->nr_events++;
1504 if (event->attr.inherit_stat)
1505 ctx->nr_stat++;
Peter Zijlstra5a3126d2013-10-07 17:12:48 +02001506
1507 ctx->generation++;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001508}
1509
Arnaldo Carvalho de Meloc320c7b2010-10-20 12:50:11 -02001510/*
Jiri Olsa0231bb52013-02-01 11:23:45 +01001511 * Initialize event state based on the perf_event_attr::disabled.
1512 */
1513static inline void perf_event__state_init(struct perf_event *event)
1514{
1515 event->state = event->attr.disabled ? PERF_EVENT_STATE_OFF :
1516 PERF_EVENT_STATE_INACTIVE;
1517}
1518
Peter Zijlstraa7239682015-09-09 19:06:33 +02001519static void __perf_event_read_size(struct perf_event *event, int nr_siblings)
Arnaldo Carvalho de Meloc320c7b2010-10-20 12:50:11 -02001520{
1521 int entry = sizeof(u64); /* value */
1522 int size = 0;
1523 int nr = 1;
1524
1525 if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
1526 size += sizeof(u64);
1527
1528 if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
1529 size += sizeof(u64);
1530
1531 if (event->attr.read_format & PERF_FORMAT_ID)
1532 entry += sizeof(u64);
1533
1534 if (event->attr.read_format & PERF_FORMAT_GROUP) {
Peter Zijlstraa7239682015-09-09 19:06:33 +02001535 nr += nr_siblings;
Arnaldo Carvalho de Meloc320c7b2010-10-20 12:50:11 -02001536 size += sizeof(u64);
1537 }
1538
1539 size += entry * nr;
1540 event->read_size = size;
1541}
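
/*
 * Worked example (hypothetical event): read_format =
 * PERF_FORMAT_TOTAL_TIME_ENABLED | PERF_FORMAT_ID | PERF_FORMAT_GROUP with
 * nr_siblings == 2 gives entry = 8 + 8 (value + id), nr = 1 + 2, and so
 * read_size = 8 (time_enabled) + 8 (nr) + 16 * 3 = 64 bytes, matching the
 * PERF_FORMAT_GROUP layout in include/uapi/linux/perf_event.h.
 */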
1542
Peter Zijlstraa7239682015-09-09 19:06:33 +02001543static void __perf_event_header_size(struct perf_event *event, u64 sample_type)
Arnaldo Carvalho de Meloc320c7b2010-10-20 12:50:11 -02001544{
1545 struct perf_sample_data *data;
Arnaldo Carvalho de Meloc320c7b2010-10-20 12:50:11 -02001546 u16 size = 0;
1547
Arnaldo Carvalho de Meloc320c7b2010-10-20 12:50:11 -02001548 if (sample_type & PERF_SAMPLE_IP)
1549 size += sizeof(data->ip);
1550
Arnaldo Carvalho de Melo6844c092010-12-03 16:36:35 -02001551 if (sample_type & PERF_SAMPLE_ADDR)
1552 size += sizeof(data->addr);
1553
1554 if (sample_type & PERF_SAMPLE_PERIOD)
1555 size += sizeof(data->period);
1556
Andi Kleenc3feedf2013-01-24 16:10:28 +01001557 if (sample_type & PERF_SAMPLE_WEIGHT)
1558 size += sizeof(data->weight);
1559
Arnaldo Carvalho de Melo6844c092010-12-03 16:36:35 -02001560 if (sample_type & PERF_SAMPLE_READ)
1561 size += event->read_size;
1562
Stephane Eraniand6be9ad2013-01-24 16:10:31 +01001563 if (sample_type & PERF_SAMPLE_DATA_SRC)
1564 size += sizeof(data->data_src.val);
1565
Andi Kleenfdfbbd02013-09-20 07:40:39 -07001566 if (sample_type & PERF_SAMPLE_TRANSACTION)
1567 size += sizeof(data->txn);
1568
Kan Liangfc7ce9c2017-08-28 20:52:49 -04001569 if (sample_type & PERF_SAMPLE_PHYS_ADDR)
1570 size += sizeof(data->phys_addr);
1571
Arnaldo Carvalho de Melo6844c092010-12-03 16:36:35 -02001572 event->header_size = size;
1573}
1574
Peter Zijlstraa7239682015-09-09 19:06:33 +02001575/*
1576 * Called at perf_event creation and when events are attached/detached from a
1577 * group.
1578 */
1579static void perf_event__header_size(struct perf_event *event)
1580{
1581 __perf_event_read_size(event,
1582 event->group_leader->nr_siblings);
1583 __perf_event_header_size(event, event->attr.sample_type);
1584}
1585
Arnaldo Carvalho de Melo6844c092010-12-03 16:36:35 -02001586static void perf_event__id_header_size(struct perf_event *event)
1587{
1588 struct perf_sample_data *data;
1589 u64 sample_type = event->attr.sample_type;
1590 u16 size = 0;
1591
Arnaldo Carvalho de Meloc320c7b2010-10-20 12:50:11 -02001592 if (sample_type & PERF_SAMPLE_TID)
1593 size += sizeof(data->tid_entry);
1594
1595 if (sample_type & PERF_SAMPLE_TIME)
1596 size += sizeof(data->time);
1597
Adrian Hunterff3d5272013-08-27 11:23:07 +03001598 if (sample_type & PERF_SAMPLE_IDENTIFIER)
1599 size += sizeof(data->id);
1600
Arnaldo Carvalho de Meloc320c7b2010-10-20 12:50:11 -02001601 if (sample_type & PERF_SAMPLE_ID)
1602 size += sizeof(data->id);
1603
1604 if (sample_type & PERF_SAMPLE_STREAM_ID)
1605 size += sizeof(data->stream_id);
1606
1607 if (sample_type & PERF_SAMPLE_CPU)
1608 size += sizeof(data->cpu_entry);
1609
Arnaldo Carvalho de Melo6844c092010-12-03 16:36:35 -02001610 event->id_header_size = size;
Arnaldo Carvalho de Meloc320c7b2010-10-20 12:50:11 -02001611}
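
/*
 * Example (illustrative): sample_type = PERF_SAMPLE_TID | PERF_SAMPLE_TIME
 * | PERF_SAMPLE_ID | PERF_SAMPLE_CPU gives 8 + 8 + 8 + 8 = 32 bytes of
 * identification trailer appended to records when sample_id_all is used.
 */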
1612
Peter Zijlstraa7239682015-09-09 19:06:33 +02001613static bool perf_event_validate_size(struct perf_event *event)
1614{
1615 /*
 1616 * The values computed here will be overwritten when we actually
1617 * attach the event.
1618 */
1619 __perf_event_read_size(event, event->group_leader->nr_siblings + 1);
1620 __perf_event_header_size(event, event->attr.sample_type & ~PERF_SAMPLE_READ);
1621 perf_event__id_header_size(event);
1622
1623 /*
1624 * Sum the lot; should not exceed the 64k limit we have on records.
1625 * Conservative limit to allow for callchains and other variable fields.
1626 */
1627 if (event->read_size + event->header_size +
1628 event->id_header_size + sizeof(struct perf_event_header) >= 16*1024)
1629 return false;
1630
1631 return true;
1632}
1633
Peter Zijlstra8a495422010-05-27 15:47:49 +02001634static void perf_group_attach(struct perf_event *event)
1635{
Arnaldo Carvalho de Meloc320c7b2010-10-20 12:50:11 -02001636 struct perf_event *group_leader = event->group_leader, *pos;
Peter Zijlstra8a495422010-05-27 15:47:49 +02001637
Peter Zijlstraa76a82a2017-01-26 16:39:55 +01001638 lockdep_assert_held(&event->ctx->lock);
1639
Peter Zijlstra74c33372010-10-15 11:40:29 +02001640 /*
1641 * We can have double attach due to group movement in perf_event_open.
1642 */
1643 if (event->attach_state & PERF_ATTACH_GROUP)
1644 return;
1645
Peter Zijlstra8a495422010-05-27 15:47:49 +02001646 event->attach_state |= PERF_ATTACH_GROUP;
1647
1648 if (group_leader == event)
1649 return;
1650
Peter Zijlstra652884f2015-01-23 11:20:10 +01001651 WARN_ON_ONCE(group_leader->ctx != event->ctx);
1652
David Carrillo-Cisneros4ff6a8d2016-08-17 13:55:05 -07001653 group_leader->group_caps &= event->event_caps;
Peter Zijlstra8a495422010-05-27 15:47:49 +02001654
1655 list_add_tail(&event->group_entry, &group_leader->sibling_list);
1656 group_leader->nr_siblings++;
Arnaldo Carvalho de Meloc320c7b2010-10-20 12:50:11 -02001657
1658 perf_event__header_size(group_leader);
1659
1660 list_for_each_entry(pos, &group_leader->sibling_list, group_entry)
1661 perf_event__header_size(pos);
Peter Zijlstra8a495422010-05-27 15:47:49 +02001662}
1663
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001664/*
 1665 * Remove an event from the lists for its context.
1666 * Must be called with ctx->mutex and ctx->lock held.
1667 */
1668static void
1669list_del_event(struct perf_event *event, struct perf_event_context *ctx)
1670{
Peter Zijlstra652884f2015-01-23 11:20:10 +01001671 WARN_ON_ONCE(event->ctx != ctx);
1672 lockdep_assert_held(&ctx->lock);
1673
Peter Zijlstra8a495422010-05-27 15:47:49 +02001674 /*
1675 * We can have double detach due to exit/hot-unplug + close.
1676 */
1677 if (!(event->attach_state & PERF_ATTACH_CONTEXT))
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001678 return;
Peter Zijlstra8a495422010-05-27 15:47:49 +02001679
1680 event->attach_state &= ~PERF_ATTACH_CONTEXT;
1681
David Carrillo-Cisnerosdb4a8352016-08-02 00:48:12 -07001682 list_update_cgroup_event(event, ctx, false);
Stephane Eraniane5d13672011-02-14 11:20:01 +02001683
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001684 ctx->nr_events--;
1685 if (event->attr.inherit_stat)
1686 ctx->nr_stat--;
1687
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001688 list_del_rcu(&event->event_entry);
1689
Peter Zijlstra8a495422010-05-27 15:47:49 +02001690 if (event->group_leader == event)
1691 list_del_init(&event->group_entry);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001692
Stephane Eranianb2e74a22009-11-26 09:24:30 -08001693 /*
1694 * If event was in error state, then keep it
1695 * that way, otherwise bogus counts will be
1696 * returned on read(). The only way to get out
1697 * of error state is by explicit re-enabling
1698 * of the event
1699 */
1700 if (event->state > PERF_EVENT_STATE_OFF)
Peter Zijlstra0d3d73a2017-09-05 14:16:28 +02001701 perf_event_set_state(event, PERF_EVENT_STATE_OFF);
Peter Zijlstra5a3126d2013-10-07 17:12:48 +02001702
1703 ctx->generation++;
Peter Zijlstra050735b2010-05-11 11:51:53 +02001704}
1705
Peter Zijlstra8a495422010-05-27 15:47:49 +02001706static void perf_group_detach(struct perf_event *event)
Peter Zijlstra050735b2010-05-11 11:51:53 +02001707{
1708 struct perf_event *sibling, *tmp;
Peter Zijlstra8a495422010-05-27 15:47:49 +02001709 struct list_head *list = NULL;
1710
Peter Zijlstraa76a82a2017-01-26 16:39:55 +01001711 lockdep_assert_held(&event->ctx->lock);
1712
Peter Zijlstra8a495422010-05-27 15:47:49 +02001713 /*
1714 * We can have double detach due to exit/hot-unplug + close.
1715 */
1716 if (!(event->attach_state & PERF_ATTACH_GROUP))
1717 return;
1718
1719 event->attach_state &= ~PERF_ATTACH_GROUP;
1720
1721 /*
1722 * If this is a sibling, remove it from its group.
1723 */
1724 if (event->group_leader != event) {
1725 list_del_init(&event->group_entry);
1726 event->group_leader->nr_siblings--;
Arnaldo Carvalho de Meloc320c7b2010-10-20 12:50:11 -02001727 goto out;
Peter Zijlstra8a495422010-05-27 15:47:49 +02001728 }
1729
1730 if (!list_empty(&event->group_entry))
1731 list = &event->group_entry;
Peter Zijlstra2e2af502009-11-23 11:37:25 +01001732
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001733 /*
1734 * If this was a group event with sibling events then
1735 * upgrade the siblings to singleton events by adding them
Peter Zijlstra8a495422010-05-27 15:47:49 +02001736 * to whatever list we are on.
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001737 */
1738 list_for_each_entry_safe(sibling, tmp, &event->sibling_list, group_entry) {
Peter Zijlstra8a495422010-05-27 15:47:49 +02001739 if (list)
1740 list_move_tail(&sibling->group_entry, list);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001741 sibling->group_leader = sibling;
Frederic Weisbeckerd6f962b2010-01-10 01:25:51 +01001742
1743 /* Inherit group flags from the previous leader */
David Carrillo-Cisneros4ff6a8d2016-08-17 13:55:05 -07001744 sibling->group_caps = event->group_caps;
Peter Zijlstra652884f2015-01-23 11:20:10 +01001745
1746 WARN_ON_ONCE(sibling->ctx != event->ctx);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001747 }
Arnaldo Carvalho de Meloc320c7b2010-10-20 12:50:11 -02001748
1749out:
1750 perf_event__header_size(event->group_leader);
1751
1752 list_for_each_entry(tmp, &event->group_leader->sibling_list, group_entry)
1753 perf_event__header_size(tmp);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001754}
1755
Jiri Olsafadfe7b2014-08-01 14:33:02 +02001756static bool is_orphaned_event(struct perf_event *event)
1757{
Peter Zijlstraa69b0ca2016-02-24 18:45:44 +01001758 return event->state == PERF_EVENT_STATE_DEAD;
Jiri Olsafadfe7b2014-08-01 14:33:02 +02001759}
1760
Mark Rutland2c81a642016-06-14 16:10:41 +01001761static inline int __pmu_filter_match(struct perf_event *event)
Mark Rutland66eb5792015-05-13 17:12:23 +01001762{
1763 struct pmu *pmu = event->pmu;
1764 return pmu->filter_match ? pmu->filter_match(event) : 1;
1765}
1766
Mark Rutland2c81a642016-06-14 16:10:41 +01001767/*
1768 * Check whether we should attempt to schedule an event group based on
1769 * PMU-specific filtering. An event group can consist of HW and SW events,
 1770 * potentially with a SW leader, so we must check all the filters to
 1771 * determine whether the group is schedulable.
1772 */
1773static inline int pmu_filter_match(struct perf_event *event)
1774{
1775 struct perf_event *child;
1776
1777 if (!__pmu_filter_match(event))
1778 return 0;
1779
1780 list_for_each_entry(child, &event->sibling_list, group_entry) {
1781 if (!__pmu_filter_match(child))
1782 return 0;
1783 }
1784
1785 return 1;
1786}
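
/*
 * For example, on a heterogeneous (big.LITTLE style) system, a CPU PMU's
 * filter_match() can reject events on CPUs that PMU does not cover, while
 * any software siblings in the group always match.
 */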
1787
Stephane Eranianfa66f072010-08-26 16:40:01 +02001788static inline int
1789event_filter_match(struct perf_event *event)
1790{
Peter Zijlstra0b8f1e22016-08-04 14:37:24 +02001791 return (event->cpu == -1 || event->cpu == smp_processor_id()) &&
1792 perf_cgroup_match(event) && pmu_filter_match(event);
Stephane Eranianfa66f072010-08-26 16:40:01 +02001793}
1794
Stephane Eranian9ffcfa62010-10-20 15:25:01 +02001795static void
1796event_sched_out(struct perf_event *event,
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001797 struct perf_cpu_context *cpuctx,
1798 struct perf_event_context *ctx)
1799{
Peter Zijlstra0d3d73a2017-09-05 14:16:28 +02001800 enum perf_event_state state = PERF_EVENT_STATE_INACTIVE;
Peter Zijlstra652884f2015-01-23 11:20:10 +01001801
1802 WARN_ON_ONCE(event->ctx != ctx);
1803 lockdep_assert_held(&ctx->lock);
1804
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001805 if (event->state != PERF_EVENT_STATE_ACTIVE)
Stephane Eranian9ffcfa62010-10-20 15:25:01 +02001806 return;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001807
Alexander Shishkin44377272013-12-16 14:17:36 +02001808 perf_pmu_disable(event->pmu);
1809
Peter Zijlstra28a967c2016-02-24 18:45:46 +01001810 event->pmu->del(event, 0);
1811 event->oncpu = -1;
Peter Zijlstra0d3d73a2017-09-05 14:16:28 +02001812
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001813 if (event->pending_disable) {
1814 event->pending_disable = 0;
Peter Zijlstra0d3d73a2017-09-05 14:16:28 +02001815 state = PERF_EVENT_STATE_OFF;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001816 }
Peter Zijlstra0d3d73a2017-09-05 14:16:28 +02001817 perf_event_set_state(event, state);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001818
1819 if (!is_software_event(event))
1820 cpuctx->active_oncpu--;
Mark Rutland2fde4f92015-01-07 15:01:54 +00001821 if (!--ctx->nr_active)
1822 perf_event_ctx_deactivate(ctx);
Peter Zijlstra0f5a2602011-11-16 14:38:16 +01001823 if (event->attr.freq && event->attr.sample_freq)
1824 ctx->nr_freq--;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001825 if (event->attr.exclusive || !cpuctx->active_oncpu)
1826 cpuctx->exclusive = 0;
Alexander Shishkin44377272013-12-16 14:17:36 +02001827
1828 perf_pmu_enable(event->pmu);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001829}
1830
1831static void
1832group_sched_out(struct perf_event *group_event,
1833 struct perf_cpu_context *cpuctx,
1834 struct perf_event_context *ctx)
1835{
1836 struct perf_event *event;
Peter Zijlstra0d3d73a2017-09-05 14:16:28 +02001837
1838 if (group_event->state != PERF_EVENT_STATE_ACTIVE)
1839 return;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001840
Mark Rutland3f005e72016-07-26 18:12:21 +01001841 perf_pmu_disable(ctx->pmu);
1842
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001843 event_sched_out(group_event, cpuctx, ctx);
1844
1845 /*
1846 * Schedule out siblings (if any):
1847 */
1848 list_for_each_entry(event, &group_event->sibling_list, group_entry)
1849 event_sched_out(event, cpuctx, ctx);
1850
Mark Rutland3f005e72016-07-26 18:12:21 +01001851 perf_pmu_enable(ctx->pmu);
1852
Peter Zijlstra0d3d73a2017-09-05 14:16:28 +02001853 if (group_event->attr.exclusive)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001854 cpuctx->exclusive = 0;
1855}
1856
Peter Zijlstra45a0e072016-01-26 13:09:48 +01001857#define DETACH_GROUP 0x01UL
Peter Zijlstra00179602015-11-30 16:26:35 +01001858
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001859/*
1860 * Cross CPU call to remove a performance event
1861 *
1862 * We disable the event on the hardware level first. After that we
1863 * remove it from the context list.
1864 */
Peter Zijlstrafae3fde2016-01-11 15:00:50 +01001865static void
1866__perf_remove_from_context(struct perf_event *event,
1867 struct perf_cpu_context *cpuctx,
1868 struct perf_event_context *ctx,
1869 void *info)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001870{
Peter Zijlstra45a0e072016-01-26 13:09:48 +01001871 unsigned long flags = (unsigned long)info;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001872
Peter Zijlstra3c5c8712017-09-05 13:44:51 +02001873 if (ctx->is_active & EVENT_TIME) {
1874 update_context_time(ctx);
1875 update_cgrp_time_from_cpuctx(cpuctx);
1876 }
1877
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001878 event_sched_out(event, cpuctx, ctx);
Peter Zijlstra45a0e072016-01-26 13:09:48 +01001879 if (flags & DETACH_GROUP)
Peter Zijlstra46ce0fe2014-05-02 16:56:01 +02001880 perf_group_detach(event);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001881 list_del_event(event, ctx);
Peter Zijlstrafe4b04f2011-02-02 13:19:09 +01001882
Peter Zijlstra39a43642016-01-11 12:46:35 +01001883 if (!ctx->nr_events && ctx->is_active) {
Peter Zijlstrafe4b04f2011-02-02 13:19:09 +01001884 ctx->is_active = 0;
Peter Zijlstra39a43642016-01-11 12:46:35 +01001885 if (ctx->task) {
1886 WARN_ON_ONCE(cpuctx->task_ctx != ctx);
1887 cpuctx->task_ctx = NULL;
1888 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001889 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001890}
1891
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001892/*
1893 * Remove the event from a task's (or a CPU's) list of events.
1894 *
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001895 * If event->ctx is a cloned context, callers must make sure that
1896 * every task struct that event->ctx->task could possibly point to
1897 * remains valid. This is OK when called from perf_release since
1898 * that only calls us on the top-level context, which can't be a clone.
1899 * When called from perf_event_exit_task, it's OK because the
1900 * context has been detached from its task.
1901 */
Peter Zijlstra45a0e072016-01-26 13:09:48 +01001902static void perf_remove_from_context(struct perf_event *event, unsigned long flags)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001903{
Peter Zijlstraa76a82a2017-01-26 16:39:55 +01001904 struct perf_event_context *ctx = event->ctx;
1905
1906 lockdep_assert_held(&ctx->mutex);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001907
Peter Zijlstra45a0e072016-01-26 13:09:48 +01001908 event_function_call(event, __perf_remove_from_context, (void *)flags);
Peter Zijlstraa76a82a2017-01-26 16:39:55 +01001909
1910 /*
1911 * The above event_function_call() can NO-OP when it hits
1912 * TASK_TOMBSTONE. In that case we must already have been detached
1913 * from the context (by perf_event_exit_event()) but the grouping
 1914 * might still be intact.
1915 */
1916 WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT);
1917 if ((flags & DETACH_GROUP) &&
1918 (event->attach_state & PERF_ATTACH_GROUP)) {
1919 /*
1920 * Since in that case we cannot possibly be scheduled, simply
1921 * detach now.
1922 */
1923 raw_spin_lock_irq(&ctx->lock);
1924 perf_group_detach(event);
1925 raw_spin_unlock_irq(&ctx->lock);
1926 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001927}
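
/*
 * Caller-side sketch (illustrative, not from the original file):
 *
 *	perf_remove_from_context(event, DETACH_GROUP);
 *
 * tears the event out of its context and dissolves its group linkage in
 * one go, whereas passing 0 leaves the sibling grouping intact for the
 * caller to deal with separately.
 */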
1928
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001929/*
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001930 * Cross CPU call to disable a performance event
1931 */
Peter Zijlstrafae3fde2016-01-11 15:00:50 +01001932static void __perf_event_disable(struct perf_event *event,
1933 struct perf_cpu_context *cpuctx,
1934 struct perf_event_context *ctx,
1935 void *info)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001936{
Peter Zijlstrafae3fde2016-01-11 15:00:50 +01001937 if (event->state < PERF_EVENT_STATE_INACTIVE)
1938 return;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001939
Peter Zijlstra3c5c8712017-09-05 13:44:51 +02001940 if (ctx->is_active & EVENT_TIME) {
1941 update_context_time(ctx);
1942 update_cgrp_time_from_event(event);
1943 }
1944
Peter Zijlstrafae3fde2016-01-11 15:00:50 +01001945 if (event == event->group_leader)
1946 group_sched_out(event, cpuctx, ctx);
1947 else
1948 event_sched_out(event, cpuctx, ctx);
Peter Zijlstra0d3d73a2017-09-05 14:16:28 +02001949
1950 perf_event_set_state(event, PERF_EVENT_STATE_OFF);
Peter Zijlstra7b648012015-12-03 18:35:21 +01001951}
1952
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001953/*
 1954 * Disable an event.
1955 *
1956 * If event->ctx is a cloned context, callers must make sure that
1957 * every task struct that event->ctx->task could possibly point to
 1958 * remains valid. This condition is satisfied when called through
1959 * perf_event_for_each_child or perf_event_for_each because they
1960 * hold the top-level event's child_mutex, so any descendant that
Peter Zijlstra8ba289b2016-01-26 13:06:56 +01001961 * goes to exit will block in perf_event_exit_event().
1962 *
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001963 * When called from perf_pending_event it's OK because event->ctx
1964 * is the current context on this CPU and preemption is disabled,
1965 * hence we can't get into perf_event_task_sched_out for this context.
1966 */
Peter Zijlstraf63a8da2015-01-23 12:24:14 +01001967static void _perf_event_disable(struct perf_event *event)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001968{
1969 struct perf_event_context *ctx = event->ctx;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001970
Thomas Gleixnere625cce12009-11-17 18:02:06 +01001971 raw_spin_lock_irq(&ctx->lock);
Peter Zijlstra7b648012015-12-03 18:35:21 +01001972 if (event->state <= PERF_EVENT_STATE_OFF) {
Thomas Gleixnere625cce12009-11-17 18:02:06 +01001973 raw_spin_unlock_irq(&ctx->lock);
Peter Zijlstra7b648012015-12-03 18:35:21 +01001974 return;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001975 }
Thomas Gleixnere625cce12009-11-17 18:02:06 +01001976 raw_spin_unlock_irq(&ctx->lock);
Peter Zijlstra7b648012015-12-03 18:35:21 +01001977
Peter Zijlstrafae3fde2016-01-11 15:00:50 +01001978 event_function_call(event, __perf_event_disable, NULL);
1979}
1980
1981void perf_event_disable_local(struct perf_event *event)
1982{
1983 event_function_local(event, __perf_event_disable, NULL);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001984}
Peter Zijlstraf63a8da2015-01-23 12:24:14 +01001985
1986/*
1987 * Strictly speaking kernel users cannot create groups and therefore this
1988 * interface does not need the perf_event_ctx_lock() magic.
1989 */
1990void perf_event_disable(struct perf_event *event)
1991{
1992 struct perf_event_context *ctx;
1993
1994 ctx = perf_event_ctx_lock(event);
1995 _perf_event_disable(event);
1996 perf_event_ctx_unlock(event, ctx);
1997}
Robert Richterdcfce4a2011-10-11 17:11:08 +02001998EXPORT_SYMBOL_GPL(perf_event_disable);
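
/*
 * Sketch of the exported API from an in-kernel user's point of view (the
 * event would typically come from perf_event_create_kernel_counter();
 * names are illustrative):
 *
 *	perf_event_disable(event);
 *	... reconfigure or read out state with the counter stopped ...
 *	perf_event_enable(event);
 *
 * Both helpers take ctx->mutex via perf_event_ctx_lock() and may sleep;
 * for contexts that cannot sleep see perf_event_disable_local() above and
 * perf_event_disable_inatomic() below.
 */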
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001999
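/*
 * NMI/irq-safe variant: mark the event for disabling and defer the actual
 * work to irq_work context.
 */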
Jiri Olsa5aab90c2016-10-26 11:48:24 +02002000void perf_event_disable_inatomic(struct perf_event *event)
2001{
2002 event->pending_disable = 1;
2003 irq_work_queue(&event->pending);
2004}
2005
Stephane Eraniane5d13672011-02-14 11:20:01 +02002006static void perf_set_shadow_time(struct perf_event *event,
Peter Zijlstra0d3d73a2017-09-05 14:16:28 +02002007 struct perf_event_context *ctx)
Stephane Eraniane5d13672011-02-14 11:20:01 +02002008{
2009 /*
2010 * use the correct time source for the time snapshot
2011 *
2012 * We could get by without this by leveraging the
2013 * fact that to get to this function, the caller
2014 * has most likely already called update_context_time()
2015 * and update_cgrp_time_xx() and thus both timestamp
2016 * are identical (or very close). Given that tstamp is,
2017 * already adjusted for cgroup, we could say that:
2018 * tstamp - ctx->timestamp
2019 * is equivalent to
2020 * tstamp - cgrp->timestamp.
2021 *
2022 * Then, in perf_output_read(), the calculation would
2023 * work with no changes because:
2024 * - event is guaranteed scheduled in
2025 * - no scheduled out in between
2026 * - thus the timestamp would be the same
2027 *
2028 * But this is a bit hairy.
2029 *
2030 * So instead, we have an explicit cgroup call to remain
 2031 * within the same time source all along. We believe it
2032 * is cleaner and simpler to understand.
2033 */
2034 if (is_cgroup_event(event))
Peter Zijlstra0d3d73a2017-09-05 14:16:28 +02002035 perf_cgroup_set_shadow_time(event, event->tstamp);
Stephane Eraniane5d13672011-02-14 11:20:01 +02002036 else
Peter Zijlstra0d3d73a2017-09-05 14:16:28 +02002037 event->shadow_ctx_time = event->tstamp - ctx->timestamp;
Stephane Eraniane5d13672011-02-14 11:20:01 +02002038}
2039
Peter Zijlstra4fe757d2011-02-15 22:26:07 +01002040#define MAX_INTERRUPTS (~0ULL)
2041
2042static void perf_log_throttle(struct perf_event *event, int enable);
Alexander Shishkinec0d7722015-01-14 14:18:23 +02002043static void perf_log_itrace_start(struct perf_event *event);
Peter Zijlstra4fe757d2011-02-15 22:26:07 +01002044
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002045static int
Stephane Eranian9ffcfa62010-10-20 15:25:01 +02002046event_sched_in(struct perf_event *event,
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002047 struct perf_cpu_context *cpuctx,
Peter Zijlstra6e377382010-02-11 13:21:58 +01002048 struct perf_event_context *ctx)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002049{
Alexander Shishkin44377272013-12-16 14:17:36 +02002050 int ret = 0;
Stephane Eranian41587552011-01-03 18:20:01 +02002051
Peter Zijlstra63342412014-05-05 11:49:16 +02002052 lockdep_assert_held(&ctx->lock);
2053
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002054 if (event->state <= PERF_EVENT_STATE_OFF)
2055 return 0;
2056
Alexander Shishkin95ff4ca2015-12-02 18:41:11 +02002057 WRITE_ONCE(event->oncpu, smp_processor_id());
2058 /*
Peter Zijlstra0c1cbc12017-09-05 16:26:44 +02002059 * Order event::oncpu write to happen before the ACTIVE state is
2060 * visible. This allows perf_event_{stop,read}() to observe the correct
2061 * ->oncpu if it sees ACTIVE.
Alexander Shishkin95ff4ca2015-12-02 18:41:11 +02002062 */
2063 smp_wmb();
Peter Zijlstra0d3d73a2017-09-05 14:16:28 +02002064 perf_event_set_state(event, PERF_EVENT_STATE_ACTIVE);
Peter Zijlstra4fe757d2011-02-15 22:26:07 +01002065
2066 /*
 2067 * Unthrottle events: since we just scheduled, we might have missed
 2068 * several ticks already, and for a heavily scheduling task there is
 2069 * little guarantee it'll get a tick in a timely manner.
2070 */
2071 if (unlikely(event->hw.interrupts == MAX_INTERRUPTS)) {
2072 perf_log_throttle(event, 1);
2073 event->hw.interrupts = 0;
2074 }
2075
Alexander Shishkin44377272013-12-16 14:17:36 +02002076 perf_pmu_disable(event->pmu);
2077
Peter Zijlstra0d3d73a2017-09-05 14:16:28 +02002078 perf_set_shadow_time(event, ctx);
Shaohua Li72f669c2015-02-05 15:55:31 -08002079
Alexander Shishkinec0d7722015-01-14 14:18:23 +02002080 perf_log_itrace_start(event);
2081
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02002082 if (event->pmu->add(event, PERF_EF_START)) {
Peter Zijlstra0d3d73a2017-09-05 14:16:28 +02002083 perf_event_set_state(event, PERF_EVENT_STATE_INACTIVE);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002084 event->oncpu = -1;
Alexander Shishkin44377272013-12-16 14:17:36 +02002085 ret = -EAGAIN;
2086 goto out;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002087 }
2088
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002089 if (!is_software_event(event))
2090 cpuctx->active_oncpu++;
Mark Rutland2fde4f92015-01-07 15:01:54 +00002091 if (!ctx->nr_active++)
2092 perf_event_ctx_activate(ctx);
Peter Zijlstra0f5a2602011-11-16 14:38:16 +01002093 if (event->attr.freq && event->attr.sample_freq)
2094 ctx->nr_freq++;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002095
2096 if (event->attr.exclusive)
2097 cpuctx->exclusive = 1;
2098
Alexander Shishkin44377272013-12-16 14:17:36 +02002099out:
2100 perf_pmu_enable(event->pmu);
2101
2102 return ret;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002103}
2104
2105static int
2106group_sched_in(struct perf_event *group_event,
2107 struct perf_cpu_context *cpuctx,
Peter Zijlstra6e377382010-02-11 13:21:58 +01002108 struct perf_event_context *ctx)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002109{
Lin Ming6bde9b62010-04-23 13:56:00 +08002110 struct perf_event *event, *partial_group = NULL;
Peter Zijlstra4a234592014-02-24 12:43:31 +01002111 struct pmu *pmu = ctx->pmu;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002112
2113 if (group_event->state == PERF_EVENT_STATE_OFF)
2114 return 0;
2115
Sukadev Bhattiprolufbbe0702015-09-03 20:07:45 -07002116 pmu->start_txn(pmu, PERF_PMU_TXN_ADD);
Lin Ming6bde9b62010-04-23 13:56:00 +08002117
Stephane Eranian9ffcfa62010-10-20 15:25:01 +02002118 if (event_sched_in(group_event, cpuctx, ctx)) {
Peter Zijlstraad5133b2010-06-15 12:22:39 +02002119 pmu->cancel_txn(pmu);
Peter Zijlstra272325c2015-04-15 11:41:58 +02002120 perf_mux_hrtimer_restart(cpuctx);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002121 return -EAGAIN;
Stephane Eranian90151c352010-05-25 16:23:10 +02002122 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002123
2124 /*
2125 * Schedule in siblings as one group (if any):
2126 */
2127 list_for_each_entry(event, &group_event->sibling_list, group_entry) {
Stephane Eranian9ffcfa62010-10-20 15:25:01 +02002128 if (event_sched_in(event, cpuctx, ctx)) {
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002129 partial_group = event;
2130 goto group_error;
2131 }
2132 }
2133
Stephane Eranian9ffcfa62010-10-20 15:25:01 +02002134 if (!pmu->commit_txn(pmu))
Paul Mackerras6e851582010-05-08 20:58:00 +10002135 return 0;
Stephane Eranian9ffcfa62010-10-20 15:25:01 +02002136
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002137group_error:
2138 /*
2139 * Groups can be scheduled in as one unit only, so undo any
2140 * partial group before returning:
Peter Zijlstra0d3d73a2017-09-05 14:16:28 +02002141 * The events up to the failed event are scheduled out normally.
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002142 */
2143 list_for_each_entry(event, &group_event->sibling_list, group_entry) {
2144 if (event == partial_group)
Peter Zijlstra0d3d73a2017-09-05 14:16:28 +02002145 break;
Stephane Eraniand7842da2010-10-20 15:25:01 +02002146
Peter Zijlstra0d3d73a2017-09-05 14:16:28 +02002147 event_sched_out(event, cpuctx, ctx);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002148 }
Stephane Eranian9ffcfa62010-10-20 15:25:01 +02002149 event_sched_out(group_event, cpuctx, ctx);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002150
Peter Zijlstraad5133b2010-06-15 12:22:39 +02002151 pmu->cancel_txn(pmu);
Stephane Eranian90151c352010-05-25 16:23:10 +02002152
Peter Zijlstra272325c2015-04-15 11:41:58 +02002153 perf_mux_hrtimer_restart(cpuctx);
Stephane Eranian9e630202013-04-03 14:21:33 +02002154
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002155 return -EAGAIN;
2156}
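
/*
 * The PMU transaction protocol used above, in sketch form (illustrative
 * pseudo-flow of the code in group_sched_in()):
 *
 *	pmu->start_txn(pmu, PERF_PMU_TXN_ADD);
 *	event_sched_in(leader), event_sched_in(each sibling)
 *		-> pmu->add(event, PERF_EF_START) for each member
 *	if (!pmu->commit_txn(pmu))
 *		success: the whole group is programmed;
 *	otherwise unwind with event_sched_out() and pmu->cancel_txn(pmu).
 *
 * This lets the hardware validate resource constraints for the group as a
 * whole at commit time rather than per event.
 */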
2157
2158/*
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002159 * Work out whether we can put this event group on the CPU now.
2160 */
2161static int group_can_go_on(struct perf_event *event,
2162 struct perf_cpu_context *cpuctx,
2163 int can_add_hw)
2164{
2165 /*
2166 * Groups consisting entirely of software events can always go on.
2167 */
David Carrillo-Cisneros4ff6a8d2016-08-17 13:55:05 -07002168 if (event->group_caps & PERF_EV_CAP_SOFTWARE)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002169 return 1;
2170 /*
2171 * If an exclusive group is already on, no other hardware
2172 * events can go on.
2173 */
2174 if (cpuctx->exclusive)
2175 return 0;
2176 /*
2177 * If this group is exclusive and there are already
2178 * events on the CPU, it can't go on.
2179 */
2180 if (event->attr.exclusive && cpuctx->active_oncpu)
2181 return 0;
2182 /*
2183 * Otherwise, try to add it if all previous groups were able
2184 * to go on.
2185 */
2186 return can_add_hw;
2187}
2188
2189static void add_event_to_ctx(struct perf_event *event,
2190 struct perf_event_context *ctx)
2191{
2192 list_add_event(event, ctx);
Peter Zijlstra8a495422010-05-27 15:47:49 +02002193 perf_group_attach(event);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002194}
2195
Peter Zijlstrabd2afa42016-02-24 18:45:49 +01002196static void ctx_sched_out(struct perf_event_context *ctx,
2197 struct perf_cpu_context *cpuctx,
2198 enum event_type_t event_type);
Peter Zijlstra2c29ef02011-04-09 21:17:44 +02002199static void
2200ctx_sched_in(struct perf_event_context *ctx,
2201 struct perf_cpu_context *cpuctx,
2202 enum event_type_t event_type,
2203 struct task_struct *task);
Peter Zijlstrafe4b04f2011-02-02 13:19:09 +01002204
Peter Zijlstrabd2afa42016-02-24 18:45:49 +01002205static void task_ctx_sched_out(struct perf_cpu_context *cpuctx,
Alexander Shishkin487f05e2017-01-19 18:43:30 +02002206 struct perf_event_context *ctx,
2207 enum event_type_t event_type)
Peter Zijlstrabd2afa42016-02-24 18:45:49 +01002208{
2209 if (!cpuctx->task_ctx)
2210 return;
2211
2212 if (WARN_ON_ONCE(ctx != cpuctx->task_ctx))
2213 return;
2214
Alexander Shishkin487f05e2017-01-19 18:43:30 +02002215 ctx_sched_out(ctx, cpuctx, event_type);
Peter Zijlstrabd2afa42016-02-24 18:45:49 +01002216}
2217
Peter Zijlstradce58552011-04-09 21:17:46 +02002218static void perf_event_sched_in(struct perf_cpu_context *cpuctx,
2219 struct perf_event_context *ctx,
2220 struct task_struct *task)
2221{
2222 cpu_ctx_sched_in(cpuctx, EVENT_PINNED, task);
2223 if (ctx)
2224 ctx_sched_in(ctx, cpuctx, EVENT_PINNED, task);
2225 cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE, task);
2226 if (ctx)
2227 ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE, task);
2228}
2229
Alexander Shishkin487f05e2017-01-19 18:43:30 +02002230/*
2231 * We want to maintain the following priority of scheduling:
2232 * - CPU pinned (EVENT_CPU | EVENT_PINNED)
2233 * - task pinned (EVENT_PINNED)
2234 * - CPU flexible (EVENT_CPU | EVENT_FLEXIBLE)
2235 * - task flexible (EVENT_FLEXIBLE).
2236 *
2237 * In order to avoid unscheduling and scheduling back in everything every
2238 * time an event is added, only do it for the groups of equal priority and
2239 * below.
2240 *
2241 * This can be called after a batch operation on task events, in which case
2242 * event_type is a bit mask of the types of events involved. For CPU events,
2243 * event_type is only either EVENT_PINNED or EVENT_FLEXIBLE.
2244 */
Peter Zijlstra3e349502016-01-08 10:01:18 +01002245static void ctx_resched(struct perf_cpu_context *cpuctx,
Alexander Shishkin487f05e2017-01-19 18:43:30 +02002246 struct perf_event_context *task_ctx,
2247 enum event_type_t event_type)
Peter Zijlstra00179602015-11-30 16:26:35 +01002248{
Alexander Shishkin487f05e2017-01-19 18:43:30 +02002249 enum event_type_t ctx_event_type = event_type & EVENT_ALL;
2250 bool cpu_event = !!(event_type & EVENT_CPU);
2251
2252 /*
2253 * If pinned groups are involved, flexible groups also need to be
2254 * scheduled out.
2255 */
2256 if (event_type & EVENT_PINNED)
2257 event_type |= EVENT_FLEXIBLE;
2258
Peter Zijlstra3e349502016-01-08 10:01:18 +01002259 perf_pmu_disable(cpuctx->ctx.pmu);
2260 if (task_ctx)
Alexander Shishkin487f05e2017-01-19 18:43:30 +02002261 task_ctx_sched_out(cpuctx, task_ctx, event_type);
2262
2263 /*
2264 * Decide which cpu ctx groups to schedule out based on the types
2265 * of events that caused rescheduling:
2266 * - EVENT_CPU: schedule out corresponding groups;
2267 * - EVENT_PINNED task events: schedule out EVENT_FLEXIBLE groups;
2268 * - otherwise, do nothing more.
2269 */
2270 if (cpu_event)
2271 cpu_ctx_sched_out(cpuctx, ctx_event_type);
2272 else if (ctx_event_type & EVENT_PINNED)
2273 cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
2274
Peter Zijlstra3e349502016-01-08 10:01:18 +01002275 perf_event_sched_in(cpuctx, task_ctx, current);
2276 perf_pmu_enable(cpuctx->ctx.pmu);
Peter Zijlstra00179602015-11-30 16:26:35 +01002277}
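
/*
 * Example: installing a task-pinned event ends up in ctx_resched(cpuctx,
 * task_ctx, EVENT_PINNED); per the rules above this schedules out the
 * task's pinned and flexible groups plus the CPU's EVENT_FLEXIBLE groups,
 * then reschedules everything so pinned groups get first pick of the
 * hardware.
 */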
2278
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002279/*
2280 * Cross CPU call to install and enable a performance event
2281 *
Peter Zijlstraa0963092016-02-24 18:45:50 +01002282 * Very similar to remote_function() + event_function() but cannot assume that
2283 * things like ctx->is_active and cpuctx->task_ctx are set.
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002284 */
Peter Zijlstrafe4b04f2011-02-02 13:19:09 +01002285static int __perf_install_in_context(void *info)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002286{
Peter Zijlstraa0963092016-02-24 18:45:50 +01002287 struct perf_event *event = info;
2288 struct perf_event_context *ctx = event->ctx;
Peter Zijlstra108b02c2010-09-06 14:32:03 +02002289 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
Peter Zijlstra2c29ef02011-04-09 21:17:44 +02002290 struct perf_event_context *task_ctx = cpuctx->task_ctx;
Peter Zijlstra63cae122016-12-09 14:59:00 +01002291 bool reprogram = true;
Peter Zijlstraa0963092016-02-24 18:45:50 +01002292 int ret = 0;
Peter Zijlstra2c29ef02011-04-09 21:17:44 +02002293
Peter Zijlstra63b6da32016-01-14 16:05:37 +01002294 raw_spin_lock(&cpuctx->ctx.lock);
Peter Zijlstra39a43642016-01-11 12:46:35 +01002295 if (ctx->task) {
Peter Zijlstrab58f6b02011-06-07 00:23:28 +02002296 raw_spin_lock(&ctx->lock);
2297 task_ctx = ctx;
Peter Zijlstraa0963092016-02-24 18:45:50 +01002298
Peter Zijlstra63cae122016-12-09 14:59:00 +01002299 reprogram = (ctx->task == current);
2300
2301 /*
2302 * If the task is running, it must be running on this CPU,
2303 * otherwise we cannot reprogram things.
2304 *
 2305 * If it's not running, we don't care; ctx->lock will
2306 * serialize against it becoming runnable.
2307 */
2308 if (task_curr(ctx->task) && !reprogram) {
Peter Zijlstraa0963092016-02-24 18:45:50 +01002309 ret = -ESRCH;
Peter Zijlstra63b6da32016-01-14 16:05:37 +01002310 goto unlock;
Peter Zijlstraa0963092016-02-24 18:45:50 +01002311 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002312
Peter Zijlstra63cae122016-12-09 14:59:00 +01002313 WARN_ON_ONCE(reprogram && cpuctx->task_ctx && cpuctx->task_ctx != ctx);
Peter Zijlstra63b6da32016-01-14 16:05:37 +01002314 } else if (task_ctx) {
2315 raw_spin_lock(&task_ctx->lock);
Peter Zijlstrab58f6b02011-06-07 00:23:28 +02002316 }
2317
Peter Zijlstra63cae122016-12-09 14:59:00 +01002318 if (reprogram) {
Peter Zijlstraa0963092016-02-24 18:45:50 +01002319 ctx_sched_out(ctx, cpuctx, EVENT_TIME);
2320 add_event_to_ctx(event, ctx);
Alexander Shishkin487f05e2017-01-19 18:43:30 +02002321 ctx_resched(cpuctx, task_ctx, get_event_type(event));
Peter Zijlstraa0963092016-02-24 18:45:50 +01002322 } else {
2323 add_event_to_ctx(event, ctx);
2324 }
2325
Peter Zijlstra63b6da32016-01-14 16:05:37 +01002326unlock:
Peter Zijlstra2c29ef02011-04-09 21:17:44 +02002327 perf_ctx_unlock(cpuctx, task_ctx);
Peter Zijlstrafe4b04f2011-02-02 13:19:09 +01002328
Peter Zijlstraa0963092016-02-24 18:45:50 +01002329 return ret;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002330}
2331
2332/*
Peter Zijlstraa0963092016-02-24 18:45:50 +01002333 * Attach a performance event to a context.
2334 *
2335 * Very similar to event_function_call, see comment there.
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002336 */
2337static void
2338perf_install_in_context(struct perf_event_context *ctx,
2339 struct perf_event *event,
2340 int cpu)
2341{
Peter Zijlstraa0963092016-02-24 18:45:50 +01002342 struct task_struct *task = READ_ONCE(ctx->task);
Peter Zijlstra39a43642016-01-11 12:46:35 +01002343
Peter Zijlstrafe4b04f2011-02-02 13:19:09 +01002344 lockdep_assert_held(&ctx->mutex);
2345
Yan, Zheng0cda4c02012-06-15 14:31:33 +08002346 if (event->cpu != -1)
2347 event->cpu = cpu;
Peter Zijlstrac3f00c72010-08-18 14:37:15 +02002348
Peter Zijlstra0b8f1e22016-08-04 14:37:24 +02002349 /*
2350 * Ensures that if we can observe event->ctx, both the event and ctx
2351 * will be 'complete'. See perf_iterate_sb_cpu().
2352 */
2353 smp_store_release(&event->ctx, ctx);
2354
Peter Zijlstraa0963092016-02-24 18:45:50 +01002355 if (!task) {
2356 cpu_function_call(cpu, __perf_install_in_context, event);
Peter Zijlstra63b6da32016-01-14 16:05:37 +01002357 return;
2358 }
Peter Zijlstra6f932e52016-02-24 18:45:43 +01002359
Peter Zijlstraa0963092016-02-24 18:45:50 +01002360 /*
2361 * Should not happen, we validate the ctx is still alive before calling.
2362 */
2363 if (WARN_ON_ONCE(task == TASK_TOMBSTONE))
2364 return;
2365
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002366 /*
2367 * Installing events is tricky because we cannot rely on ctx->is_active
2368 * to be set in case this is the nr_events 0 -> 1 transition.
Peter Zijlstra63cae122016-12-09 14:59:00 +01002369 *
2370 * Instead we use task_curr(), which tells us if the task is running.
2371 * However, since we use task_curr() outside of rq::lock, we can race
2372 * against the actual state. This means the result can be wrong.
2373 *
2374 * If we get a false positive, we retry, this is harmless.
2375 *
2376 * If we get a false negative, things are complicated. If we are after
2377 * perf_event_context_sched_in() ctx::lock will serialize us, and the
2378 * value must be correct. If we're before, it doesn't matter since
2379 * perf_event_context_sched_in() will program the counter.
2380 *
2381 * However, this hinges on the remote context switch having observed
2382 * our task->perf_event_ctxp[] store, such that it will in fact take
2383 * ctx::lock in perf_event_context_sched_in().
2384 *
2385 * We do this by task_function_call(), if the IPI fails to hit the task
 2386 * we know any future context switch of the task must see the
 2387 * perf_event_ctxp[] store.
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002388 */
Peter Zijlstra63cae122016-12-09 14:59:00 +01002389
Peter Zijlstraa0963092016-02-24 18:45:50 +01002390 /*
Peter Zijlstra63cae122016-12-09 14:59:00 +01002391 * This smp_mb() orders the task->perf_event_ctxp[] store with the
2392 * task_cpu() load, such that if the IPI then does not find the task
2393 * running, a future context switch of that task must observe the
2394 * store.
Peter Zijlstraa0963092016-02-24 18:45:50 +01002395 */
Peter Zijlstra63cae122016-12-09 14:59:00 +01002396 smp_mb();
2397again:
2398 if (!task_function_call(task, __perf_install_in_context, event))
Peter Zijlstraa0963092016-02-24 18:45:50 +01002399 return;
2400
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002401 raw_spin_lock_irq(&ctx->lock);
2402 task = ctx->task;
Peter Zijlstraa0963092016-02-24 18:45:50 +01002403 if (WARN_ON_ONCE(task == TASK_TOMBSTONE)) {
2404 /*
2405 * Cannot happen because we already checked above (which also
2406 * cannot happen), and we hold ctx->mutex, which serializes us
2407 * against perf_event_exit_task_context().
2408 */
Peter Zijlstra39a43642016-01-11 12:46:35 +01002409 raw_spin_unlock_irq(&ctx->lock);
2410 return;
2411 }
Peter Zijlstraa0963092016-02-24 18:45:50 +01002412 /*
Peter Zijlstra63cae122016-12-09 14:59:00 +01002413 * If the task is not running, ctx->lock will avoid it becoming so,
2414 * thus we can safely install the event.
Peter Zijlstraa0963092016-02-24 18:45:50 +01002415 */
Peter Zijlstra63cae122016-12-09 14:59:00 +01002416 if (task_curr(task)) {
2417 raw_spin_unlock_irq(&ctx->lock);
2418 goto again;
2419 }
2420 add_event_to_ctx(event, ctx);
2421 raw_spin_unlock_irq(&ctx->lock);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002422}
2423
2424/*
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002425 * Cross CPU call to enable a performance event
2426 */
Peter Zijlstrafae3fde2016-01-11 15:00:50 +01002427static void __perf_event_enable(struct perf_event *event,
2428 struct perf_cpu_context *cpuctx,
2429 struct perf_event_context *ctx,
2430 void *info)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002431{
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002432 struct perf_event *leader = event->group_leader;
Peter Zijlstrafae3fde2016-01-11 15:00:50 +01002433 struct perf_event_context *task_ctx;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002434
Peter Zijlstra6e801e012016-01-26 12:17:08 +01002435 if (event->state >= PERF_EVENT_STATE_INACTIVE ||
2436 event->state <= PERF_EVENT_STATE_ERROR)
Peter Zijlstrafae3fde2016-01-11 15:00:50 +01002437 return;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002438
Peter Zijlstrabd2afa42016-02-24 18:45:49 +01002439 if (ctx->is_active)
2440 ctx_sched_out(ctx, cpuctx, EVENT_TIME);
2441
Peter Zijlstra0d3d73a2017-09-05 14:16:28 +02002442 perf_event_set_state(event, PERF_EVENT_STATE_INACTIVE);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002443
Peter Zijlstrafae3fde2016-01-11 15:00:50 +01002444 if (!ctx->is_active)
2445 return;
2446
Stephane Eraniane5d13672011-02-14 11:20:01 +02002447 if (!event_filter_match(event)) {
Peter Zijlstrabd2afa42016-02-24 18:45:49 +01002448 ctx_sched_in(ctx, cpuctx, EVENT_TIME, current);
Peter Zijlstrafae3fde2016-01-11 15:00:50 +01002449 return;
Stephane Eraniane5d13672011-02-14 11:20:01 +02002450 }
Peter Zijlstraf4c41762009-12-16 17:55:54 +01002451
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002452 /*
2453 * If the event is in a group and isn't the group leader,
2454 * then don't put it on unless the group is on.
2455 */
Peter Zijlstrabd2afa42016-02-24 18:45:49 +01002456 if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE) {
2457 ctx_sched_in(ctx, cpuctx, EVENT_TIME, current);
Peter Zijlstrafae3fde2016-01-11 15:00:50 +01002458 return;
Peter Zijlstrabd2afa42016-02-24 18:45:49 +01002459 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002460
Peter Zijlstrafae3fde2016-01-11 15:00:50 +01002461 task_ctx = cpuctx->task_ctx;
2462 if (ctx->task)
2463 WARN_ON_ONCE(task_ctx != ctx);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002464
Alexander Shishkin487f05e2017-01-19 18:43:30 +02002465 ctx_resched(cpuctx, task_ctx, get_event_type(event));
Peter Zijlstra7b648012015-12-03 18:35:21 +01002466}
2467
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002468/*
 2469 * Enable an event.
2470 *
2471 * If event->ctx is a cloned context, callers must make sure that
2472 * every task struct that event->ctx->task could possibly point to
2473 * remains valid. This condition is satisfied when called through
2474 * perf_event_for_each_child or perf_event_for_each as described
2475 * for perf_event_disable.
2476 */
Peter Zijlstraf63a8da2015-01-23 12:24:14 +01002477static void _perf_event_enable(struct perf_event *event)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002478{
2479 struct perf_event_context *ctx = event->ctx;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002480
Thomas Gleixnere625cce12009-11-17 18:02:06 +01002481 raw_spin_lock_irq(&ctx->lock);
Peter Zijlstra6e801e012016-01-26 12:17:08 +01002482 if (event->state >= PERF_EVENT_STATE_INACTIVE ||
2483 event->state < PERF_EVENT_STATE_ERROR) {
Peter Zijlstra7b648012015-12-03 18:35:21 +01002484 raw_spin_unlock_irq(&ctx->lock);
2485 return;
2486 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002487
2488 /*
2489 * If the event is in error state, clear that first.
Peter Zijlstra7b648012015-12-03 18:35:21 +01002490 *
2491 * That way, if we see the event in error state below, we know that it
2492 * has gone back into error state, as distinct from the task having
2493 * been scheduled away before the cross-call arrived.
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002494 */
2495 if (event->state == PERF_EVENT_STATE_ERROR)
2496 event->state = PERF_EVENT_STATE_OFF;
Thomas Gleixnere625cce12009-11-17 18:02:06 +01002497 raw_spin_unlock_irq(&ctx->lock);
Peter Zijlstrafe4b04f2011-02-02 13:19:09 +01002498
Peter Zijlstrafae3fde2016-01-11 15:00:50 +01002499 event_function_call(event, __perf_event_enable, NULL);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002500}
Peter Zijlstraf63a8da2015-01-23 12:24:14 +01002501
2502/*
2503 * See perf_event_disable();
2504 */
2505void perf_event_enable(struct perf_event *event)
2506{
2507 struct perf_event_context *ctx;
2508
2509 ctx = perf_event_ctx_lock(event);
2510 _perf_event_enable(event);
2511 perf_event_ctx_unlock(event, ctx);
2512}
Robert Richterdcfce4a2011-10-11 17:11:08 +02002513EXPORT_SYMBOL_GPL(perf_event_enable);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002514
Alexander Shishkin375637b2016-04-27 18:44:46 +03002515struct stop_event_data {
2516 struct perf_event *event;
2517 unsigned int restart;
2518};
2519
Alexander Shishkin95ff4ca2015-12-02 18:41:11 +02002520static int __perf_event_stop(void *info)
2521{
Alexander Shishkin375637b2016-04-27 18:44:46 +03002522 struct stop_event_data *sd = info;
2523 struct perf_event *event = sd->event;
Alexander Shishkin95ff4ca2015-12-02 18:41:11 +02002524
Alexander Shishkin375637b2016-04-27 18:44:46 +03002525 /* if it's already INACTIVE, do nothing */
Alexander Shishkin95ff4ca2015-12-02 18:41:11 +02002526 if (READ_ONCE(event->state) != PERF_EVENT_STATE_ACTIVE)
2527 return 0;
2528
2529 /* matches smp_wmb() in event_sched_in() */
2530 smp_rmb();
2531
2532 /*
2533 * There is a window with interrupts enabled before we get here,
2534 * so we need to check again lest we try to stop another CPU's event.
2535 */
2536 if (READ_ONCE(event->oncpu) != smp_processor_id())
2537 return -EAGAIN;
2538
2539 event->pmu->stop(event, PERF_EF_UPDATE);
2540
Alexander Shishkin375637b2016-04-27 18:44:46 +03002541 /*
2542 * May race with the actual stop (through perf_pmu_output_stop()),
2543 * but it is only used for events with AUX ring buffer, and such
2544 * events will refuse to restart because of rb::aux_mmap_count==0,
2545 * see comments in perf_aux_output_begin().
2546 *
 2547 * Since this is happening on an event-local CPU, no trace is lost
2548 * while restarting.
2549 */
2550 if (sd->restart)
Will Deaconc9bbdd42016-08-15 11:42:45 +01002551 event->pmu->start(event, 0);
Alexander Shishkin375637b2016-04-27 18:44:46 +03002552
Alexander Shishkin95ff4ca2015-12-02 18:41:11 +02002553 return 0;
2554}
2555
Alexander Shishkin767ae082016-09-06 16:23:49 +03002556static int perf_event_stop(struct perf_event *event, int restart)
Alexander Shishkin375637b2016-04-27 18:44:46 +03002557{
2558 struct stop_event_data sd = {
2559 .event = event,
Alexander Shishkin767ae082016-09-06 16:23:49 +03002560 .restart = restart,
Alexander Shishkin375637b2016-04-27 18:44:46 +03002561 };
2562 int ret = 0;
2563
2564 do {
2565 if (READ_ONCE(event->state) != PERF_EVENT_STATE_ACTIVE)
2566 return 0;
2567
2568 /* matches smp_wmb() in event_sched_in() */
2569 smp_rmb();
2570
2571 /*
2572 * We only want to restart ACTIVE events, so if the event goes
2573 * inactive here (event->oncpu==-1), there's nothing more to do;
2574 * fall through with ret==-ENXIO.
2575 */
2576 ret = cpu_function_call(READ_ONCE(event->oncpu),
2577 __perf_event_stop, &sd);
2578 } while (ret == -EAGAIN);
2579
2580 return ret;
2581}
2582
2583/*
 2584 * In order to contain the amount of racy and tricky code in the address
 2585 * filter configuration management, it is a two-part process:
2586 *
2587 * (p1) when userspace mappings change as a result of (1) or (2) or (3) below,
2588 * we update the addresses of corresponding vmas in
2589 * event::addr_filters_offs array and bump the event::addr_filters_gen;
2590 * (p2) when an event is scheduled in (pmu::add), it calls
2591 * perf_event_addr_filters_sync() which calls pmu::addr_filters_sync()
2592 * if the generation has changed since the previous call.
2593 *
2594 * If (p1) happens while the event is active, we restart it to force (p2).
2595 *
2596 * (1) perf_addr_filters_apply(): adjusting filters' offsets based on
2597 * pre-existing mappings, called once when new filters arrive via SET_FILTER
2598 * ioctl;
2599 * (2) perf_addr_filters_adjust(): adjusting filters' offsets based on newly
2600 * registered mapping, called for every new mmap(), with mm::mmap_sem down
2601 * for reading;
2602 * (3) perf_event_addr_filters_exec(): clearing filters' offsets in the process
2603 * of exec.
2604 */
2605void perf_event_addr_filters_sync(struct perf_event *event)
2606{
2607 struct perf_addr_filters_head *ifh = perf_event_addr_filters(event);
2608
2609 if (!has_addr_filter(event))
2610 return;
2611
2612 raw_spin_lock(&ifh->lock);
2613 if (event->addr_filters_gen != event->hw.addr_filters_gen) {
2614 event->pmu->addr_filters_sync(event);
2615 event->hw.addr_filters_gen = event->addr_filters_gen;
2616 }
2617 raw_spin_unlock(&ifh->lock);
2618}
2619EXPORT_SYMBOL_GPL(perf_event_addr_filters_sync);
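/*
 * A sketch of the driver side of (p2); "mypmu" and its helpers are
 * hypothetical, not a real driver:
 */
static void mypmu_addr_filters_sync(struct perf_event *event)
{
	struct perf_addr_filters_head *ifh = perf_event_addr_filters(event);

	/* Called under ifh->lock; program the updated offsets. */
	mypmu_program_filter_ranges(event, ifh);	/* hypothetical */
}

static int mypmu_add(struct perf_event *event, int flags)
{
	/* Pick up any filter updates published by (p1) before counting: */
	perf_event_addr_filters_sync(event);

	return mypmu_schedule_on_hw(event, flags);	/* hypothetical */
}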
2620
Peter Zijlstraf63a8da2015-01-23 12:24:14 +01002621static int _perf_event_refresh(struct perf_event *event, int refresh)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002622{
2623 /*
2624 * not supported on inherited events
2625 */
Franck Bui-Huu2e939d12010-11-23 16:21:44 +01002626 if (event->attr.inherit || !is_sampling_event(event))
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002627 return -EINVAL;
2628
2629 atomic_add(refresh, &event->event_limit);
Peter Zijlstraf63a8da2015-01-23 12:24:14 +01002630 _perf_event_enable(event);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002631
2632 return 0;
2633}
Peter Zijlstraf63a8da2015-01-23 12:24:14 +01002634
2635/*
2636 * See perf_event_disable()
2637 */
2638int perf_event_refresh(struct perf_event *event, int refresh)
2639{
2640 struct perf_event_context *ctx;
2641 int ret;
2642
2643 ctx = perf_event_ctx_lock(event);
2644 ret = _perf_event_refresh(event, refresh);
2645 perf_event_ctx_unlock(event, ctx);
2646
2647 return ret;
2648}
Avi Kivity26ca5c12011-06-29 18:42:37 +03002649EXPORT_SYMBOL_GPL(perf_event_refresh);
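/*
 * A userspace sketch of how this path is typically driven, via
 * ioctl(PERF_EVENT_IOC_REFRESH): arm a disabled sampling event for
 * one overflow at a time from a SIGIO handler. Event creation with
 * perf_event_open() (attr.sample_period set, attr.disabled = 1) and
 * error handling are assumed/trimmed.
 */
#include <fcntl.h>
#include <signal.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/perf_event.h>

static int perf_fd;	/* assumed: fd from perf_event_open() */

static void on_overflow(int sig)
{
	/* consume the sample, then allow one more overflow: */
	ioctl(perf_fd, PERF_EVENT_IOC_REFRESH, 1);
}

static void arm_counter(void)
{
	fcntl(perf_fd, F_SETOWN, getpid());
	fcntl(perf_fd, F_SETFL, fcntl(perf_fd, F_GETFL) | O_ASYNC);
	signal(SIGIO, on_overflow);

	/* enables the event and permits one overflow: */
	ioctl(perf_fd, PERF_EVENT_IOC_REFRESH, 1);
}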
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002650
Frederic Weisbecker5b0311e2010-01-17 11:59:13 +01002651static void ctx_sched_out(struct perf_event_context *ctx,
2652 struct perf_cpu_context *cpuctx,
2653 enum event_type_t event_type)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002654{
Peter Zijlstradb24d332011-04-09 21:17:45 +02002655 int is_active = ctx->is_active;
Peter Zijlstrac994d612016-01-08 09:20:23 +01002656 struct perf_event *event;
2657
2658 lockdep_assert_held(&ctx->lock);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002659
Peter Zijlstra39a43642016-01-11 12:46:35 +01002660 if (likely(!ctx->nr_events)) {
2661 /*
2662 * See __perf_remove_from_context().
2663 */
2664 WARN_ON_ONCE(ctx->is_active);
2665 if (ctx->task)
2666 WARN_ON_ONCE(cpuctx->task_ctx);
2667 return;
2668 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002669
Peter Zijlstradb24d332011-04-09 21:17:45 +02002670 ctx->is_active &= ~event_type;
Peter Zijlstra3cbaa592016-02-24 18:45:47 +01002671 if (!(ctx->is_active & EVENT_ALL))
2672 ctx->is_active = 0;
2673
Peter Zijlstra63e30d32016-01-08 11:39:10 +01002674 if (ctx->task) {
2675 WARN_ON_ONCE(cpuctx->task_ctx != ctx);
2676 if (!ctx->is_active)
2677 cpuctx->task_ctx = NULL;
2678 }
Peter Zijlstrafacc4302011-04-09 21:17:42 +02002679
Peter Zijlstra8fdc6532016-03-29 09:26:44 +02002680 /*
2681 * Always update time if it was set; not only when it changes.
2682 * Otherwise we can 'forget' to update time for any but the last
2683 * context we sched out. For example:
2684 *
2685 * ctx_sched_out(.event_type = EVENT_FLEXIBLE)
2686 * ctx_sched_out(.event_type = EVENT_PINNED)
2687 *
2688 * would only update time for the pinned events.
2689 */
Peter Zijlstra3cbaa592016-02-24 18:45:47 +01002690 if (is_active & EVENT_TIME) {
2691 /* update (and stop) ctx time */
2692 update_context_time(ctx);
2693 update_cgrp_time_from_cpuctx(cpuctx);
2694 }
2695
Peter Zijlstra8fdc6532016-03-29 09:26:44 +02002696 is_active ^= ctx->is_active; /* changed bits */
2697
Peter Zijlstra3cbaa592016-02-24 18:45:47 +01002698 if (!ctx->nr_active || !(is_active & EVENT_ALL))
Peter Zijlstrafacc4302011-04-09 21:17:42 +02002699 return;
Frederic Weisbecker5b0311e2010-01-17 11:59:13 +01002700
Peter Zijlstra075e0b02011-04-09 21:17:40 +02002701 perf_pmu_disable(ctx->pmu);
Peter Zijlstra3cbaa592016-02-24 18:45:47 +01002702 if (is_active & EVENT_PINNED) {
Frederic Weisbecker889ff012010-01-09 20:04:47 +01002703 list_for_each_entry(event, &ctx->pinned_groups, group_entry)
2704 group_sched_out(event, cpuctx, ctx);
Peter Zijlstra9ed60602010-06-11 17:36:35 +02002705 }
Frederic Weisbecker889ff012010-01-09 20:04:47 +01002706
Peter Zijlstra3cbaa592016-02-24 18:45:47 +01002707 if (is_active & EVENT_FLEXIBLE) {
Frederic Weisbecker889ff012010-01-09 20:04:47 +01002708 list_for_each_entry(event, &ctx->flexible_groups, group_entry)
Xiao Guangrong8c9ed8e2009-09-25 13:51:17 +08002709 group_sched_out(event, cpuctx, ctx);
Peter Zijlstra9ed60602010-06-11 17:36:35 +02002710 }
Peter Zijlstra1b9a6442010-09-07 18:32:22 +02002711 perf_pmu_enable(ctx->pmu);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002712}
2713
2714/*
Peter Zijlstra5a3126d2013-10-07 17:12:48 +02002715 * Test whether two contexts are equivalent, i.e. whether they have both been
2716 * cloned from the same version of the same context.
2717 *
2718 * Equivalence is measured using a generation number in the context that is
2719 * incremented on each modification to it; see unclone_ctx(), list_add_event()
2720 * and list_del_event().
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002721 */
2722static int context_equiv(struct perf_event_context *ctx1,
2723 struct perf_event_context *ctx2)
2724{
Peter Zijlstra211de6e2014-09-30 19:23:08 +02002725 lockdep_assert_held(&ctx1->lock);
2726 lockdep_assert_held(&ctx2->lock);
2727
Peter Zijlstra5a3126d2013-10-07 17:12:48 +02002728 /* Pinning disables the swap optimization */
2729 if (ctx1->pin_count || ctx2->pin_count)
2730 return 0;
2731
2732 /* If ctx1 is the parent of ctx2 */
2733 if (ctx1 == ctx2->parent_ctx && ctx1->generation == ctx2->parent_gen)
2734 return 1;
2735
2736 /* If ctx2 is the parent of ctx1 */
2737 if (ctx1->parent_ctx == ctx2 && ctx1->parent_gen == ctx2->generation)
2738 return 1;
2739
2740 /*
2741 * If ctx1 and ctx2 have the same parent, we flatten the parent
2742 * hierarchy, see perf_event_init_context().
2743 */
2744 if (ctx1->parent_ctx && ctx1->parent_ctx == ctx2->parent_ctx &&
2745 ctx1->parent_gen == ctx2->parent_gen)
2746 return 1;
2747
2748 /* Unmatched */
2749 return 0;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002750}
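/*
 * Worked example of the generation scheme: if a child's ctx2 was
 * cloned from parent ctx1 while ctx1->generation was 4, then
 * ctx2->parent_gen == 4, and as long as no events have been added to
 * or removed from ctx1 since (ctx1->generation still 4), the first
 * test above succeeds and the scheduler may simply swap the two
 * contexts instead of rescheduling every event.
 */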
2751
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002752static void __perf_event_sync_stat(struct perf_event *event,
2753 struct perf_event *next_event)
2754{
2755 u64 value;
2756
2757 if (!event->attr.inherit_stat)
2758 return;
2759
2760 /*
2761 * Update the event value; we cannot use perf_event_read()
2762 * because we're in the middle of a context switch and have IRQs
2763 * disabled, which upsets smp_call_function_single(). However,
2764 * we know the event must be on the current CPU, therefore we
2765 * don't need to use it.
2766 */
Peter Zijlstra0d3d73a2017-09-05 14:16:28 +02002767 if (event->state == PERF_EVENT_STATE_ACTIVE)
Peter Zijlstra3dbebf12009-11-20 22:19:52 +01002768 event->pmu->read(event);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002769
Peter Zijlstra0d3d73a2017-09-05 14:16:28 +02002770 perf_event_update_time(event);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002771
2772 /*
2773 * In order to keep per-task stats reliable we need to flip the event
2774 * values when we flip the contexts.
2775 */
Peter Zijlstrae7850592010-05-21 14:43:08 +02002776 value = local64_read(&next_event->count);
2777 value = local64_xchg(&event->count, value);
2778 local64_set(&next_event->count, value);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002779
2780 swap(event->total_time_enabled, next_event->total_time_enabled);
2781 swap(event->total_time_running, next_event->total_time_running);
2782
2783 /*
2784 * Since we swizzled the values, update the user visible data too.
2785 */
2786 perf_event_update_userpage(event);
2787 perf_event_update_userpage(next_event);
2788}
2789
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002790static void perf_event_sync_stat(struct perf_event_context *ctx,
2791 struct perf_event_context *next_ctx)
2792{
2793 struct perf_event *event, *next_event;
2794
2795 if (!ctx->nr_stat)
2796 return;
2797
Peter Zijlstra02ffdbc2009-11-20 22:19:50 +01002798 update_context_time(ctx);
2799
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002800 event = list_first_entry(&ctx->event_list,
2801 struct perf_event, event_entry);
2802
2803 next_event = list_first_entry(&next_ctx->event_list,
2804 struct perf_event, event_entry);
2805
2806 while (&event->event_entry != &ctx->event_list &&
2807 &next_event->event_entry != &next_ctx->event_list) {
2808
2809 __perf_event_sync_stat(event, next_event);
2810
2811 event = list_next_entry(event, event_entry);
2812 next_event = list_next_entry(next_event, event_entry);
2813 }
2814}
2815
Peter Zijlstrafe4b04f2011-02-02 13:19:09 +01002816static void perf_event_context_sched_out(struct task_struct *task, int ctxn,
2817 struct task_struct *next)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002818{
Peter Zijlstra8dc85d5472010-09-02 16:50:03 +02002819 struct perf_event_context *ctx = task->perf_event_ctxp[ctxn];
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002820 struct perf_event_context *next_ctx;
Peter Zijlstra5a3126d2013-10-07 17:12:48 +02002821 struct perf_event_context *parent, *next_parent;
Peter Zijlstra108b02c2010-09-06 14:32:03 +02002822 struct perf_cpu_context *cpuctx;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002823 int do_switch = 1;
2824
Peter Zijlstra108b02c2010-09-06 14:32:03 +02002825 if (likely(!ctx))
2826 return;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002827
Peter Zijlstra108b02c2010-09-06 14:32:03 +02002828 cpuctx = __get_cpu_context(ctx);
2829 if (!cpuctx->task_ctx)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002830 return;
2831
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002832 rcu_read_lock();
Peter Zijlstra8dc85d5472010-09-02 16:50:03 +02002833 next_ctx = next->perf_event_ctxp[ctxn];
Peter Zijlstra5a3126d2013-10-07 17:12:48 +02002834 if (!next_ctx)
2835 goto unlock;
2836
2837 parent = rcu_dereference(ctx->parent_ctx);
2838 next_parent = rcu_dereference(next_ctx->parent_ctx);
2839
2840	/* If neither context has a parent context, they cannot be clones. */
Jiri Olsa802c8a62014-09-12 13:18:28 +02002841 if (!parent && !next_parent)
Peter Zijlstra5a3126d2013-10-07 17:12:48 +02002842 goto unlock;
2843
2844 if (next_parent == ctx || next_ctx == parent || next_parent == parent) {
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002845 /*
2846 * Looks like the two contexts are clones, so we might be
2847 * able to optimize the context switch. We lock both
2848 * contexts and check that they are clones under the
2849 * lock (including re-checking that neither has been
2850 * uncloned in the meantime). It doesn't matter which
2851 * order we take the locks because no other cpu could
2852 * be trying to lock both of these tasks.
2853 */
Thomas Gleixnere625cce12009-11-17 18:02:06 +01002854 raw_spin_lock(&ctx->lock);
2855 raw_spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002856 if (context_equiv(ctx, next_ctx)) {
Peter Zijlstra63b6da32016-01-14 16:05:37 +01002857 WRITE_ONCE(ctx->task, next);
2858 WRITE_ONCE(next_ctx->task, task);
Yan, Zheng5a158c32014-11-04 21:56:02 -05002859
2860 swap(ctx->task_ctx_data, next_ctx->task_ctx_data);
2861
Peter Zijlstra63b6da32016-01-14 16:05:37 +01002862 /*
2863 * RCU_INIT_POINTER here is safe because we've not
2864 * modified the ctx and the above modification of
2865 * ctx->task and ctx->task_ctx_data are immaterial
2866 * since those values are always verified under
2867 * ctx->lock which we're now holding.
2868 */
2869 RCU_INIT_POINTER(task->perf_event_ctxp[ctxn], next_ctx);
2870 RCU_INIT_POINTER(next->perf_event_ctxp[ctxn], ctx);
2871
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002872 do_switch = 0;
2873
2874 perf_event_sync_stat(ctx, next_ctx);
2875 }
Thomas Gleixnere625cce12009-11-17 18:02:06 +01002876 raw_spin_unlock(&next_ctx->lock);
2877 raw_spin_unlock(&ctx->lock);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002878 }
Peter Zijlstra5a3126d2013-10-07 17:12:48 +02002879unlock:
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002880 rcu_read_unlock();
2881
2882 if (do_switch) {
Peter Zijlstrafacc4302011-04-09 21:17:42 +02002883 raw_spin_lock(&ctx->lock);
Alexander Shishkin487f05e2017-01-19 18:43:30 +02002884 task_ctx_sched_out(cpuctx, ctx, EVENT_ALL);
Peter Zijlstrafacc4302011-04-09 21:17:42 +02002885 raw_spin_unlock(&ctx->lock);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002886 }
2887}
2888
Peter Zijlstrae48c1782016-07-06 09:18:30 +02002889static DEFINE_PER_CPU(struct list_head, sched_cb_list);
2890
Yan, Zhengba532502014-11-04 21:55:58 -05002891void perf_sched_cb_dec(struct pmu *pmu)
2892{
Peter Zijlstrae48c1782016-07-06 09:18:30 +02002893 struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
2894
Yan, Zhengba532502014-11-04 21:55:58 -05002895 this_cpu_dec(perf_sched_cb_usages);
Peter Zijlstrae48c1782016-07-06 09:18:30 +02002896
2897 if (!--cpuctx->sched_cb_usage)
2898 list_del(&cpuctx->sched_cb_entry);
Yan, Zhengba532502014-11-04 21:55:58 -05002899}
2900
Peter Zijlstrae48c1782016-07-06 09:18:30 +02002901
Yan, Zhengba532502014-11-04 21:55:58 -05002902void perf_sched_cb_inc(struct pmu *pmu)
2903{
Peter Zijlstrae48c1782016-07-06 09:18:30 +02002904 struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
2905
2906 if (!cpuctx->sched_cb_usage++)
2907 list_add(&cpuctx->sched_cb_entry, this_cpu_ptr(&sched_cb_list));
2908
Yan, Zhengba532502014-11-04 21:55:58 -05002909 this_cpu_inc(perf_sched_cb_usages);
2910}
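/*
 * A sketch of a PMU driver consuming this machinery; "mypmu" is
 * hypothetical (x86 uses the same pattern to flush PEBS buffers on
 * context switch). Note that pmu::sched_task must be set, since
 * perf_pmu_sched_task() below warns on registered PMUs without it:
 */
static void mypmu_sched_task(struct perf_event_context *ctx, bool sched_in)
{
	if (!sched_in)
		mypmu_drain_hw_buffer();	/* hypothetical helper */
}

static int mypmu_add(struct perf_event *event, int flags)
{
	perf_sched_cb_inc(event->ctx->pmu);
	return 0;	/* a real driver also programs the counter here */
}

static void mypmu_del(struct perf_event *event, int flags)
{
	perf_sched_cb_dec(event->ctx->pmu);
}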
2911
2912/*
2913 * This function provides the context switch callback to the lower code
2914 * layer. It is invoked ONLY when the context switch callback is enabled.
Peter Zijlstra09e61b4f2016-07-06 18:02:43 +02002915 *
2916 * This callback is relevant even to per-CPU events; for example, multi-event
2917 * PEBS requires this to provide PID/TID information. This requires that we flush
2918 * all queued PEBS records before we context switch to a new task.
Yan, Zhengba532502014-11-04 21:55:58 -05002919 */
2920static void perf_pmu_sched_task(struct task_struct *prev,
2921 struct task_struct *next,
2922 bool sched_in)
2923{
2924 struct perf_cpu_context *cpuctx;
2925 struct pmu *pmu;
Yan, Zhengba532502014-11-04 21:55:58 -05002926
2927 if (prev == next)
2928 return;
2929
Peter Zijlstrae48c1782016-07-06 09:18:30 +02002930 list_for_each_entry(cpuctx, this_cpu_ptr(&sched_cb_list), sched_cb_entry) {
David Carrillo-Cisneros1fd7e412017-01-18 11:24:54 -08002931 pmu = cpuctx->ctx.pmu; /* software PMUs will not have sched_task */
Yan, Zhengba532502014-11-04 21:55:58 -05002932
Peter Zijlstrae48c1782016-07-06 09:18:30 +02002933 if (WARN_ON_ONCE(!pmu->sched_task))
2934 continue;
Yan, Zhengba532502014-11-04 21:55:58 -05002935
Peter Zijlstrae48c1782016-07-06 09:18:30 +02002936 perf_ctx_lock(cpuctx, cpuctx->task_ctx);
2937 perf_pmu_disable(pmu);
Yan, Zhengba532502014-11-04 21:55:58 -05002938
Peter Zijlstrae48c1782016-07-06 09:18:30 +02002939 pmu->sched_task(cpuctx->task_ctx, sched_in);
Yan, Zhengba532502014-11-04 21:55:58 -05002940
Peter Zijlstrae48c1782016-07-06 09:18:30 +02002941 perf_pmu_enable(pmu);
2942 perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
Yan, Zhengba532502014-11-04 21:55:58 -05002943 }
Yan, Zhengba532502014-11-04 21:55:58 -05002944}
2945
Adrian Hunter45ac1402015-07-21 12:44:02 +03002946static void perf_event_switch(struct task_struct *task,
2947 struct task_struct *next_prev, bool sched_in);
2948
Peter Zijlstra8dc85d5472010-09-02 16:50:03 +02002949#define for_each_task_context_nr(ctxn) \
2950 for ((ctxn) = 0; (ctxn) < perf_nr_task_contexts; (ctxn)++)
2951
2952/*
2953 * Called from scheduler to remove the events of the current task,
2954 * with interrupts disabled.
2955 *
2956 * We stop each event and update the event value in event->count.
2957 *
2958 * This does not protect us against NMI, but disable()
2959 * sets the disabled bit in the control field of the event _before_
2960 * accessing the event control register. If an NMI hits, then it will
2961 * not restart the event.
2962 */
Jiri Olsaab0cce52012-05-23 13:13:02 +02002963void __perf_event_task_sched_out(struct task_struct *task,
2964 struct task_struct *next)
Peter Zijlstra8dc85d5472010-09-02 16:50:03 +02002965{
2966 int ctxn;
2967
Yan, Zhengba532502014-11-04 21:55:58 -05002968 if (__this_cpu_read(perf_sched_cb_usages))
2969 perf_pmu_sched_task(task, next, false);
2970
Adrian Hunter45ac1402015-07-21 12:44:02 +03002971 if (atomic_read(&nr_switch_events))
2972 perf_event_switch(task, next, false);
2973
Peter Zijlstra8dc85d5472010-09-02 16:50:03 +02002974 for_each_task_context_nr(ctxn)
2975 perf_event_context_sched_out(task, ctxn, next);
Stephane Eraniane5d13672011-02-14 11:20:01 +02002976
2977 /*
2978 * If cgroup events exist on this CPU, then we need
2979 * to check whether we have to switch out PMU state;
2980 * cgroup events are system-wide mode only.
2981 */
Christoph Lameter4a32fea2014-08-17 12:30:27 -05002982 if (atomic_read(this_cpu_ptr(&perf_cgroup_events)))
Stephane Eraniana8d757e2011-08-25 15:58:03 +02002983 perf_cgroup_sched_out(task, next);
Peter Zijlstra8dc85d5472010-09-02 16:50:03 +02002984}
2985
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002986/*
2987 * Called with IRQs disabled
2988 */
Frederic Weisbecker5b0311e2010-01-17 11:59:13 +01002989static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
2990 enum event_type_t event_type)
2991{
2992 ctx_sched_out(&cpuctx->ctx, cpuctx, event_type);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002993}
2994
2995static void
Frederic Weisbecker5b0311e2010-01-17 11:59:13 +01002996ctx_pinned_sched_in(struct perf_event_context *ctx,
Peter Zijlstra6e377382010-02-11 13:21:58 +01002997 struct perf_cpu_context *cpuctx)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002998{
2999 struct perf_event *event;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003000
Frederic Weisbecker889ff012010-01-09 20:04:47 +01003001 list_for_each_entry(event, &ctx->pinned_groups, group_entry) {
3002 if (event->state <= PERF_EVENT_STATE_OFF)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003003 continue;
Stephane Eranian5632ab12011-01-03 18:20:01 +02003004 if (!event_filter_match(event))
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003005 continue;
3006
Xiao Guangrong8c9ed8e2009-09-25 13:51:17 +08003007 if (group_can_go_on(event, cpuctx, 1))
Peter Zijlstra6e377382010-02-11 13:21:58 +01003008 group_sched_in(event, cpuctx, ctx);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003009
3010 /*
3011 * If this pinned group hasn't been scheduled,
3012 * put it in error state.
3013 */
Peter Zijlstra0d3d73a2017-09-05 14:16:28 +02003014 if (event->state == PERF_EVENT_STATE_INACTIVE)
3015 perf_event_set_state(event, PERF_EVENT_STATE_ERROR);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003016 }
Frederic Weisbecker5b0311e2010-01-17 11:59:13 +01003017}
3018
3019static void
3020ctx_flexible_sched_in(struct perf_event_context *ctx,
Peter Zijlstra6e377382010-02-11 13:21:58 +01003021 struct perf_cpu_context *cpuctx)
Frederic Weisbecker5b0311e2010-01-17 11:59:13 +01003022{
3023 struct perf_event *event;
3024 int can_add_hw = 1;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003025
Frederic Weisbecker889ff012010-01-09 20:04:47 +01003026 list_for_each_entry(event, &ctx->flexible_groups, group_entry) {
3027 /* Ignore events in OFF or ERROR state */
3028 if (event->state <= PERF_EVENT_STATE_OFF)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003029 continue;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003030 /*
3031 * Listen to the 'cpu' scheduling filter constraint
3032 * of events:
3033 */
Stephane Eranian5632ab12011-01-03 18:20:01 +02003034 if (!event_filter_match(event))
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003035 continue;
3036
Peter Zijlstra9ed60602010-06-11 17:36:35 +02003037 if (group_can_go_on(event, cpuctx, can_add_hw)) {
Peter Zijlstra6e377382010-02-11 13:21:58 +01003038 if (group_sched_in(event, cpuctx, ctx))
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003039 can_add_hw = 0;
Peter Zijlstra9ed60602010-06-11 17:36:35 +02003040 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003041 }
Frederic Weisbecker5b0311e2010-01-17 11:59:13 +01003042}
3043
3044static void
3045ctx_sched_in(struct perf_event_context *ctx,
3046 struct perf_cpu_context *cpuctx,
Stephane Eraniane5d13672011-02-14 11:20:01 +02003047 enum event_type_t event_type,
3048 struct task_struct *task)
Frederic Weisbecker5b0311e2010-01-17 11:59:13 +01003049{
Peter Zijlstradb24d332011-04-09 21:17:45 +02003050 int is_active = ctx->is_active;
Peter Zijlstrac994d612016-01-08 09:20:23 +01003051 u64 now;
Stephane Eraniane5d13672011-02-14 11:20:01 +02003052
Peter Zijlstrac994d612016-01-08 09:20:23 +01003053 lockdep_assert_held(&ctx->lock);
Frederic Weisbecker5b0311e2010-01-17 11:59:13 +01003054
Frederic Weisbecker5b0311e2010-01-17 11:59:13 +01003055 if (likely(!ctx->nr_events))
Peter Zijlstrafacc4302011-04-09 21:17:42 +02003056 return;
Frederic Weisbecker5b0311e2010-01-17 11:59:13 +01003057
Peter Zijlstra3cbaa592016-02-24 18:45:47 +01003058 ctx->is_active |= (event_type | EVENT_TIME);
Peter Zijlstra63e30d32016-01-08 11:39:10 +01003059 if (ctx->task) {
3060 if (!is_active)
3061 cpuctx->task_ctx = ctx;
3062 else
3063 WARN_ON_ONCE(cpuctx->task_ctx != ctx);
3064 }
3065
Peter Zijlstra3cbaa592016-02-24 18:45:47 +01003066 is_active ^= ctx->is_active; /* changed bits */
3067
3068 if (is_active & EVENT_TIME) {
3069 /* start ctx time */
3070 now = perf_clock();
3071 ctx->timestamp = now;
3072 perf_cgroup_set_timestamp(task, ctx);
3073 }
3074
Frederic Weisbecker5b0311e2010-01-17 11:59:13 +01003075 /*
3076 * First go through the list and put on any pinned groups
3077 * in order to give them the best chance of going on.
3078 */
Peter Zijlstra3cbaa592016-02-24 18:45:47 +01003079 if (is_active & EVENT_PINNED)
Peter Zijlstra6e377382010-02-11 13:21:58 +01003080 ctx_pinned_sched_in(ctx, cpuctx);
Frederic Weisbecker5b0311e2010-01-17 11:59:13 +01003081
3082 /* Then walk through the lower prio flexible groups */
Peter Zijlstra3cbaa592016-02-24 18:45:47 +01003083 if (is_active & EVENT_FLEXIBLE)
Peter Zijlstra6e377382010-02-11 13:21:58 +01003084 ctx_flexible_sched_in(ctx, cpuctx);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003085}
3086
Frederic Weisbecker329c0e02010-01-17 12:56:05 +01003087static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
Stephane Eraniane5d13672011-02-14 11:20:01 +02003088 enum event_type_t event_type,
3089 struct task_struct *task)
Frederic Weisbecker329c0e02010-01-17 12:56:05 +01003090{
3091 struct perf_event_context *ctx = &cpuctx->ctx;
3092
Stephane Eraniane5d13672011-02-14 11:20:01 +02003093 ctx_sched_in(ctx, cpuctx, event_type, task);
Frederic Weisbecker329c0e02010-01-17 12:56:05 +01003094}
3095
Stephane Eraniane5d13672011-02-14 11:20:01 +02003096static void perf_event_context_sched_in(struct perf_event_context *ctx,
3097 struct task_struct *task)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003098{
Peter Zijlstra8dc85d5472010-09-02 16:50:03 +02003099 struct perf_cpu_context *cpuctx;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003100
Peter Zijlstra108b02c2010-09-06 14:32:03 +02003101 cpuctx = __get_cpu_context(ctx);
Frederic Weisbecker329c0e02010-01-17 12:56:05 +01003102 if (cpuctx->task_ctx == ctx)
3103 return;
3104
Peter Zijlstrafacc4302011-04-09 21:17:42 +02003105 perf_ctx_lock(cpuctx, ctx);
leilei.linfdccc3f2017-08-09 08:29:21 +08003106 /*
3107 * We must check ctx->nr_events while holding ctx->lock, such
3108 * that we serialize against perf_install_in_context().
3109 */
3110 if (!ctx->nr_events)
3111 goto unlock;
3112
Peter Zijlstra1b9a6442010-09-07 18:32:22 +02003113 perf_pmu_disable(ctx->pmu);
Frederic Weisbecker329c0e02010-01-17 12:56:05 +01003114 /*
3115 * We want to keep the following priority order:
3116 * cpu pinned (that don't need to move), task pinned,
3117 * cpu flexible, task flexible.
Alexander Shishkinfe45baf2017-01-19 18:43:29 +02003118 *
3119 * However, if the task's ctx is not carrying any pinned
3120 * events, there is no need to flip the cpuctx's events around.
Frederic Weisbecker329c0e02010-01-17 12:56:05 +01003121 */
Alexander Shishkinfe45baf2017-01-19 18:43:29 +02003122 if (!list_empty(&ctx->pinned_groups))
3123 cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
Peter Zijlstra63e30d32016-01-08 11:39:10 +01003124 perf_event_sched_in(cpuctx, ctx, task);
Peter Zijlstrafacc4302011-04-09 21:17:42 +02003125 perf_pmu_enable(ctx->pmu);
leilei.linfdccc3f2017-08-09 08:29:21 +08003126
3127unlock:
Peter Zijlstrafacc4302011-04-09 21:17:42 +02003128 perf_ctx_unlock(cpuctx, ctx);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003129}
3130
Peter Zijlstra8dc85d5472010-09-02 16:50:03 +02003131/*
3132 * Called from scheduler to add the events of the current task
3133 * with interrupts disabled.
3134 *
3135 * We restore the event value and then enable it.
3136 *
3137 * This does not protect us against NMI, but enable()
3138 * sets the enabled bit in the control field of the event _before_
3139 * accessing the event control register. If an NMI hits, then it will
3140 * keep the event running.
3141 */
Jiri Olsaab0cce52012-05-23 13:13:02 +02003142void __perf_event_task_sched_in(struct task_struct *prev,
3143 struct task_struct *task)
Peter Zijlstra8dc85d5472010-09-02 16:50:03 +02003144{
3145 struct perf_event_context *ctx;
3146 int ctxn;
3147
Peter Zijlstra7e41d172016-01-08 09:21:40 +01003148 /*
3149 * If cgroup events exist on this CPU, then we need to check if we have
3150 * to switch in PMU state; cgroup events are system-wide mode only.
3151 *
3152 * Since cgroup events are CPU events, we must schedule these in before
3153 * we schedule in the task events.
3154 */
3155 if (atomic_read(this_cpu_ptr(&perf_cgroup_events)))
3156 perf_cgroup_sched_in(prev, task);
3157
Peter Zijlstra8dc85d5472010-09-02 16:50:03 +02003158 for_each_task_context_nr(ctxn) {
3159 ctx = task->perf_event_ctxp[ctxn];
3160 if (likely(!ctx))
3161 continue;
3162
Stephane Eraniane5d13672011-02-14 11:20:01 +02003163 perf_event_context_sched_in(ctx, task);
Peter Zijlstra8dc85d5472010-09-02 16:50:03 +02003164 }
Stephane Eraniand010b332012-02-09 23:21:00 +01003165
Adrian Hunter45ac1402015-07-21 12:44:02 +03003166 if (atomic_read(&nr_switch_events))
3167 perf_event_switch(task, prev, true);
3168
Yan, Zhengba532502014-11-04 21:55:58 -05003169 if (__this_cpu_read(perf_sched_cb_usages))
3170 perf_pmu_sched_task(prev, task, true);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003171}
3172
Peter Zijlstraabd50712010-01-26 18:50:16 +01003173static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count)
3174{
3175 u64 frequency = event->attr.sample_freq;
3176 u64 sec = NSEC_PER_SEC;
3177 u64 divisor, dividend;
3178
3179 int count_fls, nsec_fls, frequency_fls, sec_fls;
3180
3181 count_fls = fls64(count);
3182 nsec_fls = fls64(nsec);
3183 frequency_fls = fls64(frequency);
3184 sec_fls = 30;
3185
3186 /*
3187 * We got @count in @nsec, with a target of sample_freq HZ;
3188 * the target period becomes:
3189 *
3190 * @count * 10^9
3191 * period = -------------------
3192 * @nsec * sample_freq
3193 *
3194 */
3195
3196 /*
3197 * Reduce accuracy by one bit such that @a and @b converge
3198 * to a similar magnitude.
3199 */
Peter Zijlstrafe4b04f2011-02-02 13:19:09 +01003200#define REDUCE_FLS(a, b) \
Peter Zijlstraabd50712010-01-26 18:50:16 +01003201do { \
3202 if (a##_fls > b##_fls) { \
3203 a >>= 1; \
3204 a##_fls--; \
3205 } else { \
3206 b >>= 1; \
3207 b##_fls--; \
3208 } \
3209} while (0)
3210
3211 /*
3212 * Reduce accuracy until either term fits in a u64, then proceed with
3213 * the other, so that finally we can do a u64/u64 division.
3214 */
3215 while (count_fls + sec_fls > 64 && nsec_fls + frequency_fls > 64) {
3216 REDUCE_FLS(nsec, frequency);
3217 REDUCE_FLS(sec, count);
3218 }
3219
3220 if (count_fls + sec_fls > 64) {
3221 divisor = nsec * frequency;
3222
3223 while (count_fls + sec_fls > 64) {
3224 REDUCE_FLS(count, sec);
3225 divisor >>= 1;
3226 }
3227
3228 dividend = count * sec;
3229 } else {
3230 dividend = count * sec;
3231
3232 while (nsec_fls + frequency_fls > 64) {
3233 REDUCE_FLS(nsec, frequency);
3234 dividend >>= 1;
3235 }
3236
3237 divisor = nsec * frequency;
3238 }
3239
Peter Zijlstraf6ab91ad2010-06-04 15:18:01 +02003240 if (!divisor)
3241 return dividend;
3242
Peter Zijlstraabd50712010-01-26 18:50:16 +01003243 return div64_u64(dividend, divisor);
3244}
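/*
 * Worked example of the math above: an event counted @count =
 * 2,000,000 in @nsec = 4,000,000 (4ms), with sample_freq = 1000:
 *
 *	period = (2e6 * 1e9) / (4e6 * 1000) = 2e15 / 4e9 = 500,000
 *
 * i.e. at the observed rate of 5e8 events/sec, one sample every
 * 500,000 events yields the requested 1000 samples/sec. The
 * REDUCE_FLS() dance above exists only to keep both terms of this
 * division within 64 bits (2e15 fits, but faster events over longer
 * windows would not).
 */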
3245
Stephane Eraniane050e3f2012-01-26 17:03:19 +01003246static DEFINE_PER_CPU(int, perf_throttled_count);
3247static DEFINE_PER_CPU(u64, perf_throttled_seq);
3248
Stephane Eranianf39d47f2012-02-07 14:39:57 +01003249static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count, bool disable)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003250{
3251 struct hw_perf_event *hwc = &event->hw;
Peter Zijlstraf6ab91ad2010-06-04 15:18:01 +02003252 s64 period, sample_period;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003253 s64 delta;
3254
Peter Zijlstraabd50712010-01-26 18:50:16 +01003255 period = perf_calculate_period(event, nsec, count);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003256
3257 delta = (s64)(period - hwc->sample_period);
3258 delta = (delta + 7) / 8; /* low pass filter */
3259
3260 sample_period = hwc->sample_period + delta;
3261
3262 if (!sample_period)
3263 sample_period = 1;
3264
3265 hwc->sample_period = sample_period;
Peter Zijlstraabd50712010-01-26 18:50:16 +01003266
Peter Zijlstrae7850592010-05-21 14:43:08 +02003267 if (local64_read(&hwc->period_left) > 8*sample_period) {
Stephane Eranianf39d47f2012-02-07 14:39:57 +01003268 if (disable)
3269 event->pmu->stop(event, PERF_EF_UPDATE);
3270
Peter Zijlstrae7850592010-05-21 14:43:08 +02003271 local64_set(&hwc->period_left, 0);
Stephane Eranianf39d47f2012-02-07 14:39:57 +01003272
3273 if (disable)
3274 event->pmu->start(event, PERF_EF_RELOAD);
Peter Zijlstraabd50712010-01-26 18:50:16 +01003275 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003276}
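/*
 * Worked example of the low pass filter above: with
 * hwc->sample_period = 100,000 and a computed target period of
 * 500,000, delta = (500,000 - 100,000 + 7) / 8 = 50,000, so
 * successive ticks move the period 100,000 -> 150,000 -> 193,750 ->
 * ... asymptotically toward 500,000 rather than jumping there, which
 * damps oscillation when the observed event rate is noisy.
 */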
3277
Stephane Eraniane050e3f2012-01-26 17:03:19 +01003278/*
3279 * combine freq adjustment with unthrottling to avoid two passes over the
3280 * events. At the same time, make sure, having freq events does not change
3281 * the rate of unthrottling as that would introduce bias.
3282 */
3283static void perf_adjust_freq_unthr_context(struct perf_event_context *ctx,
3284 int needs_unthr)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003285{
3286 struct perf_event *event;
3287 struct hw_perf_event *hwc;
Stephane Eraniane050e3f2012-01-26 17:03:19 +01003288 u64 now, period = TICK_NSEC;
Peter Zijlstraabd50712010-01-26 18:50:16 +01003289 s64 delta;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003290
Stephane Eraniane050e3f2012-01-26 17:03:19 +01003291 /*
3292 * We only need to iterate over all events if:
3293 * - the context has events in frequency mode (needs freq adjust), or
3294 * - there are events to unthrottle on this CPU.
3295 */
3296 if (!(ctx->nr_freq || needs_unthr))
Peter Zijlstra0f5a2602011-11-16 14:38:16 +01003297 return;
3298
Stephane Eraniane050e3f2012-01-26 17:03:19 +01003299 raw_spin_lock(&ctx->lock);
Stephane Eranianf39d47f2012-02-07 14:39:57 +01003300 perf_pmu_disable(ctx->pmu);
Stephane Eraniane050e3f2012-01-26 17:03:19 +01003301
Paul Mackerras03541f82009-10-14 16:58:03 +11003302 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003303 if (event->state != PERF_EVENT_STATE_ACTIVE)
3304 continue;
3305
Stephane Eranian5632ab12011-01-03 18:20:01 +02003306 if (!event_filter_match(event))
Peter Zijlstra5d27c232009-12-17 13:16:32 +01003307 continue;
3308
Alexander Shishkin44377272013-12-16 14:17:36 +02003309 perf_pmu_disable(event->pmu);
3310
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003311 hwc = &event->hw;
3312
Jiri Olsaae23bff2013-08-24 16:45:54 +02003313 if (hwc->interrupts == MAX_INTERRUPTS) {
Stephane Eraniane050e3f2012-01-26 17:03:19 +01003314 hwc->interrupts = 0;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003315 perf_log_throttle(event, 1);
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02003316 event->pmu->start(event, 0);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003317 }
3318
3319 if (!event->attr.freq || !event->attr.sample_freq)
Alexander Shishkin44377272013-12-16 14:17:36 +02003320 goto next;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003321
Stephane Eraniane050e3f2012-01-26 17:03:19 +01003322 /*
3323 * stop the event and update event->count
3324 */
3325 event->pmu->stop(event, PERF_EF_UPDATE);
3326
Peter Zijlstrae7850592010-05-21 14:43:08 +02003327 now = local64_read(&event->count);
Peter Zijlstraabd50712010-01-26 18:50:16 +01003328 delta = now - hwc->freq_count_stamp;
3329 hwc->freq_count_stamp = now;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003330
Stephane Eraniane050e3f2012-01-26 17:03:19 +01003331 /*
3332 * Restart the event,
3333 * reloading only if the value has changed.
Stephane Eranianf39d47f2012-02-07 14:39:57 +01003334 * We have stopped the event, so tell
3335 * perf_adjust_period() to avoid stopping it
3336 * twice.
Stephane Eraniane050e3f2012-01-26 17:03:19 +01003337 */
Peter Zijlstraabd50712010-01-26 18:50:16 +01003338 if (delta > 0)
Stephane Eranianf39d47f2012-02-07 14:39:57 +01003339 perf_adjust_period(event, period, delta, false);
Stephane Eraniane050e3f2012-01-26 17:03:19 +01003340
3341 event->pmu->start(event, delta > 0 ? PERF_EF_RELOAD : 0);
Alexander Shishkin44377272013-12-16 14:17:36 +02003342 next:
3343 perf_pmu_enable(event->pmu);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003344 }
Stephane Eraniane050e3f2012-01-26 17:03:19 +01003345
Stephane Eranianf39d47f2012-02-07 14:39:57 +01003346 perf_pmu_enable(ctx->pmu);
Stephane Eraniane050e3f2012-01-26 17:03:19 +01003347 raw_spin_unlock(&ctx->lock);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003348}
3349
3350/*
3351 * Round-robin a context's events:
3352 */
3353static void rotate_ctx(struct perf_event_context *ctx)
3354{
Thomas Gleixnerdddd3372010-11-24 10:05:55 +01003355 /*
3356 * Rotate the first entry of the non-pinned groups to the end. Rotation
3357 * might be disabled by the inheritance code.
3358 */
3359 if (!ctx->rotate_disable)
3360 list_rotate_left(&ctx->flexible_groups);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003361}
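/*
 * e.g. with flexible groups [A, B, C] and hardware room for only two,
 * the rotation yields [B, C, A], so the subsequent sched-in gives the
 * counters to B and C and it is A's turn to sit out: round-robin
 * multiplexing across ticks.
 */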
3362
Stephane Eranian9e630202013-04-03 14:21:33 +02003363static int perf_rotate_context(struct perf_cpu_context *cpuctx)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003364{
Peter Zijlstra8dc85d5472010-09-02 16:50:03 +02003365 struct perf_event_context *ctx = NULL;
Mark Rutland2fde4f92015-01-07 15:01:54 +00003366 int rotate = 0;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003367
Peter Zijlstrab5ab4cd2010-09-06 16:32:21 +02003368 if (cpuctx->ctx.nr_events) {
Peter Zijlstrab5ab4cd2010-09-06 16:32:21 +02003369 if (cpuctx->ctx.nr_events != cpuctx->ctx.nr_active)
3370 rotate = 1;
3371 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003372
Peter Zijlstra8dc85d5472010-09-02 16:50:03 +02003373 ctx = cpuctx->task_ctx;
Peter Zijlstrab5ab4cd2010-09-06 16:32:21 +02003374 if (ctx && ctx->nr_events) {
Peter Zijlstrab5ab4cd2010-09-06 16:32:21 +02003375 if (ctx->nr_events != ctx->nr_active)
3376 rotate = 1;
3377 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003378
Stephane Eraniane050e3f2012-01-26 17:03:19 +01003379 if (!rotate)
Peter Zijlstra0f5a2602011-11-16 14:38:16 +01003380 goto done;
3381
Peter Zijlstrafacc4302011-04-09 21:17:42 +02003382 perf_ctx_lock(cpuctx, cpuctx->task_ctx);
Peter Zijlstra1b9a6442010-09-07 18:32:22 +02003383 perf_pmu_disable(cpuctx->ctx.pmu);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003384
Stephane Eraniane050e3f2012-01-26 17:03:19 +01003385 cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
3386 if (ctx)
3387 ctx_sched_out(ctx, cpuctx, EVENT_FLEXIBLE);
Peter Zijlstrad4944a02010-03-08 13:51:20 +01003388
Stephane Eraniane050e3f2012-01-26 17:03:19 +01003389 rotate_ctx(&cpuctx->ctx);
3390 if (ctx)
3391 rotate_ctx(ctx);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003392
Stephane Eraniane050e3f2012-01-26 17:03:19 +01003393 perf_event_sched_in(cpuctx, ctx, current);
Peter Zijlstra0f5a2602011-11-16 14:38:16 +01003394
3395 perf_pmu_enable(cpuctx->ctx.pmu);
3396 perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
Peter Zijlstrab5ab4cd2010-09-06 16:32:21 +02003397done:
Stephane Eranian9e630202013-04-03 14:21:33 +02003398
3399 return rotate;
Peter Zijlstrae9d2b062010-09-17 11:28:50 +02003400}
3401
3402void perf_event_task_tick(void)
3403{
Mark Rutland2fde4f92015-01-07 15:01:54 +00003404 struct list_head *head = this_cpu_ptr(&active_ctx_list);
3405 struct perf_event_context *ctx, *tmp;
Stephane Eraniane050e3f2012-01-26 17:03:19 +01003406 int throttled;
Peter Zijlstrae9d2b062010-09-17 11:28:50 +02003407
Frederic Weisbecker16444642017-11-06 16:01:24 +01003408 lockdep_assert_irqs_disabled();
Peter Zijlstrae9d2b062010-09-17 11:28:50 +02003409
Stephane Eraniane050e3f2012-01-26 17:03:19 +01003410 __this_cpu_inc(perf_throttled_seq);
3411 throttled = __this_cpu_xchg(perf_throttled_count, 0);
Frederic Weisbecker555e0c12015-07-16 17:42:29 +02003412 tick_dep_clear_cpu(smp_processor_id(), TICK_DEP_BIT_PERF_EVENTS);
Stephane Eraniane050e3f2012-01-26 17:03:19 +01003413
Mark Rutland2fde4f92015-01-07 15:01:54 +00003414 list_for_each_entry_safe(ctx, tmp, head, active_ctx_list)
Stephane Eraniane050e3f2012-01-26 17:03:19 +01003415 perf_adjust_freq_unthr_context(ctx, throttled);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003416}
3417
Frederic Weisbecker889ff012010-01-09 20:04:47 +01003418static int event_enable_on_exec(struct perf_event *event,
3419 struct perf_event_context *ctx)
3420{
3421 if (!event->attr.enable_on_exec)
3422 return 0;
3423
3424 event->attr.enable_on_exec = 0;
3425 if (event->state >= PERF_EVENT_STATE_INACTIVE)
3426 return 0;
3427
Peter Zijlstra0d3d73a2017-09-05 14:16:28 +02003428 perf_event_set_state(event, PERF_EVENT_STATE_INACTIVE);
Frederic Weisbecker889ff012010-01-09 20:04:47 +01003429
3430 return 1;
3431}
3432
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003433/*
3434 * Enable all of a task's events that have been marked enable-on-exec.
3435 * This expects task == current.
3436 */
Peter Zijlstrac1274492015-12-10 20:57:40 +01003437static void perf_event_enable_on_exec(int ctxn)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003438{
Peter Zijlstrac1274492015-12-10 20:57:40 +01003439 struct perf_event_context *ctx, *clone_ctx = NULL;
Alexander Shishkin487f05e2017-01-19 18:43:30 +02003440 enum event_type_t event_type = 0;
Peter Zijlstra3e349502016-01-08 10:01:18 +01003441 struct perf_cpu_context *cpuctx;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003442 struct perf_event *event;
3443 unsigned long flags;
3444 int enabled = 0;
3445
3446 local_irq_save(flags);
Peter Zijlstrac1274492015-12-10 20:57:40 +01003447 ctx = current->perf_event_ctxp[ctxn];
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003448 if (!ctx || !ctx->nr_events)
3449 goto out;
3450
Peter Zijlstra3e349502016-01-08 10:01:18 +01003451 cpuctx = __get_cpu_context(ctx);
3452 perf_ctx_lock(cpuctx, ctx);
Peter Zijlstra7fce2502016-02-24 18:45:48 +01003453 ctx_sched_out(ctx, cpuctx, EVENT_TIME);
Alexander Shishkin487f05e2017-01-19 18:43:30 +02003454 list_for_each_entry(event, &ctx->event_list, event_entry) {
Peter Zijlstra3e349502016-01-08 10:01:18 +01003455 enabled |= event_enable_on_exec(event, ctx);
Alexander Shishkin487f05e2017-01-19 18:43:30 +02003456 event_type |= get_event_type(event);
3457 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003458
3459 /*
Peter Zijlstra3e349502016-01-08 10:01:18 +01003460 * Unclone and reschedule this context if we enabled any event.
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003461 */
Peter Zijlstra3e349502016-01-08 10:01:18 +01003462 if (enabled) {
Peter Zijlstra211de6e2014-09-30 19:23:08 +02003463 clone_ctx = unclone_ctx(ctx);
Alexander Shishkin487f05e2017-01-19 18:43:30 +02003464 ctx_resched(cpuctx, ctx, event_type);
Peter Zijlstra7bbba0e2017-02-15 16:12:20 +01003465 } else {
3466 ctx_sched_in(ctx, cpuctx, EVENT_TIME, current);
Peter Zijlstra3e349502016-01-08 10:01:18 +01003467 }
3468 perf_ctx_unlock(cpuctx, ctx);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003469
Peter Zijlstra9ed60602010-06-11 17:36:35 +02003470out:
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003471 local_irq_restore(flags);
Peter Zijlstra211de6e2014-09-30 19:23:08 +02003472
3473 if (clone_ctx)
3474 put_ctx(clone_ctx);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003475}
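/*
 * A userspace sketch of what drives this path: open a disabled
 * counter on the current task with enable_on_exec set, then exec()
 * the workload; the kernel enables the event at exec time, so the
 * setup cost is not counted. Error handling is trimmed.
 */
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

static int open_on_exec_counter(void)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_INSTRUCTIONS;
	attr.disabled = 1;
	attr.enable_on_exec = 1;

	/* pid = 0: current task; cpu = -1: any CPU */
	return syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
}
/* ... followed by execvp(); the counter starts ticking there. */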
3476
Peter Zijlstra0492d4c2015-09-03 20:07:48 -07003477struct perf_read_data {
3478 struct perf_event *event;
3479 bool group;
Sukadev Bhattiprolu7d889622015-09-03 20:07:50 -07003480 int ret;
Peter Zijlstra0492d4c2015-09-03 20:07:48 -07003481};
3482
Peter Zijlstra451d24d2017-01-31 11:27:10 +01003483static int __perf_event_read_cpu(struct perf_event *event, int event_cpu)
David Carrillo-Cisnerosd6a2f9032016-08-17 13:55:06 -07003484{
David Carrillo-Cisnerosd6a2f9032016-08-17 13:55:06 -07003485 u16 local_pkg, event_pkg;
3486
3487 if (event->group_caps & PERF_EV_CAP_READ_ACTIVE_PKG) {
Peter Zijlstra451d24d2017-01-31 11:27:10 +01003488 int local_cpu = smp_processor_id();
3489
3490 event_pkg = topology_physical_package_id(event_cpu);
3491 local_pkg = topology_physical_package_id(local_cpu);
David Carrillo-Cisnerosd6a2f9032016-08-17 13:55:06 -07003492
3493 if (event_pkg == local_pkg)
3494 return local_cpu;
3495 }
3496
3497 return event_cpu;
3498}
3499
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003500/*
3501 * Cross CPU call to read the hardware event
3502 */
3503static void __perf_event_read(void *info)
3504{
Peter Zijlstra0492d4c2015-09-03 20:07:48 -07003505 struct perf_read_data *data = info;
3506 struct perf_event *sub, *event = data->event;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003507 struct perf_event_context *ctx = event->ctx;
Peter Zijlstra108b02c2010-09-06 14:32:03 +02003508 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
Sukadev Bhattiprolu4a00c162015-09-03 20:07:51 -07003509 struct pmu *pmu = event->pmu;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003510
3511 /*
3512 * If this is a task context, we need to check whether it is
3513 * the current task context of this CPU. If not, it has been
3514 * scheduled out before the smp call arrived. In that case
3515 * event->count would have been updated to a recent sample
3516 * when the event was scheduled out.
3517 */
3518 if (ctx->task && cpuctx->task_ctx != ctx)
3519 return;
3520
Thomas Gleixnere625cce12009-11-17 18:02:06 +01003521 raw_spin_lock(&ctx->lock);
Peter Zijlstra0c1cbc12017-09-05 16:26:44 +02003522 if (ctx->is_active & EVENT_TIME) {
Peter Zijlstra542e72f2011-01-26 15:38:35 +01003523 update_context_time(ctx);
Stephane Eraniane5d13672011-02-14 11:20:01 +02003524 update_cgrp_time_from_event(event);
3525 }
Peter Zijlstra0492d4c2015-09-03 20:07:48 -07003526
Peter Zijlstra0d3d73a2017-09-05 14:16:28 +02003527 perf_event_update_time(event);
3528 if (data->group)
3529 perf_event_update_sibling_time(event);
Peter Zijlstra0c1cbc12017-09-05 16:26:44 +02003530
Sukadev Bhattiprolu4a00c162015-09-03 20:07:51 -07003531 if (event->state != PERF_EVENT_STATE_ACTIVE)
Peter Zijlstra0492d4c2015-09-03 20:07:48 -07003532 goto unlock;
3533
Sukadev Bhattiprolu4a00c162015-09-03 20:07:51 -07003534 if (!data->group) {
3535 pmu->read(event);
3536 data->ret = 0;
3537 goto unlock;
3538 }
3539
3540 pmu->start_txn(pmu, PERF_PMU_TXN_READ);
3541
3542 pmu->read(event);
3543
Peter Zijlstra0492d4c2015-09-03 20:07:48 -07003544 list_for_each_entry(sub, &event->sibling_list, group_entry) {
Sukadev Bhattiprolu4a00c162015-09-03 20:07:51 -07003545 if (sub->state == PERF_EVENT_STATE_ACTIVE) {
3546 /*
3547 * Use the sibling's PMU rather than @event's, since the
3548 * sibling could be on a different (e.g. software) PMU.
3549 */
Peter Zijlstra0492d4c2015-09-03 20:07:48 -07003550 sub->pmu->read(sub);
Sukadev Bhattiprolu4a00c162015-09-03 20:07:51 -07003551 }
Peter Zijlstra0492d4c2015-09-03 20:07:48 -07003552 }
Sukadev Bhattiprolu4a00c162015-09-03 20:07:51 -07003553
3554 data->ret = pmu->commit_txn(pmu);
Peter Zijlstra0492d4c2015-09-03 20:07:48 -07003555
3556unlock:
Thomas Gleixnere625cce12009-11-17 18:02:06 +01003557 raw_spin_unlock(&ctx->lock);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003558}
3559
Peter Zijlstrab5e58792010-05-21 14:43:12 +02003560static inline u64 perf_event_count(struct perf_event *event)
3561{
Vikas Shivappac39a0e22017-07-25 14:14:20 -07003562 return local64_read(&event->count) + atomic64_read(&event->child_count);
Peter Zijlstrab5e58792010-05-21 14:43:12 +02003563}
3564
Kaixu Xiaffe86902015-08-06 07:02:32 +00003565/*
3566 * NMI-safe method to read a local event, that is, an event that:
3567 *
3568 * - is either for the current task, or for this CPU;
3569 * - does not have inherit set, as inherited task events
3570 * will not be local and we cannot read them atomically;
3571 * - must not have a pmu::count method.
3572 */
Yonghong Song7d9285e2017-10-05 09:19:19 -07003573int perf_event_read_local(struct perf_event *event, u64 *value,
3574 u64 *enabled, u64 *running)
Kaixu Xiaffe86902015-08-06 07:02:32 +00003575{
3576 unsigned long flags;
Alexei Starovoitovf91840a2017-06-02 21:03:52 -07003577 int ret = 0;
Kaixu Xiaffe86902015-08-06 07:02:32 +00003578
3579 /*
3580 * Disabling interrupts avoids all counter scheduling (context
3581 * switches, timer based rotation and IPIs).
3582 */
3583 local_irq_save(flags);
3584
Kaixu Xiaffe86902015-08-06 07:02:32 +00003585 /*
3586 * It must not be an event with inherit set; we cannot read
3587 * all child counters from atomic context.
3588 */
Alexei Starovoitovf91840a2017-06-02 21:03:52 -07003589 if (event->attr.inherit) {
3590 ret = -EOPNOTSUPP;
3591 goto out;
3592 }
Kaixu Xiaffe86902015-08-06 07:02:32 +00003593
Alexei Starovoitovf91840a2017-06-02 21:03:52 -07003594 /* If this is a per-task event, it must be for current */
3595 if ((event->attach_state & PERF_ATTACH_TASK) &&
3596 event->hw.target != current) {
3597 ret = -EINVAL;
3598 goto out;
3599 }
3600
3601 /* If this is a per-CPU event, it must be for this CPU */
3602 if (!(event->attach_state & PERF_ATTACH_TASK) &&
3603 event->cpu != smp_processor_id()) {
3604 ret = -EINVAL;
3605 goto out;
3606 }
Kaixu Xiaffe86902015-08-06 07:02:32 +00003607
3608 /*
3609 * If the event is currently on this CPU, it's either a per-task event,
3610 * or local to this CPU. Furthermore, it means it's ACTIVE (otherwise
3611 * oncpu == -1).
3612 */
3613 if (event->oncpu == smp_processor_id())
3614 event->pmu->read(event);
3615
Alexei Starovoitovf91840a2017-06-02 21:03:52 -07003616 *value = local64_read(&event->count);
Peter Zijlstra0d3d73a2017-09-05 14:16:28 +02003617 if (enabled || running) {
3618 u64 now = event->shadow_ctx_time + perf_clock();
3619 u64 __enabled, __running;
3620
3621 __perf_update_times(event, now, &__enabled, &__running);
3622 if (enabled)
3623 *enabled = __enabled;
3624 if (running)
3625 *running = __running;
3626 }
Alexei Starovoitovf91840a2017-06-02 21:03:52 -07003627out:
Kaixu Xiaffe86902015-08-06 07:02:32 +00003628 local_irq_restore(flags);
3629
Alexei Starovoitovf91840a2017-06-02 21:03:52 -07003630 return ret;
Kaixu Xiaffe86902015-08-06 07:02:32 +00003631}
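/*
 * A sketch of a kernel-side caller honouring the constraints above;
 * this is essentially what the bpf_perf_event_read() helper does,
 * though the scaling shown here is illustrative:
 */
static u64 read_local_scaled(struct perf_event *event)
{
	u64 value, enabled, running;

	if (perf_event_read_local(event, &value, &enabled, &running))
		return 0;	/* not a local event, or inherit set */

	/* scale for multiplexing, the way userspace tools do: */
	if (running && running < enabled)
		value = div64_u64(value * enabled, running);

	return value;
}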
3632
Sukadev Bhattiprolu7d889622015-09-03 20:07:50 -07003633static int perf_event_read(struct perf_event *event, bool group)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003634{
Peter Zijlstra0c1cbc12017-09-05 16:26:44 +02003635 enum perf_event_state state = READ_ONCE(event->state);
Peter Zijlstra451d24d2017-01-31 11:27:10 +01003636 int event_cpu, ret = 0;
Sukadev Bhattiprolu7d889622015-09-03 20:07:50 -07003637
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003638 /*
3639 * If the event is enabled and currently active on a CPU, update the
3640 * value in the event structure:
3641 */
Peter Zijlstra0c1cbc12017-09-05 16:26:44 +02003642again:
3643 if (state == PERF_EVENT_STATE_ACTIVE) {
3644 struct perf_read_data data;
3645
3646 /*
3647 * Orders the ->state and ->oncpu loads such that if we see
3648 * ACTIVE we must also see the right ->oncpu.
3649 *
3650 * Matches the smp_wmb() from event_sched_in().
3651 */
3652 smp_rmb();
David Carrillo-Cisnerosd6a2f9032016-08-17 13:55:06 -07003653
Peter Zijlstra451d24d2017-01-31 11:27:10 +01003654 event_cpu = READ_ONCE(event->oncpu);
3655 if ((unsigned)event_cpu >= nr_cpu_ids)
3656 return 0;
3657
Peter Zijlstra0c1cbc12017-09-05 16:26:44 +02003658 data = (struct perf_read_data){
3659 .event = event,
3660 .group = group,
3661 .ret = 0,
3662 };
3663
Peter Zijlstra451d24d2017-01-31 11:27:10 +01003664 preempt_disable();
3665 event_cpu = __perf_event_read_cpu(event, event_cpu);
David Carrillo-Cisnerosd6a2f9032016-08-17 13:55:06 -07003666
Peter Zijlstra58763142016-08-30 10:15:03 +02003667 /*
3668 * Purposely ignore the smp_call_function_single() return
3669 * value.
3670 *
Peter Zijlstra451d24d2017-01-31 11:27:10 +01003671 * If event_cpu isn't a valid CPU it means the event got
Peter Zijlstra58763142016-08-30 10:15:03 +02003672 * scheduled out and that will have updated the event count.
3673 *
3674 * Therefore, either way, we'll have an up-to-date event count
3675 * after this.
3676 */
Peter Zijlstra451d24d2017-01-31 11:27:10 +01003677 (void)smp_call_function_single(event_cpu, __perf_event_read, &data, 1);
3678 preempt_enable();
Peter Zijlstra58763142016-08-30 10:15:03 +02003679 ret = data.ret;
Peter Zijlstra0c1cbc12017-09-05 16:26:44 +02003680
3681 } else if (state == PERF_EVENT_STATE_INACTIVE) {
Peter Zijlstra2b8988c2009-11-20 22:19:54 +01003682 struct perf_event_context *ctx = event->ctx;
3683 unsigned long flags;
3684
Thomas Gleixnere625cce12009-11-17 18:02:06 +01003685 raw_spin_lock_irqsave(&ctx->lock, flags);
Peter Zijlstra0c1cbc12017-09-05 16:26:44 +02003686 state = event->state;
3687 if (state != PERF_EVENT_STATE_INACTIVE) {
3688 raw_spin_unlock_irqrestore(&ctx->lock, flags);
3689 goto again;
3690 }
3691
Stephane Eranianc530ccd2010-10-15 15:26:01 +02003692 /*
Peter Zijlstra0c1cbc12017-09-05 16:26:44 +02003693 * May read while the context is not active (e.g., the thread is
3694 * blocked); in that case we cannot update the context time.
Stephane Eranianc530ccd2010-10-15 15:26:01 +02003695 */
Peter Zijlstra0c1cbc12017-09-05 16:26:44 +02003696 if (ctx->is_active & EVENT_TIME) {
Stephane Eranianc530ccd2010-10-15 15:26:01 +02003697 update_context_time(ctx);
Stephane Eraniane5d13672011-02-14 11:20:01 +02003698 update_cgrp_time_from_event(event);
3699 }
Peter Zijlstra0c1cbc12017-09-05 16:26:44 +02003700
Peter Zijlstra0d3d73a2017-09-05 14:16:28 +02003701 perf_event_update_time(event);
Peter Zijlstra0492d4c2015-09-03 20:07:48 -07003702 if (group)
Peter Zijlstra0d3d73a2017-09-05 14:16:28 +02003703 perf_event_update_sibling_time(event);
Thomas Gleixnere625cce12009-11-17 18:02:06 +01003704 raw_spin_unlock_irqrestore(&ctx->lock, flags);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003705 }
Sukadev Bhattiprolu7d889622015-09-03 20:07:50 -07003706
3707 return ret;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003708}
3709
3710/*
3711 * Initialize the perf_event context in a task_struct:
3712 */
Peter Zijlstraeb184472010-09-07 15:55:13 +02003713static void __perf_event_init_context(struct perf_event_context *ctx)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003714{
Thomas Gleixnere625cce12009-11-17 18:02:06 +01003715 raw_spin_lock_init(&ctx->lock);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003716 mutex_init(&ctx->mutex);
Mark Rutland2fde4f92015-01-07 15:01:54 +00003717 INIT_LIST_HEAD(&ctx->active_ctx_list);
Frederic Weisbecker889ff012010-01-09 20:04:47 +01003718 INIT_LIST_HEAD(&ctx->pinned_groups);
3719 INIT_LIST_HEAD(&ctx->flexible_groups);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003720 INIT_LIST_HEAD(&ctx->event_list);
3721 atomic_set(&ctx->refcount, 1);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003722}
3723
Peter Zijlstraeb184472010-09-07 15:55:13 +02003724static struct perf_event_context *
3725alloc_perf_context(struct pmu *pmu, struct task_struct *task)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003726{
3727 struct perf_event_context *ctx;
Peter Zijlstraeb184472010-09-07 15:55:13 +02003728
3729 ctx = kzalloc(sizeof(struct perf_event_context), GFP_KERNEL);
3730 if (!ctx)
3731 return NULL;
3732
3733 __perf_event_init_context(ctx);
3734 if (task) {
3735 ctx->task = task;
3736 get_task_struct(task);
3737 }
3738 ctx->pmu = pmu;
3739
3740 return ctx;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003741}
3742
Matt Helsley2ebd4ff2010-09-13 13:01:19 -07003743static struct task_struct *
3744find_lively_task_by_vpid(pid_t vpid)
3745{
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003746 struct task_struct *task;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003747
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003748 rcu_read_lock();
Matt Helsley2ebd4ff2010-09-13 13:01:19 -07003749 if (!vpid)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003750 task = current;
3751 else
Matt Helsley2ebd4ff2010-09-13 13:01:19 -07003752 task = find_task_by_vpid(vpid);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003753 if (task)
3754 get_task_struct(task);
3755 rcu_read_unlock();
3756
3757 if (!task)
3758 return ERR_PTR(-ESRCH);
3759
Matt Helsley2ebd4ff2010-09-13 13:01:19 -07003760 return task;
Matt Helsley2ebd4ff2010-09-13 13:01:19 -07003761}
3762
Peter Zijlstrafe4b04f2011-02-02 13:19:09 +01003763/*
3764 * Returns a matching context with refcount and pincount.
3765 */
Peter Zijlstra108b02c2010-09-06 14:32:03 +02003766static struct perf_event_context *
Yan, Zheng4af57ef2014-11-04 21:56:01 -05003767find_get_context(struct pmu *pmu, struct task_struct *task,
3768 struct perf_event *event)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003769{
Peter Zijlstra211de6e2014-09-30 19:23:08 +02003770 struct perf_event_context *ctx, *clone_ctx = NULL;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003771 struct perf_cpu_context *cpuctx;
Yan, Zheng4af57ef2014-11-04 21:56:01 -05003772 void *task_ctx_data = NULL;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003773 unsigned long flags;
Peter Zijlstra8dc85d5472010-09-02 16:50:03 +02003774 int ctxn, err;
Yan, Zheng4af57ef2014-11-04 21:56:01 -05003775 int cpu = event->cpu;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003776
Oleg Nesterov22a4ec72011-01-18 17:10:08 +01003777 if (!task) {
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003778 /* Must be root to operate on a CPU event: */
3779 if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
3780 return ERR_PTR(-EACCES);
3781
Peter Zijlstra108b02c2010-09-06 14:32:03 +02003782 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003783 ctx = &cpuctx->ctx;
3784 get_ctx(ctx);
Peter Zijlstrafe4b04f2011-02-02 13:19:09 +01003785 ++ctx->pin_count;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003786
3787 return ctx;
3788 }
3789
Peter Zijlstra8dc85d5472010-09-02 16:50:03 +02003790 err = -EINVAL;
3791 ctxn = pmu->task_ctx_nr;
3792 if (ctxn < 0)
3793 goto errout;
3794
Yan, Zheng4af57ef2014-11-04 21:56:01 -05003795 if (event->attach_state & PERF_ATTACH_TASK_DATA) {
3796 task_ctx_data = kzalloc(pmu->task_ctx_size, GFP_KERNEL);
3797 if (!task_ctx_data) {
3798 err = -ENOMEM;
3799 goto errout;
3800 }
3801 }
3802
Peter Zijlstra9ed60602010-06-11 17:36:35 +02003803retry:
Peter Zijlstra8dc85d5472010-09-02 16:50:03 +02003804 ctx = perf_lock_task_context(task, ctxn, &flags);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003805 if (ctx) {
Peter Zijlstra211de6e2014-09-30 19:23:08 +02003806 clone_ctx = unclone_ctx(ctx);
Peter Zijlstrafe4b04f2011-02-02 13:19:09 +01003807 ++ctx->pin_count;
Yan, Zheng4af57ef2014-11-04 21:56:01 -05003808
3809 if (task_ctx_data && !ctx->task_ctx_data) {
3810 ctx->task_ctx_data = task_ctx_data;
3811 task_ctx_data = NULL;
3812 }
Thomas Gleixnere625cce12009-11-17 18:02:06 +01003813 raw_spin_unlock_irqrestore(&ctx->lock, flags);
Peter Zijlstra211de6e2014-09-30 19:23:08 +02003814
3815 if (clone_ctx)
3816 put_ctx(clone_ctx);
Peter Zijlstra9137fb22011-04-09 21:17:41 +02003817 } else {
Peter Zijlstraeb184472010-09-07 15:55:13 +02003818 ctx = alloc_perf_context(pmu, task);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003819 err = -ENOMEM;
3820 if (!ctx)
3821 goto errout;
Peter Zijlstraeb184472010-09-07 15:55:13 +02003822
Yan, Zheng4af57ef2014-11-04 21:56:01 -05003823 if (task_ctx_data) {
3824 ctx->task_ctx_data = task_ctx_data;
3825 task_ctx_data = NULL;
3826 }
3827
Oleg Nesterovdbe08d82011-01-19 19:22:07 +01003828 err = 0;
3829 mutex_lock(&task->perf_event_mutex);
3830 /*
3831 * If it has already passed perf_event_exit_task(),
3832 * we must see PF_EXITING; it takes this mutex too.
3833 */
3834 if (task->flags & PF_EXITING)
3835 err = -ESRCH;
3836 else if (task->perf_event_ctxp[ctxn])
3837 err = -EAGAIN;
Peter Zijlstrafe4b04f2011-02-02 13:19:09 +01003838 else {
Peter Zijlstra9137fb22011-04-09 21:17:41 +02003839 get_ctx(ctx);
Peter Zijlstrafe4b04f2011-02-02 13:19:09 +01003840 ++ctx->pin_count;
Oleg Nesterovdbe08d82011-01-19 19:22:07 +01003841 rcu_assign_pointer(task->perf_event_ctxp[ctxn], ctx);
Peter Zijlstrafe4b04f2011-02-02 13:19:09 +01003842 }
Oleg Nesterovdbe08d82011-01-19 19:22:07 +01003843 mutex_unlock(&task->perf_event_mutex);
3844
3845 if (unlikely(err)) {
Peter Zijlstra9137fb22011-04-09 21:17:41 +02003846 put_ctx(ctx);
Oleg Nesterovdbe08d82011-01-19 19:22:07 +01003847
3848 if (err == -EAGAIN)
3849 goto retry;
3850 goto errout;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003851 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003852 }
3853
Yan, Zheng4af57ef2014-11-04 21:56:01 -05003854 kfree(task_ctx_data);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003855 return ctx;
3856
Peter Zijlstra9ed60602010-06-11 17:36:35 +02003857errout:
Yan, Zheng4af57ef2014-11-04 21:56:01 -05003858 kfree(task_ctx_data);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003859 return ERR_PTR(err);
3860}
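/*
 * Hypothetical usage sketch (not a call chain in this file): a caller of
 * find_get_context() receives the context with one reference and one pin
 * and must drop both once the event is installed, or on error:
 *
 *	struct perf_event_context *ctx;
 *
 *	ctx = find_get_context(pmu, task, event);
 *	if (IS_ERR(ctx))
 *		return PTR_ERR(ctx);
 *	...
 *	perf_unpin_context(ctx);
 *	put_ctx(ctx);
 */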
3861
Li Zefan6fb29152009-10-15 11:21:42 +08003862static void perf_event_free_filter(struct perf_event *event);
Alexei Starovoitov25415172015-03-25 12:49:20 -07003863static void perf_event_free_bpf_prog(struct perf_event *event);
Li Zefan6fb29152009-10-15 11:21:42 +08003864
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003865static void free_event_rcu(struct rcu_head *head)
3866{
3867 struct perf_event *event;
3868
3869 event = container_of(head, struct perf_event, rcu_head);
3870 if (event->ns)
3871 put_pid_ns(event->ns);
Li Zefan6fb29152009-10-15 11:21:42 +08003872 perf_event_free_filter(event);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003873 kfree(event);
3874}
3875
Peter Zijlstrab69cf532014-03-14 10:50:33 +01003876static void ring_buffer_attach(struct perf_event *event,
3877 struct ring_buffer *rb);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003878
Kan Liangf2fb6be2016-03-23 11:24:37 -07003879static void detach_sb_event(struct perf_event *event)
3880{
3881 struct pmu_event_list *pel = per_cpu_ptr(&pmu_sb_events, event->cpu);
3882
3883 raw_spin_lock(&pel->lock);
3884 list_del_rcu(&event->sb_list);
3885 raw_spin_unlock(&pel->lock);
3886}
3887
David Carrillo-Cisnerosa4f144e2016-06-01 12:33:05 -07003888static bool is_sb_event(struct perf_event *event)
Kan Liangf2fb6be2016-03-23 11:24:37 -07003889{
David Carrillo-Cisnerosa4f144e2016-06-01 12:33:05 -07003890 struct perf_event_attr *attr = &event->attr;
3891
Kan Liangf2fb6be2016-03-23 11:24:37 -07003892 if (event->parent)
David Carrillo-Cisnerosa4f144e2016-06-01 12:33:05 -07003893 return false;
Kan Liangf2fb6be2016-03-23 11:24:37 -07003894
3895 if (event->attach_state & PERF_ATTACH_TASK)
David Carrillo-Cisnerosa4f144e2016-06-01 12:33:05 -07003896 return false;
Kan Liangf2fb6be2016-03-23 11:24:37 -07003897
David Carrillo-Cisnerosa4f144e2016-06-01 12:33:05 -07003898 if (attr->mmap || attr->mmap_data || attr->mmap2 ||
3899 attr->comm || attr->comm_exec ||
3900 attr->task ||
3901 attr->context_switch)
3902 return true;
3903 return false;
3904}
3905
3906static void unaccount_pmu_sb_event(struct perf_event *event)
3907{
3908 if (is_sb_event(event))
3909 detach_sb_event(event);
Kan Liangf2fb6be2016-03-23 11:24:37 -07003910}
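/*
 * For illustration, a minimal (hypothetical) perf_event_attr that makes
 * is_sb_event() true when opened CPU-wide (pid == -1): a dummy software
 * event used purely to receive side-band records such as mmap and comm:
 *
 *	struct perf_event_attr attr = {
 *		.type	= PERF_TYPE_SOFTWARE,
 *		.config	= PERF_COUNT_SW_DUMMY,
 *		.size	= sizeof(attr),
 *		.mmap	= 1,
 *		.comm	= 1,
 *		.task	= 1,
 *	};
 */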
3911
Frederic Weisbecker4beb31f2013-07-23 02:31:02 +02003912static void unaccount_event_cpu(struct perf_event *event, int cpu)
3913{
3914 if (event->parent)
3915 return;
3916
Frederic Weisbecker4beb31f2013-07-23 02:31:02 +02003917 if (is_cgroup_event(event))
3918 atomic_dec(&per_cpu(perf_cgroup_events, cpu));
3919}
3920
Frederic Weisbecker555e0c12015-07-16 17:42:29 +02003921#ifdef CONFIG_NO_HZ_FULL
3922static DEFINE_SPINLOCK(nr_freq_lock);
3923#endif
3924
3925static void unaccount_freq_event_nohz(void)
3926{
3927#ifdef CONFIG_NO_HZ_FULL
3928 spin_lock(&nr_freq_lock);
3929 if (atomic_dec_and_test(&nr_freq_events))
3930 tick_nohz_dep_clear(TICK_DEP_BIT_PERF_EVENTS);
3931 spin_unlock(&nr_freq_lock);
3932#endif
3933}
3934
3935static void unaccount_freq_event(void)
3936{
3937 if (tick_nohz_full_enabled())
3938 unaccount_freq_event_nohz();
3939 else
3940 atomic_dec(&nr_freq_events);
3941}
3942
Frederic Weisbecker4beb31f2013-07-23 02:31:02 +02003943static void unaccount_event(struct perf_event *event)
3944{
Peter Zijlstra25432ae2016-01-08 11:05:09 +01003945 bool dec = false;
3946
Frederic Weisbecker4beb31f2013-07-23 02:31:02 +02003947 if (event->parent)
3948 return;
3949
3950 if (event->attach_state & PERF_ATTACH_TASK)
Peter Zijlstra25432ae2016-01-08 11:05:09 +01003951 dec = true;
Frederic Weisbecker4beb31f2013-07-23 02:31:02 +02003952 if (event->attr.mmap || event->attr.mmap_data)
3953 atomic_dec(&nr_mmap_events);
3954 if (event->attr.comm)
3955 atomic_dec(&nr_comm_events);
Hari Bathinie4222672017-03-08 02:11:36 +05303956 if (event->attr.namespaces)
3957 atomic_dec(&nr_namespaces_events);
Frederic Weisbecker4beb31f2013-07-23 02:31:02 +02003958 if (event->attr.task)
3959 atomic_dec(&nr_task_events);
Frederic Weisbecker948b26b2013-08-02 18:29:55 +02003960 if (event->attr.freq)
Frederic Weisbecker555e0c12015-07-16 17:42:29 +02003961 unaccount_freq_event();
Adrian Hunter45ac1402015-07-21 12:44:02 +03003962 if (event->attr.context_switch) {
Peter Zijlstra25432ae2016-01-08 11:05:09 +01003963 dec = true;
Adrian Hunter45ac1402015-07-21 12:44:02 +03003964 atomic_dec(&nr_switch_events);
3965 }
Frederic Weisbecker4beb31f2013-07-23 02:31:02 +02003966 if (is_cgroup_event(event))
Peter Zijlstra25432ae2016-01-08 11:05:09 +01003967 dec = true;
Frederic Weisbecker4beb31f2013-07-23 02:31:02 +02003968 if (has_branch_stack(event))
Peter Zijlstra25432ae2016-01-08 11:05:09 +01003969 dec = true;
3970
Peter Zijlstra9107c892016-02-24 18:45:45 +01003971 if (dec) {
3972 if (!atomic_add_unless(&perf_sched_count, -1, 1))
3973 schedule_delayed_work(&perf_sched_work, HZ);
3974 }
Frederic Weisbecker4beb31f2013-07-23 02:31:02 +02003975
3976 unaccount_event_cpu(event, event->cpu);
Kan Liangf2fb6be2016-03-23 11:24:37 -07003977
3978 unaccount_pmu_sb_event(event);
Frederic Weisbecker4beb31f2013-07-23 02:31:02 +02003979}
3980
Peter Zijlstra9107c892016-02-24 18:45:45 +01003981static void perf_sched_delayed(struct work_struct *work)
3982{
3983 mutex_lock(&perf_sched_mutex);
3984 if (atomic_dec_and_test(&perf_sched_count))
3985 static_branch_disable(&perf_sched_events);
3986 mutex_unlock(&perf_sched_mutex);
3987}
3988
Alexander Shishkinbed5b252015-01-30 12:31:06 +02003989/*
3990 * The following implement mutual exclusion of events on "exclusive" pmus
3991 * (PERF_PMU_CAP_EXCLUSIVE). Such pmus can only have one event scheduled
3992 * at a time, so we disallow creating events that might conflict, namely:
3993 *
3994 * 1) cpu-wide events in the presence of per-task events,
3995 * 2) per-task events in the presence of cpu-wide events,
3996 * 3) two matching events on the same context.
3997 *
3998 * The former two cases are handled in the allocation path (perf_event_alloc(),
Peter Zijlstraa0733e62016-01-26 12:14:40 +01003999 * _free_event()), the latter -- before the first perf_install_in_context().
Alexander Shishkinbed5b252015-01-30 12:31:06 +02004000 */
4001static int exclusive_event_init(struct perf_event *event)
4002{
4003 struct pmu *pmu = event->pmu;
4004
4005 if (!(pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE))
4006 return 0;
4007
4008 /*
4009 * Prevent co-existence of per-task and cpu-wide events on the
4010 * same exclusive pmu.
4011 *
4012 * Negative pmu::exclusive_cnt means there are cpu-wide
4013 * events on this "exclusive" pmu, positive means there are
4014 * per-task events.
4015 *
4016 * Since this is called in perf_event_alloc() path, event::ctx
4017 * doesn't exist yet; it is, however, safe to use PERF_ATTACH_TASK
4018 * to mean "per-task event", because unlike other attach states it
4019 * never gets cleared.
4020 */
4021 if (event->attach_state & PERF_ATTACH_TASK) {
4022 if (!atomic_inc_unless_negative(&pmu->exclusive_cnt))
4023 return -EBUSY;
4024 } else {
4025 if (!atomic_dec_unless_positive(&pmu->exclusive_cnt))
4026 return -EBUSY;
4027 }
4028
4029 return 0;
4030}
4031
4032static void exclusive_event_destroy(struct perf_event *event)
4033{
4034 struct pmu *pmu = event->pmu;
4035
4036 if (!(pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE))
4037 return;
4038
4039 /* see comment in exclusive_event_init() */
4040 if (event->attach_state & PERF_ATTACH_TASK)
4041 atomic_dec(&pmu->exclusive_cnt);
4042 else
4043 atomic_inc(&pmu->exclusive_cnt);
4044}
4045
4046static bool exclusive_event_match(struct perf_event *e1, struct perf_event *e2)
4047{
Alexander Shishkin3bf62152016-09-20 18:48:11 +03004048 if ((e1->pmu == e2->pmu) &&
Alexander Shishkinbed5b252015-01-30 12:31:06 +02004049 (e1->cpu == e2->cpu ||
4050 e1->cpu == -1 ||
4051 e2->cpu == -1))
4052 return true;
4053 return false;
4054}
4055
4056/* Called under the same ctx::mutex as perf_install_in_context() */
4057static bool exclusive_event_installable(struct perf_event *event,
4058 struct perf_event_context *ctx)
4059{
4060 struct perf_event *iter_event;
4061 struct pmu *pmu = event->pmu;
4062
4063 if (!(pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE))
4064 return true;
4065
4066 list_for_each_entry(iter_event, &ctx->event_list, event_entry) {
4067 if (exclusive_event_match(iter_event, event))
4068 return false;
4069 }
4070
4071 return true;
4072}
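/*
 * The signed counter above is a general lockless exclusion idiom:
 * positive values count one class of users, negative values the other,
 * and the two classes can never coexist.  A stand-alone sketch, with
 * hypothetical names:
 *
 *	static atomic_t excl_cnt = ATOMIC_INIT(0);
 *
 *	int get_per_task(void)		// fails while cpu-wide users exist
 *	{
 *		return atomic_inc_unless_negative(&excl_cnt) ? 0 : -EBUSY;
 *	}
 *
 *	int get_cpu_wide(void)		// fails while per-task users exist
 *	{
 *		return atomic_dec_unless_positive(&excl_cnt) ? 0 : -EBUSY;
 *	}
 */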
4073
Alexander Shishkin375637b2016-04-27 18:44:46 +03004074static void perf_addr_filters_splice(struct perf_event *event,
4075 struct list_head *head);
4076
Peter Zijlstra683ede42014-05-05 12:11:24 +02004077static void _free_event(struct perf_event *event)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004078{
Peter Zijlstrae360adb2010-10-14 14:01:34 +08004079 irq_work_sync(&event->pending);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004080
Frederic Weisbecker4beb31f2013-07-23 02:31:02 +02004081 unaccount_event(event);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004082
Frederic Weisbecker76369132011-05-19 19:55:04 +02004083 if (event->rb) {
Peter Zijlstra9bb5d402013-06-04 10:44:21 +02004084 /*
4085	 * Can happen when we close an event with redirected output.
4086 *
4087 * Since we have a 0 refcount, perf_mmap_close() will skip
4088 * over us; possibly making our ring_buffer_put() the last.
4089 */
4090 mutex_lock(&event->mmap_mutex);
Peter Zijlstrab69cf532014-03-14 10:50:33 +01004091 ring_buffer_attach(event, NULL);
Peter Zijlstra9bb5d402013-06-04 10:44:21 +02004092 mutex_unlock(&event->mmap_mutex);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004093 }
4094
Stephane Eraniane5d13672011-02-14 11:20:01 +02004095 if (is_cgroup_event(event))
4096 perf_detach_cgroup(event);
4097
Peter Zijlstraa0733e62016-01-26 12:14:40 +01004098 if (!event->parent) {
4099 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
4100 put_callchain_buffers();
4101 }
4102
4103 perf_event_free_bpf_prog(event);
Alexander Shishkin375637b2016-04-27 18:44:46 +03004104 perf_addr_filters_splice(event, NULL);
4105 kfree(event->addr_filters_offs);
Peter Zijlstraa0733e62016-01-26 12:14:40 +01004106
4107 if (event->destroy)
4108 event->destroy(event);
4109
4110 if (event->ctx)
4111 put_ctx(event->ctx);
4112
Alexander Shishkin62a92c82016-06-07 15:44:15 +03004113 exclusive_event_destroy(event);
4114 module_put(event->pmu->module);
Peter Zijlstraa0733e62016-01-26 12:14:40 +01004115
4116 call_rcu(&event->rcu_head, free_event_rcu);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004117}
4118
Peter Zijlstra683ede42014-05-05 12:11:24 +02004119/*
4120 * Used to free events which have a known refcount of 1, such as in error paths
4121 * where the event isn't exposed yet and inherited events.
4122 */
4123static void free_event(struct perf_event *event)
Arjan van de Venfb0459d2009-09-25 12:25:56 +02004124{
Peter Zijlstra683ede42014-05-05 12:11:24 +02004125 if (WARN(atomic_long_cmpxchg(&event->refcount, 1, 0) != 1,
4126 "unexpected event refcount: %ld; ptr=%p\n",
4127 atomic_long_read(&event->refcount), event)) {
4128 /* leak to avoid use-after-free */
4129 return;
4130 }
Arjan van de Venfb0459d2009-09-25 12:25:56 +02004131
Peter Zijlstra683ede42014-05-05 12:11:24 +02004132 _free_event(event);
Arjan van de Venfb0459d2009-09-25 12:25:56 +02004133}
Arjan van de Venfb0459d2009-09-25 12:25:56 +02004134
Peter Zijlstraa66a3052009-11-23 11:37:23 +01004135/*
Jiri Olsaf8697762014-08-01 14:33:01 +02004136 * Remove user event from the owner task.
Peter Zijlstraa66a3052009-11-23 11:37:23 +01004137 */
Jiri Olsaf8697762014-08-01 14:33:01 +02004138static void perf_remove_from_owner(struct perf_event *event)
Peter Zijlstraa66a3052009-11-23 11:37:23 +01004139{
Peter Zijlstra88821352010-11-09 19:01:43 +01004140 struct task_struct *owner;
Peter Zijlstraa66a3052009-11-23 11:37:23 +01004141
Peter Zijlstra88821352010-11-09 19:01:43 +01004142 rcu_read_lock();
Peter Zijlstra88821352010-11-09 19:01:43 +01004143 /*
Peter Zijlstraf47c02c2016-01-26 12:30:14 +01004144 * Matches the smp_store_release() in perf_event_exit_task(). If we
4145 * observe !owner it means the list deletion is complete and we can
4146 * indeed free this event, otherwise we need to serialize on
Peter Zijlstra88821352010-11-09 19:01:43 +01004147 * owner->perf_event_mutex.
4148 */
Will Deacon506458e2017-10-24 11:22:48 +01004149 owner = READ_ONCE(event->owner);
Peter Zijlstra88821352010-11-09 19:01:43 +01004150 if (owner) {
4151 /*
4152 * Since delayed_put_task_struct() also drops the last
4153 * task reference we can safely take a new reference
4154 * while holding the rcu_read_lock().
4155 */
4156 get_task_struct(owner);
4157 }
4158 rcu_read_unlock();
4159
4160 if (owner) {
Peter Zijlstraf63a8da2015-01-23 12:24:14 +01004161 /*
4162 * If we're here through perf_event_exit_task() we're already
4163 * holding ctx->mutex which would be an inversion wrt. the
4164 * normal lock order.
4165 *
4166	 * However we can safely take this lock because it's the child
4167 * ctx->mutex.
4168 */
4169 mutex_lock_nested(&owner->perf_event_mutex, SINGLE_DEPTH_NESTING);
4170
Peter Zijlstra88821352010-11-09 19:01:43 +01004171 /*
4172 * We have to re-check the event->owner field, if it is cleared
4173 * we raced with perf_event_exit_task(), acquiring the mutex
4174 * ensured they're done, and we can proceed with freeing the
4175 * event.
4176 */
Peter Zijlstraf47c02c2016-01-26 12:30:14 +01004177 if (event->owner) {
Peter Zijlstra88821352010-11-09 19:01:43 +01004178 list_del_init(&event->owner_entry);
Peter Zijlstraf47c02c2016-01-26 12:30:14 +01004179 smp_store_release(&event->owner, NULL);
4180 }
Peter Zijlstra88821352010-11-09 19:01:43 +01004181 mutex_unlock(&owner->perf_event_mutex);
4182 put_task_struct(owner);
4183 }
Jiri Olsaf8697762014-08-01 14:33:01 +02004184}
4185
Jiri Olsaf8697762014-08-01 14:33:01 +02004186static void put_event(struct perf_event *event)
4187{
Jiri Olsaf8697762014-08-01 14:33:01 +02004188 if (!atomic_long_dec_and_test(&event->refcount))
4189 return;
4190
Peter Zijlstra683ede42014-05-05 12:11:24 +02004191 _free_event(event);
Al Viroa6fa9412012-08-20 14:59:25 +01004192}
4193
Peter Zijlstrac6e5b732016-01-15 16:07:41 +02004194/*
4195 * Kill an event dead; while event:refcount will preserve the event
4196 * object, it will not preserve its functionality. Once the last 'user'
4197 * gives up the object, we'll destroy the thing.
4198 */
Peter Zijlstra683ede42014-05-05 12:11:24 +02004199int perf_event_release_kernel(struct perf_event *event)
4200{
Peter Zijlstraa4f4bb62016-02-24 18:45:42 +01004201 struct perf_event_context *ctx = event->ctx;
Peter Zijlstrac6e5b732016-01-15 16:07:41 +02004202 struct perf_event *child, *tmp;
Peter Zijlstra82d94852018-01-09 13:10:30 +01004203 LIST_HEAD(free_list);
Peter Zijlstrac6e5b732016-01-15 16:07:41 +02004204
Peter Zijlstraa4f4bb62016-02-24 18:45:42 +01004205 /*
4206 * If we got here through err_file: fput(event_file); we will not have
4207 * attached to a context yet.
4208 */
4209 if (!ctx) {
4210 WARN_ON_ONCE(event->attach_state &
4211 (PERF_ATTACH_CONTEXT|PERF_ATTACH_GROUP));
4212 goto no_ctx;
4213 }
4214
Peter Zijlstra88821352010-11-09 19:01:43 +01004215 if (!is_kernel_event(event))
4216 perf_remove_from_owner(event);
4217
Peter Zijlstra5fa7c8e2016-01-26 15:25:15 +01004218 ctx = perf_event_ctx_lock(event);
Peter Zijlstra683ede42014-05-05 12:11:24 +02004219 WARN_ON_ONCE(ctx->parent_ctx);
Peter Zijlstraa69b0ca2016-02-24 18:45:44 +01004220 perf_remove_from_context(event, DETACH_GROUP);
Peter Zijlstra88821352010-11-09 19:01:43 +01004221
Peter Zijlstraa69b0ca2016-02-24 18:45:44 +01004222 raw_spin_lock_irq(&ctx->lock);
Peter Zijlstra60beda82016-01-26 14:55:02 +01004223 /*
Peter Zijlstrad8a8cfc2017-03-16 13:47:51 +01004224 * Mark this event as STATE_DEAD, there is no external reference to it
Peter Zijlstraa69b0ca2016-02-24 18:45:44 +01004225 * anymore.
Peter Zijlstrac6e5b732016-01-15 16:07:41 +02004226 *
Peter Zijlstraa69b0ca2016-02-24 18:45:44 +01004227 * Anybody acquiring event->child_mutex after the below loop _must_
4228 * also see this, most importantly inherit_event() which will avoid
4229 * placing more children on the list.
Peter Zijlstrac6e5b732016-01-15 16:07:41 +02004230 *
4231 * Thus this guarantees that we will in fact observe and kill _ALL_
4232 * child events.
Peter Zijlstra60beda82016-01-26 14:55:02 +01004233 */
Peter Zijlstraa69b0ca2016-02-24 18:45:44 +01004234 event->state = PERF_EVENT_STATE_DEAD;
4235 raw_spin_unlock_irq(&ctx->lock);
4236
4237 perf_event_ctx_unlock(event, ctx);
Peter Zijlstra60beda82016-01-26 14:55:02 +01004238
Peter Zijlstrac6e5b732016-01-15 16:07:41 +02004239again:
4240 mutex_lock(&event->child_mutex);
4241 list_for_each_entry(child, &event->child_list, child_list) {
Al Viroa6fa9412012-08-20 14:59:25 +01004242
Peter Zijlstrac6e5b732016-01-15 16:07:41 +02004243 /*
4244 * Cannot change, child events are not migrated, see the
4245 * comment with perf_event_ctx_lock_nested().
4246 */
Will Deacon506458e2017-10-24 11:22:48 +01004247 ctx = READ_ONCE(child->ctx);
Peter Zijlstrac6e5b732016-01-15 16:07:41 +02004248 /*
4249 * Since child_mutex nests inside ctx::mutex, we must jump
4250 * through hoops. We start by grabbing a reference on the ctx.
4251 *
4252 * Since the event cannot get freed while we hold the
4253 * child_mutex, the context must also exist and have a !0
4254 * reference count.
4255 */
4256 get_ctx(ctx);
4257
4258 /*
4259 * Now that we have a ctx ref, we can drop child_mutex, and
4260 * acquire ctx::mutex without fear of it going away. Then we
4261 * can re-acquire child_mutex.
4262 */
4263 mutex_unlock(&event->child_mutex);
4264 mutex_lock(&ctx->mutex);
4265 mutex_lock(&event->child_mutex);
4266
4267 /*
4268 * Now that we hold ctx::mutex and child_mutex, revalidate our
4269 * state, if child is still the first entry, it didn't get freed
4270 * and we can continue doing so.
4271 */
4272 tmp = list_first_entry_or_null(&event->child_list,
4273 struct perf_event, child_list);
4274 if (tmp == child) {
4275 perf_remove_from_context(child, DETACH_GROUP);
Peter Zijlstra82d94852018-01-09 13:10:30 +01004276 list_move(&child->child_list, &free_list);
Peter Zijlstrac6e5b732016-01-15 16:07:41 +02004277 /*
4278 * This matches the refcount bump in inherit_event();
4279 * this can't be the last reference.
4280 */
4281 put_event(event);
4282 }
4283
4284 mutex_unlock(&event->child_mutex);
4285 mutex_unlock(&ctx->mutex);
4286 put_ctx(ctx);
4287 goto again;
4288 }
4289 mutex_unlock(&event->child_mutex);
4290
Peter Zijlstra82d94852018-01-09 13:10:30 +01004291 list_for_each_entry_safe(child, tmp, &free_list, child_list) {
4292 list_del(&child->child_list);
4293 free_event(child);
4294 }
4295
Peter Zijlstraa4f4bb62016-02-24 18:45:42 +01004296no_ctx:
4297 put_event(event); /* Must be the 'last' reference */
Peter Zijlstra683ede42014-05-05 12:11:24 +02004298 return 0;
4299}
4300EXPORT_SYMBOL_GPL(perf_event_release_kernel);
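/*
 * This export exists for in-kernel users; a hedged lifecycle sketch
 * (attr setup and error handling trimmed), pairing it with
 * perf_event_create_kernel_counter():
 *
 *	struct perf_event *event;
 *
 *	event = perf_event_create_kernel_counter(&attr, cpu, NULL,
 *						 overflow_handler, NULL);
 *	if (IS_ERR(event))
 *		return PTR_ERR(event);
 *	...
 *	perf_event_release_kernel(event);
 */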
4301
Peter Zijlstra8b10c5e2015-05-01 16:08:46 +02004302/*
4303 * Called when the last reference to the file is gone.
4304 */
Al Viroa6fa9412012-08-20 14:59:25 +01004305static int perf_release(struct inode *inode, struct file *file)
4306{
Peter Zijlstrac6e5b732016-01-15 16:07:41 +02004307 perf_event_release_kernel(file->private_data);
Al Viroa6fa9412012-08-20 14:59:25 +01004308 return 0;
Peter Zijlstraa66a3052009-11-23 11:37:23 +01004309}
4310
Peter Zijlstraca0dd442017-09-05 13:23:44 +02004311static u64 __perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004312{
4313 struct perf_event *child;
4314 u64 total = 0;
4315
Peter Zijlstra59ed4462009-11-20 22:19:55 +01004316 *enabled = 0;
4317 *running = 0;
4318
Peter Zijlstra6f105812009-11-20 22:19:56 +01004319 mutex_lock(&event->child_mutex);
Sukadev Bhattiprolu01add3e2015-09-03 20:07:46 -07004320
Sukadev Bhattiprolu7d889622015-09-03 20:07:50 -07004321 (void)perf_event_read(event, false);
Sukadev Bhattiprolu01add3e2015-09-03 20:07:46 -07004322 total += perf_event_count(event);
4323
Peter Zijlstra59ed4462009-11-20 22:19:55 +01004324 *enabled += event->total_time_enabled +
4325 atomic64_read(&event->child_total_time_enabled);
4326 *running += event->total_time_running +
4327 atomic64_read(&event->child_total_time_running);
4328
4329 list_for_each_entry(child, &event->child_list, child_list) {
Sukadev Bhattiprolu7d889622015-09-03 20:07:50 -07004330 (void)perf_event_read(child, false);
Sukadev Bhattiprolu01add3e2015-09-03 20:07:46 -07004331 total += perf_event_count(child);
Peter Zijlstra59ed4462009-11-20 22:19:55 +01004332 *enabled += child->total_time_enabled;
4333 *running += child->total_time_running;
4334 }
Peter Zijlstra6f105812009-11-20 22:19:56 +01004335 mutex_unlock(&event->child_mutex);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004336
4337 return total;
4338}
Peter Zijlstraca0dd442017-09-05 13:23:44 +02004339
4340u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
4341{
4342 struct perf_event_context *ctx;
4343 u64 count;
4344
4345 ctx = perf_event_ctx_lock(event);
4346 count = __perf_event_read_value(event, enabled, running);
4347 perf_event_ctx_unlock(event, ctx);
4348
4349 return count;
4350}
Arjan van de Venfb0459d2009-09-25 12:25:56 +02004351EXPORT_SYMBOL_GPL(perf_event_read_value);
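/*
 * Hypothetical in-kernel usage: because the event may have been
 * multiplexed off the PMU, callers commonly scale the raw count by the
 * enabled/running ratio, the same estimate userspace applies:
 *
 *	u64 enabled, running, count;
 *
 *	count = perf_event_read_value(event, &enabled, &running);
 *	if (running)
 *		count = div64_u64(count * enabled, running);
 */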
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004352
Sukadev Bhattiprolu7d889622015-09-03 20:07:50 -07004353static int __perf_read_group_add(struct perf_event *leader,
Peter Zijlstrafa8c2692015-09-03 20:07:49 -07004354 u64 read_format, u64 *values)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004355{
Jiri Olsa2aeb1882017-07-20 16:14:55 +02004356 struct perf_event_context *ctx = leader->ctx;
Peter Zijlstrafa8c2692015-09-03 20:07:49 -07004357 struct perf_event *sub;
Jiri Olsa2aeb1882017-07-20 16:14:55 +02004358 unsigned long flags;
Peter Zijlstrafa8c2692015-09-03 20:07:49 -07004359 int n = 1; /* skip @nr */
Sukadev Bhattiprolu7d889622015-09-03 20:07:50 -07004360 int ret;
Peter Zijlstraabf48682009-11-20 22:19:49 +01004361
Sukadev Bhattiprolu7d889622015-09-03 20:07:50 -07004362 ret = perf_event_read(leader, true);
4363 if (ret)
4364 return ret;
Peter Zijlstraf63a8da2015-01-23 12:24:14 +01004365
Peter Zijlstraa9cd8192017-09-05 13:38:24 +02004366 raw_spin_lock_irqsave(&ctx->lock, flags);
4367
Peter Zijlstrafa8c2692015-09-03 20:07:49 -07004368 /*
4369 * Since we co-schedule groups, {enabled,running} times of siblings
4370 * will be identical to those of the leader, so we only publish one
4371 * set.
4372 */
4373 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
4374 values[n++] += leader->total_time_enabled +
4375 atomic64_read(&leader->child_total_time_enabled);
4376 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004377
Peter Zijlstrafa8c2692015-09-03 20:07:49 -07004378 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
4379 values[n++] += leader->total_time_running +
4380 atomic64_read(&leader->child_total_time_running);
4381 }
4382
4383 /*
4384 * Write {count,id} tuples for every sibling.
4385 */
4386 values[n++] += perf_event_count(leader);
Peter Zijlstraabf48682009-11-20 22:19:49 +01004387 if (read_format & PERF_FORMAT_ID)
4388 values[n++] = primary_event_id(leader);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004389
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004390 list_for_each_entry(sub, &leader->sibling_list, group_entry) {
Peter Zijlstrafa8c2692015-09-03 20:07:49 -07004391 values[n++] += perf_event_count(sub);
Peter Zijlstraabf48682009-11-20 22:19:49 +01004392 if (read_format & PERF_FORMAT_ID)
4393 values[n++] = primary_event_id(sub);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004394 }
Sukadev Bhattiprolu7d889622015-09-03 20:07:50 -07004395
Jiri Olsa2aeb1882017-07-20 16:14:55 +02004396 raw_spin_unlock_irqrestore(&ctx->lock, flags);
Sukadev Bhattiprolu7d889622015-09-03 20:07:50 -07004397 return 0;
Peter Zijlstrafa8c2692015-09-03 20:07:49 -07004398}
4399
4400static int perf_read_group(struct perf_event *event,
4401 u64 read_format, char __user *buf)
4402{
4403 struct perf_event *leader = event->group_leader, *child;
4404 struct perf_event_context *ctx = leader->ctx;
Sukadev Bhattiprolu7d889622015-09-03 20:07:50 -07004405 int ret;
Peter Zijlstrafa8c2692015-09-03 20:07:49 -07004406 u64 *values;
4407
4408 lockdep_assert_held(&ctx->mutex);
4409
4410 values = kzalloc(event->read_size, GFP_KERNEL);
4411 if (!values)
4412 return -ENOMEM;
4413
4414 values[0] = 1 + leader->nr_siblings;
4415
4416 /*
4417 * By locking the child_mutex of the leader we effectively
4418	 * lock the child list of all siblings. XXX explain how.
4419 */
4420 mutex_lock(&leader->child_mutex);
4421
Sukadev Bhattiprolu7d889622015-09-03 20:07:50 -07004422 ret = __perf_read_group_add(leader, read_format, values);
4423 if (ret)
4424 goto unlock;
4425
4426 list_for_each_entry(child, &leader->child_list, child_list) {
4427 ret = __perf_read_group_add(child, read_format, values);
4428 if (ret)
4429 goto unlock;
4430 }
Peter Zijlstrafa8c2692015-09-03 20:07:49 -07004431
4432 mutex_unlock(&leader->child_mutex);
4433
Sukadev Bhattiprolu7d889622015-09-03 20:07:50 -07004434 ret = event->read_size;
Peter Zijlstrafa8c2692015-09-03 20:07:49 -07004435 if (copy_to_user(buf, values, event->read_size))
4436 ret = -EFAULT;
Sukadev Bhattiprolu7d889622015-09-03 20:07:50 -07004437 goto out;
Peter Zijlstrafa8c2692015-09-03 20:07:49 -07004438
Sukadev Bhattiprolu7d889622015-09-03 20:07:50 -07004439unlock:
4440 mutex_unlock(&leader->child_mutex);
4441out:
Peter Zijlstrafa8c2692015-09-03 20:07:49 -07004442 kfree(values);
Peter Zijlstraabf48682009-11-20 22:19:49 +01004443 return ret;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004444}
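/*
 * The buffer filled in above follows the PERF_FORMAT_GROUP layout of
 * perf_event_open(2), which the man page writes as a pseudo-struct:
 *
 *	struct read_format {
 *		u64 nr;			// number of events
 *		u64 time_enabled;	// if PERF_FORMAT_TOTAL_TIME_ENABLED
 *		u64 time_running;	// if PERF_FORMAT_TOTAL_TIME_RUNNING
 *		struct {
 *			u64 value;
 *			u64 id;		// if PERF_FORMAT_ID
 *		} cntr[nr];
 *	};
 */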
4445
Peter Zijlstra (Intel)b15f4952015-09-03 20:07:47 -07004446static int perf_read_one(struct perf_event *event,
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004447 u64 read_format, char __user *buf)
4448{
Peter Zijlstra59ed4462009-11-20 22:19:55 +01004449 u64 enabled, running;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004450 u64 values[4];
4451 int n = 0;
4452
Peter Zijlstraca0dd442017-09-05 13:23:44 +02004453 values[n++] = __perf_event_read_value(event, &enabled, &running);
Peter Zijlstra59ed4462009-11-20 22:19:55 +01004454 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
4455 values[n++] = enabled;
4456 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
4457 values[n++] = running;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004458 if (read_format & PERF_FORMAT_ID)
4459 values[n++] = primary_event_id(event);
4460
4461 if (copy_to_user(buf, values, n * sizeof(u64)))
4462 return -EFAULT;
4463
4464 return n * sizeof(u64);
4465}
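/*
 * The corresponding non-group layout (cf. perf_event_open(2)):
 *
 *	struct read_format {
 *		u64 value;
 *		u64 time_enabled;	// if PERF_FORMAT_TOTAL_TIME_ENABLED
 *		u64 time_running;	// if PERF_FORMAT_TOTAL_TIME_RUNNING
 *		u64 id;			// if PERF_FORMAT_ID
 *	};
 */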
4466
Jiri Olsadc633982014-09-12 13:18:26 +02004467static bool is_event_hup(struct perf_event *event)
4468{
4469 bool no_children;
4470
Peter Zijlstraa69b0ca2016-02-24 18:45:44 +01004471 if (event->state > PERF_EVENT_STATE_EXIT)
Jiri Olsadc633982014-09-12 13:18:26 +02004472 return false;
4473
4474 mutex_lock(&event->child_mutex);
4475 no_children = list_empty(&event->child_list);
4476 mutex_unlock(&event->child_mutex);
4477 return no_children;
4478}
4479
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004480/*
4481 * Read the performance event - simple non blocking version for now
4482 */
4483static ssize_t
Peter Zijlstra (Intel)b15f4952015-09-03 20:07:47 -07004484__perf_read(struct perf_event *event, char __user *buf, size_t count)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004485{
4486 u64 read_format = event->attr.read_format;
4487 int ret;
4488
4489 /*
4490	 * Return end-of-file for a read on an event that is in
4491	 * error state (i.e. because it was pinned but it couldn't be
4492	 * scheduled onto the CPU at some point).
4493 */
4494 if (event->state == PERF_EVENT_STATE_ERROR)
4495 return 0;
4496
Arnaldo Carvalho de Meloc320c7b2010-10-20 12:50:11 -02004497 if (count < event->read_size)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004498 return -ENOSPC;
4499
4500 WARN_ON_ONCE(event->ctx->parent_ctx);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004501 if (read_format & PERF_FORMAT_GROUP)
Peter Zijlstra (Intel)b15f4952015-09-03 20:07:47 -07004502 ret = perf_read_group(event, read_format, buf);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004503 else
Peter Zijlstra (Intel)b15f4952015-09-03 20:07:47 -07004504 ret = perf_read_one(event, read_format, buf);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004505
4506 return ret;
4507}
4508
4509static ssize_t
4510perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
4511{
4512 struct perf_event *event = file->private_data;
Peter Zijlstraf63a8da2015-01-23 12:24:14 +01004513 struct perf_event_context *ctx;
4514 int ret;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004515
Peter Zijlstraf63a8da2015-01-23 12:24:14 +01004516 ctx = perf_event_ctx_lock(event);
Peter Zijlstra (Intel)b15f4952015-09-03 20:07:47 -07004517 ret = __perf_read(event, buf, count);
Peter Zijlstraf63a8da2015-01-23 12:24:14 +01004518 perf_event_ctx_unlock(event, ctx);
4519
4520 return ret;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004521}
4522
4523static unsigned int perf_poll(struct file *file, poll_table *wait)
4524{
4525 struct perf_event *event = file->private_data;
Frederic Weisbecker76369132011-05-19 19:55:04 +02004526 struct ring_buffer *rb;
Jiri Olsa61b67682014-08-13 19:39:56 +02004527 unsigned int events = POLLHUP;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004528
Sebastian Andrzej Siewiore708d7a2014-08-04 15:31:08 +02004529 poll_wait(file, &event->waitq, wait);
Jiri Olsa179033b2014-08-07 11:48:26 -04004530
Jiri Olsadc633982014-09-12 13:18:26 +02004531 if (is_event_hup(event))
Jiri Olsa179033b2014-08-07 11:48:26 -04004532 return events;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004533
Peter Zijlstra10c6db12011-11-26 02:47:31 +01004534 /*
Peter Zijlstra9bb5d402013-06-04 10:44:21 +02004535 * Pin the event->rb by taking event->mmap_mutex; otherwise
4536 * perf_event_set_output() can swizzle our rb and make us miss wakeups.
Peter Zijlstra10c6db12011-11-26 02:47:31 +01004537 */
4538 mutex_lock(&event->mmap_mutex);
Peter Zijlstra9bb5d402013-06-04 10:44:21 +02004539 rb = event->rb;
4540 if (rb)
Frederic Weisbecker76369132011-05-19 19:55:04 +02004541 events = atomic_xchg(&rb->poll, 0);
Peter Zijlstra10c6db12011-11-26 02:47:31 +01004542 mutex_unlock(&event->mmap_mutex);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004543 return events;
4544}
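/*
 * Hypothetical userspace counterpart, waiting for ring-buffer data or
 * for the event (and all its children) to go away:
 *
 *	struct pollfd pfd = { .fd = perf_fd, .events = POLLIN };
 *
 *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLHUP))
 *		handle_event_gone();	// hypothetical helper
 */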
4545
Peter Zijlstraf63a8da2015-01-23 12:24:14 +01004546static void _perf_event_reset(struct perf_event *event)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004547{
Sukadev Bhattiprolu7d889622015-09-03 20:07:50 -07004548 (void)perf_event_read(event, false);
Peter Zijlstrae7850592010-05-21 14:43:08 +02004549 local64_set(&event->count, 0);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004550 perf_event_update_userpage(event);
4551}
4552
4553/*
4554 * Holding the top-level event's child_mutex means that any
4555 * descendant process that has inherited this event will block
Peter Zijlstra8ba289b2016-01-26 13:06:56 +01004556 * in perf_event_exit_event() if it goes to exit, thus satisfying the
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004557 * task existence requirements of perf_event_enable/disable.
4558 */
4559static void perf_event_for_each_child(struct perf_event *event,
4560 void (*func)(struct perf_event *))
4561{
4562 struct perf_event *child;
4563
4564 WARN_ON_ONCE(event->ctx->parent_ctx);
Peter Zijlstraf63a8da2015-01-23 12:24:14 +01004565
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004566 mutex_lock(&event->child_mutex);
4567 func(event);
4568 list_for_each_entry(child, &event->child_list, child_list)
4569 func(child);
4570 mutex_unlock(&event->child_mutex);
4571}
4572
4573static void perf_event_for_each(struct perf_event *event,
4574 void (*func)(struct perf_event *))
4575{
4576 struct perf_event_context *ctx = event->ctx;
4577 struct perf_event *sibling;
4578
Peter Zijlstraf63a8da2015-01-23 12:24:14 +01004579 lockdep_assert_held(&ctx->mutex);
4580
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004581 event = event->group_leader;
4582
4583 perf_event_for_each_child(event, func);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004584 list_for_each_entry(sibling, &event->sibling_list, group_entry)
Michael Ellerman724b6da2012-04-11 11:54:13 +10004585 perf_event_for_each_child(sibling, func);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004586}
4587
Peter Zijlstrafae3fde2016-01-11 15:00:50 +01004588static void __perf_event_period(struct perf_event *event,
4589 struct perf_cpu_context *cpuctx,
4590 struct perf_event_context *ctx,
4591 void *info)
Peter Zijlstra00179602015-11-30 16:26:35 +01004592{
Peter Zijlstrafae3fde2016-01-11 15:00:50 +01004593 u64 value = *((u64 *)info);
Peter Zijlstrac7999c62015-08-04 19:22:49 +02004594 bool active;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004595
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004596 if (event->attr.freq) {
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004597 event->attr.sample_freq = value;
4598 } else {
4599 event->attr.sample_period = value;
4600 event->hw.sample_period = value;
4601 }
Peter Zijlstrabad71922013-11-27 13:54:38 +00004602
4603 active = (event->state == PERF_EVENT_STATE_ACTIVE);
4604 if (active) {
4605 perf_pmu_disable(ctx->pmu);
Peter Zijlstra1e02cd42016-03-10 15:39:24 +01004606 /*
4607 * We could be throttled; unthrottle now to avoid the tick
4608 * trying to unthrottle while we already re-started the event.
4609 */
4610 if (event->hw.interrupts == MAX_INTERRUPTS) {
4611 event->hw.interrupts = 0;
4612 perf_log_throttle(event, 1);
4613 }
Peter Zijlstrabad71922013-11-27 13:54:38 +00004614 event->pmu->stop(event, PERF_EF_UPDATE);
4615 }
4616
4617 local64_set(&event->hw.period_left, 0);
4618
4619 if (active) {
4620 event->pmu->start(event, PERF_EF_RELOAD);
4621 perf_pmu_enable(ctx->pmu);
4622 }
Peter Zijlstrac7999c62015-08-04 19:22:49 +02004623}
4624
4625static int perf_event_period(struct perf_event *event, u64 __user *arg)
4626{
Peter Zijlstrac7999c62015-08-04 19:22:49 +02004627 u64 value;
4628
4629 if (!is_sampling_event(event))
4630 return -EINVAL;
4631
4632 if (copy_from_user(&value, arg, sizeof(value)))
4633 return -EFAULT;
4634
4635 if (!value)
4636 return -EINVAL;
4637
4638 if (event->attr.freq && value > sysctl_perf_event_sample_rate)
4639 return -EINVAL;
4640
Peter Zijlstrafae3fde2016-01-11 15:00:50 +01004641 event_function_call(event, __perf_event_period, &value);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004642
Peter Zijlstrac7999c62015-08-04 19:22:49 +02004643 return 0;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004644}
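/*
 * Matching userspace call (hedged sketch): PERF_EVENT_IOC_PERIOD takes
 * a pointer to the new period, or to the new frequency for freq events:
 *
 *	u64 period = 100000;
 *
 *	if (ioctl(perf_fd, PERF_EVENT_IOC_PERIOD, &period))
 *		perror("PERF_EVENT_IOC_PERIOD");
 */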
4645
Peter Zijlstraac9721f2010-05-27 12:54:41 +02004646static const struct file_operations perf_fops;
4647
Al Viro2903ff02012-08-28 12:52:22 -04004648static inline int perf_fget_light(int fd, struct fd *p)
Peter Zijlstraac9721f2010-05-27 12:54:41 +02004649{
Al Viro2903ff02012-08-28 12:52:22 -04004650 struct fd f = fdget(fd);
4651 if (!f.file)
4652 return -EBADF;
Peter Zijlstraac9721f2010-05-27 12:54:41 +02004653
Al Viro2903ff02012-08-28 12:52:22 -04004654 if (f.file->f_op != &perf_fops) {
4655 fdput(f);
4656 return -EBADF;
Peter Zijlstraac9721f2010-05-27 12:54:41 +02004657 }
Al Viro2903ff02012-08-28 12:52:22 -04004658 *p = f;
4659 return 0;
Peter Zijlstraac9721f2010-05-27 12:54:41 +02004660}
4661
4662static int perf_event_set_output(struct perf_event *event,
4663 struct perf_event *output_event);
Li Zefan6fb29152009-10-15 11:21:42 +08004664static int perf_event_set_filter(struct perf_event *event, void __user *arg);
Alexei Starovoitov25415172015-03-25 12:49:20 -07004665static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004666
Peter Zijlstraf63a8da2015-01-23 12:24:14 +01004667static long _perf_ioctl(struct perf_event *event, unsigned int cmd, unsigned long arg)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004668{
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004669 void (*func)(struct perf_event *);
4670 u32 flags = arg;
4671
4672 switch (cmd) {
4673 case PERF_EVENT_IOC_ENABLE:
Peter Zijlstraf63a8da2015-01-23 12:24:14 +01004674 func = _perf_event_enable;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004675 break;
4676 case PERF_EVENT_IOC_DISABLE:
Peter Zijlstraf63a8da2015-01-23 12:24:14 +01004677 func = _perf_event_disable;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004678 break;
4679 case PERF_EVENT_IOC_RESET:
Peter Zijlstraf63a8da2015-01-23 12:24:14 +01004680 func = _perf_event_reset;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004681 break;
4682
4683 case PERF_EVENT_IOC_REFRESH:
Peter Zijlstraf63a8da2015-01-23 12:24:14 +01004684 return _perf_event_refresh(event, arg);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004685
4686 case PERF_EVENT_IOC_PERIOD:
4687 return perf_event_period(event, (u64 __user *)arg);
4688
Jiri Olsacf4957f2012-10-24 13:37:58 +02004689 case PERF_EVENT_IOC_ID:
4690 {
4691 u64 id = primary_event_id(event);
4692
4693 if (copy_to_user((void __user *)arg, &id, sizeof(id)))
4694 return -EFAULT;
4695 return 0;
4696 }
4697
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004698 case PERF_EVENT_IOC_SET_OUTPUT:
Peter Zijlstraac9721f2010-05-27 12:54:41 +02004699 {
Peter Zijlstraac9721f2010-05-27 12:54:41 +02004700 int ret;
Peter Zijlstraac9721f2010-05-27 12:54:41 +02004701 if (arg != -1) {
Al Viro2903ff02012-08-28 12:52:22 -04004702 struct perf_event *output_event;
4703 struct fd output;
4704 ret = perf_fget_light(arg, &output);
4705 if (ret)
4706 return ret;
4707 output_event = output.file->private_data;
4708 ret = perf_event_set_output(event, output_event);
4709 fdput(output);
4710 } else {
4711 ret = perf_event_set_output(event, NULL);
Peter Zijlstraac9721f2010-05-27 12:54:41 +02004712 }
Peter Zijlstraac9721f2010-05-27 12:54:41 +02004713 return ret;
4714 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004715
Li Zefan6fb29152009-10-15 11:21:42 +08004716 case PERF_EVENT_IOC_SET_FILTER:
4717 return perf_event_set_filter(event, (void __user *)arg);
4718
Alexei Starovoitov25415172015-03-25 12:49:20 -07004719 case PERF_EVENT_IOC_SET_BPF:
4720 return perf_event_set_bpf_prog(event, arg);
4721
Wang Nan86e79722016-03-28 06:41:29 +00004722 case PERF_EVENT_IOC_PAUSE_OUTPUT: {
4723 struct ring_buffer *rb;
4724
4725 rcu_read_lock();
4726 rb = rcu_dereference(event->rb);
4727 if (!rb || !rb->nr_pages) {
4728 rcu_read_unlock();
4729 return -EINVAL;
4730 }
4731 rb_toggle_paused(rb, !!arg);
4732 rcu_read_unlock();
4733 return 0;
4734 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004735 default:
4736 return -ENOTTY;
4737 }
4738
4739 if (flags & PERF_IOC_FLAG_GROUP)
4740 perf_event_for_each(event, func);
4741 else
4742 perf_event_for_each_child(event, func);
4743
4744 return 0;
4745}
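/*
 * The PERF_IOC_FLAG_GROUP handling above is what lets userspace drive a
 * whole group through its leader, e.g. (hypothetical fd name):
 *
 *	ioctl(leader_fd, PERF_EVENT_IOC_RESET,   PERF_IOC_FLAG_GROUP);
 *	ioctl(leader_fd, PERF_EVENT_IOC_ENABLE,  PERF_IOC_FLAG_GROUP);
 *	// ... workload ...
 *	ioctl(leader_fd, PERF_EVENT_IOC_DISABLE, PERF_IOC_FLAG_GROUP);
 */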
4746
Peter Zijlstraf63a8da2015-01-23 12:24:14 +01004747static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
4748{
4749 struct perf_event *event = file->private_data;
4750 struct perf_event_context *ctx;
4751 long ret;
4752
4753 ctx = perf_event_ctx_lock(event);
4754 ret = _perf_ioctl(event, cmd, arg);
4755 perf_event_ctx_unlock(event, ctx);
4756
4757 return ret;
4758}
4759
Pawel Mollb3f20782014-06-13 16:03:32 +01004760#ifdef CONFIG_COMPAT
4761static long perf_compat_ioctl(struct file *file, unsigned int cmd,
4762 unsigned long arg)
4763{
4764 switch (_IOC_NR(cmd)) {
4765 case _IOC_NR(PERF_EVENT_IOC_SET_FILTER):
4766 case _IOC_NR(PERF_EVENT_IOC_ID):
4767		/* Fix up pointer size (usually 4 -> 8 in the 32-on-64-bit case) */
4768 if (_IOC_SIZE(cmd) == sizeof(compat_uptr_t)) {
4769 cmd &= ~IOCSIZE_MASK;
4770 cmd |= sizeof(void *) << IOCSIZE_SHIFT;
4771 }
4772 break;
4773 }
4774 return perf_ioctl(file, cmd, arg);
4775}
4776#else
4777# define perf_compat_ioctl NULL
4778#endif
4779
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004780int perf_event_task_enable(void)
4781{
Peter Zijlstraf63a8da2015-01-23 12:24:14 +01004782 struct perf_event_context *ctx;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004783 struct perf_event *event;
4784
4785 mutex_lock(&current->perf_event_mutex);
Peter Zijlstraf63a8da2015-01-23 12:24:14 +01004786 list_for_each_entry(event, &current->perf_event_list, owner_entry) {
4787 ctx = perf_event_ctx_lock(event);
4788 perf_event_for_each_child(event, _perf_event_enable);
4789 perf_event_ctx_unlock(event, ctx);
4790 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004791 mutex_unlock(&current->perf_event_mutex);
4792
4793 return 0;
4794}
4795
4796int perf_event_task_disable(void)
4797{
Peter Zijlstraf63a8da2015-01-23 12:24:14 +01004798 struct perf_event_context *ctx;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004799 struct perf_event *event;
4800
4801 mutex_lock(&current->perf_event_mutex);
Peter Zijlstraf63a8da2015-01-23 12:24:14 +01004802 list_for_each_entry(event, &current->perf_event_list, owner_entry) {
4803 ctx = perf_event_ctx_lock(event);
4804 perf_event_for_each_child(event, _perf_event_disable);
4805 perf_event_ctx_unlock(event, ctx);
4806 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004807 mutex_unlock(&current->perf_event_mutex);
4808
4809 return 0;
4810}
4811
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004812static int perf_event_index(struct perf_event *event)
4813{
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02004814 if (event->hw.state & PERF_HES_STOPPED)
4815 return 0;
4816
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004817 if (event->state != PERF_EVENT_STATE_ACTIVE)
4818 return 0;
4819
Peter Zijlstra35edc2a2011-11-20 20:36:02 +01004820 return event->pmu->event_idx(event);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004821}
4822
Eric B Munsonc4794292011-06-23 16:34:38 -04004823static void calc_timer_values(struct perf_event *event,
Peter Zijlstrae3f35412011-11-21 11:43:53 +01004824 u64 *now,
Eric B Munson7f310a52011-06-23 16:34:38 -04004825 u64 *enabled,
4826 u64 *running)
Eric B Munsonc4794292011-06-23 16:34:38 -04004827{
Peter Zijlstrae3f35412011-11-21 11:43:53 +01004828 u64 ctx_time;
Eric B Munsonc4794292011-06-23 16:34:38 -04004829
Peter Zijlstrae3f35412011-11-21 11:43:53 +01004830 *now = perf_clock();
4831 ctx_time = event->shadow_ctx_time + *now;
Peter Zijlstra0d3d73a2017-09-05 14:16:28 +02004832 __perf_update_times(event, ctx_time, enabled, running);
Eric B Munsonc4794292011-06-23 16:34:38 -04004833}
4834
Peter Zijlstrafa7315872013-09-19 10:16:42 +02004835static void perf_event_init_userpage(struct perf_event *event)
4836{
4837 struct perf_event_mmap_page *userpg;
4838 struct ring_buffer *rb;
4839
4840 rcu_read_lock();
4841 rb = rcu_dereference(event->rb);
4842 if (!rb)
4843 goto unlock;
4844
4845 userpg = rb->user_page;
4846
4847 /* Allow new userspace to detect that bit 0 is deprecated */
4848 userpg->cap_bit0_is_deprecated = 1;
4849 userpg->size = offsetof(struct perf_event_mmap_page, __reserved);
Alexander Shishkine8c6dea2015-01-14 14:18:10 +02004850 userpg->data_offset = PAGE_SIZE;
4851 userpg->data_size = perf_data_size(rb);
Peter Zijlstrafa7315872013-09-19 10:16:42 +02004852
4853unlock:
4854 rcu_read_unlock();
4855}
4856
Andy Lutomirskic1317ec2014-10-24 15:58:11 -07004857void __weak arch_perf_update_userpage(
4858 struct perf_event *event, struct perf_event_mmap_page *userpg, u64 now)
Peter Zijlstrae3f35412011-11-21 11:43:53 +01004859{
4860}
4861
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004862/*
4863 * Callers need to ensure there can be no nesting of this function, otherwise
4864 * the seqlock logic goes bad. We cannot serialize this because the arch
4865 * code calls this from NMI context.
4866 */
4867void perf_event_update_userpage(struct perf_event *event)
4868{
4869 struct perf_event_mmap_page *userpg;
Frederic Weisbecker76369132011-05-19 19:55:04 +02004870 struct ring_buffer *rb;
Peter Zijlstrae3f35412011-11-21 11:43:53 +01004871 u64 enabled, running, now;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004872
4873 rcu_read_lock();
Peter Zijlstra5ec4c592013-08-02 21:16:30 +02004874 rb = rcu_dereference(event->rb);
4875 if (!rb)
4876 goto unlock;
4877
Eric B Munson0d641202011-06-24 12:26:26 -04004878 /*
4879 * compute total_time_enabled, total_time_running
4880 * based on snapshot values taken when the event
4881 * was last scheduled in.
4882 *
4883 * we cannot simply called update_context_time()
4884 * because of locking issue as we can be called in
4885 * NMI context
4886 */
Peter Zijlstrae3f35412011-11-21 11:43:53 +01004887 calc_timer_values(event, &now, &enabled, &running);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004888
Frederic Weisbecker76369132011-05-19 19:55:04 +02004889 userpg = rb->user_page;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004890 /*
4891 * Disable preemption so as to not let the corresponding user-space
4892 * spin too long if we get preempted.
4893 */
4894 preempt_disable();
4895 ++userpg->lock;
4896 barrier();
4897 userpg->index = perf_event_index(event);
Peter Zijlstrab5e58792010-05-21 14:43:12 +02004898 userpg->offset = perf_event_count(event);
Peter Zijlstra365a4032011-11-21 20:58:59 +01004899 if (userpg->index)
Peter Zijlstrae7850592010-05-21 14:43:08 +02004900 userpg->offset -= local64_read(&event->hw.prev_count);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004901
Eric B Munson0d641202011-06-24 12:26:26 -04004902 userpg->time_enabled = enabled +
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004903 atomic64_read(&event->child_total_time_enabled);
4904
Eric B Munson0d641202011-06-24 12:26:26 -04004905 userpg->time_running = running +
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004906 atomic64_read(&event->child_total_time_running);
4907
Andy Lutomirskic1317ec2014-10-24 15:58:11 -07004908 arch_perf_update_userpage(event, userpg, now);
Peter Zijlstrae3f35412011-11-21 11:43:53 +01004909
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004910 barrier();
4911 ++userpg->lock;
4912 preempt_enable();
4913unlock:
4914 rcu_read_unlock();
4915}
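/*
 * The ->lock increments above form a seqcount for userspace; a
 * (hypothetical) reader of the mmap()ed control page retries until it
 * sees a stable snapshot:
 *
 *	struct perf_event_mmap_page *pc = base;
 *	u32 seq, idx;
 *	s64 offset;
 *	u64 enabled, running;
 *
 *	do {
 *		seq = pc->lock;
 *		barrier();
 *		idx     = pc->index;
 *		offset  = pc->offset;
 *		enabled = pc->time_enabled;
 *		running = pc->time_running;
 *		barrier();
 *	} while (pc->lock != seq);
 */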
4916
Dave Jiang11bac802017-02-24 14:56:41 -08004917static int perf_mmap_fault(struct vm_fault *vmf)
Peter Zijlstra906010b2009-09-21 16:08:49 +02004918{
Dave Jiang11bac802017-02-24 14:56:41 -08004919 struct perf_event *event = vmf->vma->vm_file->private_data;
Frederic Weisbecker76369132011-05-19 19:55:04 +02004920 struct ring_buffer *rb;
Peter Zijlstra906010b2009-09-21 16:08:49 +02004921 int ret = VM_FAULT_SIGBUS;
4922
4923 if (vmf->flags & FAULT_FLAG_MKWRITE) {
4924 if (vmf->pgoff == 0)
4925 ret = 0;
4926 return ret;
4927 }
4928
4929 rcu_read_lock();
Frederic Weisbecker76369132011-05-19 19:55:04 +02004930 rb = rcu_dereference(event->rb);
4931 if (!rb)
Peter Zijlstra906010b2009-09-21 16:08:49 +02004932 goto unlock;
4933
4934 if (vmf->pgoff && (vmf->flags & FAULT_FLAG_WRITE))
4935 goto unlock;
4936
Frederic Weisbecker76369132011-05-19 19:55:04 +02004937 vmf->page = perf_mmap_to_page(rb, vmf->pgoff);
Peter Zijlstra906010b2009-09-21 16:08:49 +02004938 if (!vmf->page)
4939 goto unlock;
4940
4941 get_page(vmf->page);
Dave Jiang11bac802017-02-24 14:56:41 -08004942 vmf->page->mapping = vmf->vma->vm_file->f_mapping;
Peter Zijlstra906010b2009-09-21 16:08:49 +02004943 vmf->page->index = vmf->pgoff;
4944
4945 ret = 0;
4946unlock:
4947 rcu_read_unlock();
4948
4949 return ret;
4950}
4951
Peter Zijlstra10c6db12011-11-26 02:47:31 +01004952static void ring_buffer_attach(struct perf_event *event,
4953 struct ring_buffer *rb)
4954{
Peter Zijlstrab69cf532014-03-14 10:50:33 +01004955 struct ring_buffer *old_rb = NULL;
Peter Zijlstra10c6db12011-11-26 02:47:31 +01004956 unsigned long flags;
4957
Peter Zijlstrab69cf532014-03-14 10:50:33 +01004958 if (event->rb) {
4959 /*
4960 * Should be impossible, we set this when removing
4961 * event->rb_entry and wait/clear when adding event->rb_entry.
4962 */
4963 WARN_ON_ONCE(event->rcu_pending);
Peter Zijlstra10c6db12011-11-26 02:47:31 +01004964
Peter Zijlstrab69cf532014-03-14 10:50:33 +01004965 old_rb = event->rb;
Peter Zijlstrab69cf532014-03-14 10:50:33 +01004966 spin_lock_irqsave(&old_rb->event_lock, flags);
4967 list_del_rcu(&event->rb_entry);
4968 spin_unlock_irqrestore(&old_rb->event_lock, flags);
Peter Zijlstra10c6db12011-11-26 02:47:31 +01004969
Oleg Nesterov2f993cf2015-05-30 22:04:25 +02004970 event->rcu_batches = get_state_synchronize_rcu();
4971 event->rcu_pending = 1;
Peter Zijlstrab69cf532014-03-14 10:50:33 +01004972 }
Peter Zijlstra10c6db12011-11-26 02:47:31 +01004973
Peter Zijlstrab69cf532014-03-14 10:50:33 +01004974 if (rb) {
Oleg Nesterov2f993cf2015-05-30 22:04:25 +02004975 if (event->rcu_pending) {
4976 cond_synchronize_rcu(event->rcu_batches);
4977 event->rcu_pending = 0;
4978 }
4979
Peter Zijlstrab69cf532014-03-14 10:50:33 +01004980 spin_lock_irqsave(&rb->event_lock, flags);
4981 list_add_rcu(&event->rb_entry, &rb->event_list);
4982 spin_unlock_irqrestore(&rb->event_lock, flags);
4983 }
4984
Alexander Shishkin767ae082016-09-06 16:23:49 +03004985 /*
4986 * Avoid racing with perf_mmap_close(AUX): stop the event
4987 * before swizzling the event::rb pointer; if it's getting
4988 * unmapped, its aux_mmap_count will be 0 and it won't
4989 * restart. See the comment in __perf_pmu_output_stop().
4990 *
4991 * Data will inevitably be lost when set_output is done in
4992 * mid-air, but then again, whoever does it like this is
4993 * not in for the data anyway.
4994 */
4995 if (has_aux(event))
4996 perf_event_stop(event, 0);
4997
Peter Zijlstrab69cf532014-03-14 10:50:33 +01004998 rcu_assign_pointer(event->rb, rb);
4999
5000 if (old_rb) {
5001 ring_buffer_put(old_rb);
5002 /*
5003		 * Since we detached before setting the new rb (so that we
5004		 * could attach the new rb), we could have missed a wakeup.
5005 * Provide it now.
5006 */
5007 wake_up_all(&event->waitq);
5008 }
Peter Zijlstra10c6db12011-11-26 02:47:31 +01005009}
5010
5011static void ring_buffer_wakeup(struct perf_event *event)
5012{
5013 struct ring_buffer *rb;
5014
5015 rcu_read_lock();
5016 rb = rcu_dereference(event->rb);
Peter Zijlstra9bb5d402013-06-04 10:44:21 +02005017 if (rb) {
5018 list_for_each_entry_rcu(event, &rb->event_list, rb_entry)
5019 wake_up_all(&event->waitq);
5020 }
Peter Zijlstra10c6db12011-11-26 02:47:31 +01005021 rcu_read_unlock();
5022}
5023
Alexander Shishkinfdc26702015-01-14 14:18:16 +02005024struct ring_buffer *ring_buffer_get(struct perf_event *event)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005025{
Frederic Weisbecker76369132011-05-19 19:55:04 +02005026 struct ring_buffer *rb;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005027
Peter Zijlstraac9721f2010-05-27 12:54:41 +02005028 rcu_read_lock();
Frederic Weisbecker76369132011-05-19 19:55:04 +02005029 rb = rcu_dereference(event->rb);
5030 if (rb) {
5031 if (!atomic_inc_not_zero(&rb->refcount))
5032 rb = NULL;
Peter Zijlstraac9721f2010-05-27 12:54:41 +02005033 }
5034 rcu_read_unlock();
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005035
Frederic Weisbecker76369132011-05-19 19:55:04 +02005036 return rb;
Peter Zijlstraac9721f2010-05-27 12:54:41 +02005037}
5038
Alexander Shishkinfdc26702015-01-14 14:18:16 +02005039void ring_buffer_put(struct ring_buffer *rb)
Peter Zijlstraac9721f2010-05-27 12:54:41 +02005040{
Frederic Weisbecker76369132011-05-19 19:55:04 +02005041 if (!atomic_dec_and_test(&rb->refcount))
Peter Zijlstraac9721f2010-05-27 12:54:41 +02005042 return;
5043
Peter Zijlstra9bb5d402013-06-04 10:44:21 +02005044 WARN_ON_ONCE(!list_empty(&rb->event_list));
Peter Zijlstra10c6db12011-11-26 02:47:31 +01005045
Frederic Weisbecker76369132011-05-19 19:55:04 +02005046 call_rcu(&rb->rcu_head, rb_free_rcu);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005047}
5048
5049static void perf_mmap_open(struct vm_area_struct *vma)
5050{
5051 struct perf_event *event = vma->vm_file->private_data;
5052
5053 atomic_inc(&event->mmap_count);
Peter Zijlstra9bb5d402013-06-04 10:44:21 +02005054 atomic_inc(&event->rb->mmap_count);
Andy Lutomirski1e0fb9e2014-10-24 15:58:10 -07005055
Peter Zijlstra45bfb2e2015-01-14 14:18:11 +02005056 if (vma->vm_pgoff)
5057 atomic_inc(&event->rb->aux_mmap_count);
5058
Andy Lutomirski1e0fb9e2014-10-24 15:58:10 -07005059 if (event->pmu->event_mapped)
Peter Zijlstrabfe334922017-08-02 19:39:30 +02005060 event->pmu->event_mapped(event, vma->vm_mm);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005061}
5062
Alexander Shishkin95ff4ca2015-12-02 18:41:11 +02005063static void perf_pmu_output_stop(struct perf_event *event);
5064
Peter Zijlstra9bb5d402013-06-04 10:44:21 +02005065/*
5066 * A buffer can be mmap()ed multiple times; either directly through the same
5067 * event, or through other events by use of perf_event_set_output().
5068 *
5069 * In order to undo the VM accounting done by perf_mmap() we need to destroy
5070 * the buffer here, where we still have a VM context. This means we need
5071 * to detach all events redirecting to us.
5072 */
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005073static void perf_mmap_close(struct vm_area_struct *vma)
5074{
5075 struct perf_event *event = vma->vm_file->private_data;
5076
Peter Zijlstrab69cf532014-03-14 10:50:33 +01005077 struct ring_buffer *rb = ring_buffer_get(event);
Peter Zijlstra9bb5d402013-06-04 10:44:21 +02005078 struct user_struct *mmap_user = rb->mmap_user;
5079 int mmap_locked = rb->mmap_locked;
5080 unsigned long size = perf_data_size(rb);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005081
Andy Lutomirski1e0fb9e2014-10-24 15:58:10 -07005082 if (event->pmu->event_unmapped)
Peter Zijlstrabfe334922017-08-02 19:39:30 +02005083 event->pmu->event_unmapped(event, vma->vm_mm);
Andy Lutomirski1e0fb9e2014-10-24 15:58:10 -07005084
Peter Zijlstra45bfb2e2015-01-14 14:18:11 +02005085 /*
5086 * rb->aux_mmap_count will always drop before rb->mmap_count and
5087 * event->mmap_count, so it is ok to use event->mmap_mutex to
5088 * serialize with perf_mmap here.
5089 */
5090 if (rb_has_aux(rb) && vma->vm_pgoff == rb->aux_pgoff &&
5091 atomic_dec_and_mutex_lock(&rb->aux_mmap_count, &event->mmap_mutex)) {
Alexander Shishkin95ff4ca2015-12-02 18:41:11 +02005092 /*
5093 * Stop all AUX events that are writing to this buffer,
5094 * so that we can free its AUX pages and corresponding PMU
5095 * data. Note that after rb::aux_mmap_count dropped to zero,
5096 * they won't start any more (see perf_aux_output_begin()).
5097 */
5098 perf_pmu_output_stop(event);
5099
5100 /* now it's safe to free the pages */
Peter Zijlstra45bfb2e2015-01-14 14:18:11 +02005101 atomic_long_sub(rb->aux_nr_pages, &mmap_user->locked_vm);
5102 vma->vm_mm->pinned_vm -= rb->aux_mmap_locked;
5103
Alexander Shishkin95ff4ca2015-12-02 18:41:11 +02005104 /* this has to be the last one */
Peter Zijlstra45bfb2e2015-01-14 14:18:11 +02005105 rb_free_aux(rb);
Alexander Shishkin95ff4ca2015-12-02 18:41:11 +02005106 WARN_ON_ONCE(atomic_read(&rb->aux_refcount));
5107
Peter Zijlstra45bfb2e2015-01-14 14:18:11 +02005108 mutex_unlock(&event->mmap_mutex);
5109 }
5110
Peter Zijlstra9bb5d402013-06-04 10:44:21 +02005111 atomic_dec(&rb->mmap_count);
Peter Zijlstraac9721f2010-05-27 12:54:41 +02005112
Peter Zijlstra9bb5d402013-06-04 10:44:21 +02005113 if (!atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex))
Peter Zijlstrab69cf532014-03-14 10:50:33 +01005114 goto out_put;
Peter Zijlstra9bb5d402013-06-04 10:44:21 +02005115
Peter Zijlstrab69cf532014-03-14 10:50:33 +01005116 ring_buffer_attach(event, NULL);
Peter Zijlstra9bb5d402013-06-04 10:44:21 +02005117 mutex_unlock(&event->mmap_mutex);
5118
5119 /* If there's still other mmap()s of this buffer, we're done. */
Peter Zijlstrab69cf532014-03-14 10:50:33 +01005120 if (atomic_read(&rb->mmap_count))
5121 goto out_put;
Peter Zijlstra9bb5d402013-06-04 10:44:21 +02005122
5123 /*
5124 * No other mmap()s, detach from all other events that might redirect
5125 * into the now unreachable buffer. Somewhat complicated by the
5126 * fact that rb::event_lock otherwise nests inside mmap_mutex.
5127 */
5128again:
5129 rcu_read_lock();
5130 list_for_each_entry_rcu(event, &rb->event_list, rb_entry) {
5131 if (!atomic_long_inc_not_zero(&event->refcount)) {
5132 /*
5133 * This event is en-route to free_event() which will
5134 * detach it and remove it from the list.
5135 */
5136 continue;
5137 }
5138 rcu_read_unlock();
5139
5140 mutex_lock(&event->mmap_mutex);
5141 /*
5142 * Check we didn't race with perf_event_set_output() which can
5143 * swizzle the rb from under us while we were waiting to
5144 * acquire mmap_mutex.
5145 *
5146		 * If we find a different rb, ignore this event; a later
5147		 * iteration will no longer find it on the list. We have to
5148 * still restart the iteration to make sure we're not now
5149 * iterating the wrong list.
5150 */
		if (event->rb == rb)
			ring_buffer_attach(event, NULL);

		mutex_unlock(&event->mmap_mutex);
		put_event(event);

		/*
		 * Restart the iteration; either we're on the wrong list or
		 * we destroyed its integrity by doing a deletion.
		 */
		goto again;
	}
	rcu_read_unlock();

	/*
	 * It could be there are still a few 0-ref events on the list; they'll
	 * get cleaned up by free_event() -- they'll also still have their
	 * ref on the rb and will free it whenever they are done with it.
	 *
	 * Aside from that, this buffer is 'fully' detached and unmapped,
	 * undo the VM accounting.
	 */

	atomic_long_sub((size >> PAGE_SHIFT) + 1, &mmap_user->locked_vm);
	vma->vm_mm->pinned_vm -= mmap_locked;
	free_uid(mmap_user);

out_put:
	ring_buffer_put(rb); /* could be last */
}

static const struct vm_operations_struct perf_mmap_vmops = {
	.open		= perf_mmap_open,
	.close		= perf_mmap_close, /* non mergeable */
	.fault		= perf_mmap_fault,
	.page_mkwrite	= perf_mmap_fault,
};

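/*
 * A perf mmap() covers one of two regions: pgoff 0 maps the user control
 * page plus a power-of-two number of data pages (so vma_size must be
 * (nr_pages + 1) * PAGE_SIZE), while a non-zero pgoff maps the AUX area,
 * which must sit above the data buffer at the offset published in
 * user_page->aux_offset. The checks below enforce this layout.
 */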
static int perf_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct perf_event *event = file->private_data;
	unsigned long user_locked, user_lock_limit;
	struct user_struct *user = current_user();
	unsigned long locked, lock_limit;
	struct ring_buffer *rb = NULL;
	unsigned long vma_size;
	unsigned long nr_pages;
	long user_extra = 0, extra = 0;
	int ret = 0, flags = 0;

	/*
	 * Don't allow mmap() of inherited per-task counters. This would
	 * create a performance issue due to all children writing to the
	 * same rb.
	 */
	if (event->cpu == -1 && event->attr.inherit)
		return -EINVAL;

	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma_size = vma->vm_end - vma->vm_start;

	if (vma->vm_pgoff == 0) {
		nr_pages = (vma_size / PAGE_SIZE) - 1;
	} else {
		/*
		 * AUX area mapping: if rb->aux_nr_pages != 0, it's already
		 * mapped; all subsequent mappings should have the same size
		 * and offset. Must be above the normal perf buffer.
		 */
		u64 aux_offset, aux_size;

		if (!event->rb)
			return -EINVAL;

		nr_pages = vma_size / PAGE_SIZE;

		mutex_lock(&event->mmap_mutex);
		ret = -EINVAL;

		rb = event->rb;
		if (!rb)
			goto aux_unlock;

		aux_offset = READ_ONCE(rb->user_page->aux_offset);
		aux_size = READ_ONCE(rb->user_page->aux_size);

		if (aux_offset < perf_data_size(rb) + PAGE_SIZE)
			goto aux_unlock;

		if (aux_offset != vma->vm_pgoff << PAGE_SHIFT)
			goto aux_unlock;

		/* already mapped with a different offset */
		if (rb_has_aux(rb) && rb->aux_pgoff != vma->vm_pgoff)
			goto aux_unlock;

		if (aux_size != vma_size || aux_size != nr_pages * PAGE_SIZE)
			goto aux_unlock;

		/* already mapped with a different size */
		if (rb_has_aux(rb) && rb->aux_nr_pages != nr_pages)
			goto aux_unlock;

		if (!is_power_of_2(nr_pages))
			goto aux_unlock;

		if (!atomic_inc_not_zero(&rb->mmap_count))
			goto aux_unlock;

		if (rb_has_aux(rb)) {
			atomic_inc(&rb->aux_mmap_count);
			ret = 0;
			goto unlock;
		}

		atomic_set(&rb->aux_mmap_count, 1);
		user_extra = nr_pages;

		goto accounting;
	}

	/*
	 * If we have rb pages, ensure they're a power-of-two number, so we
	 * can do bitmasks instead of modulo.
	 */
	if (nr_pages != 0 && !is_power_of_2(nr_pages))
		return -EINVAL;

	if (vma_size != PAGE_SIZE * (1 + nr_pages))
		return -EINVAL;

	WARN_ON_ONCE(event->ctx->parent_ctx);
again:
	mutex_lock(&event->mmap_mutex);
	if (event->rb) {
		if (event->rb->nr_pages != nr_pages) {
			ret = -EINVAL;
			goto unlock;
		}

		if (!atomic_inc_not_zero(&event->rb->mmap_count)) {
			/*
			 * Raced against perf_mmap_close() through
			 * perf_event_set_output(). Try again, hope for better
			 * luck.
			 */
			mutex_unlock(&event->mmap_mutex);
			goto again;
		}

		goto unlock;
	}

	user_extra = nr_pages + 1;

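	/*
	 * Locked-memory accounting is split: pages within the per-user
	 * sysctl_perf_event_mlock budget are charged to user->locked_vm;
	 * any overflow ("extra") is charged to this mm's pinned_vm and
	 * checked against RLIMIT_MEMLOCK below.
	 */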
accounting:
	user_lock_limit = sysctl_perf_event_mlock >> (PAGE_SHIFT - 10);

	/*
	 * Increase the limit linearly with more CPUs:
	 */
	user_lock_limit *= num_online_cpus();

	user_locked = atomic_long_read(&user->locked_vm) + user_extra;

	if (user_locked > user_lock_limit)
		extra = user_locked - user_lock_limit;

	lock_limit = rlimit(RLIMIT_MEMLOCK);
	lock_limit >>= PAGE_SHIFT;
	locked = vma->vm_mm->pinned_vm + extra;

	if ((locked > lock_limit) && perf_paranoid_tracepoint_raw() &&
		!capable(CAP_IPC_LOCK)) {
		ret = -EPERM;
		goto unlock;
	}

	WARN_ON(!rb && event->rb);

	if (vma->vm_flags & VM_WRITE)
		flags |= RING_BUFFER_WRITABLE;

	if (!rb) {
		rb = rb_alloc(nr_pages,
			      event->attr.watermark ? event->attr.wakeup_watermark : 0,
			      event->cpu, flags);

		if (!rb) {
			ret = -ENOMEM;
			goto unlock;
		}

		atomic_set(&rb->mmap_count, 1);
		rb->mmap_user = get_current_user();
		rb->mmap_locked = extra;

		ring_buffer_attach(event, rb);

		perf_event_init_userpage(event);
		perf_event_update_userpage(event);
	} else {
		ret = rb_alloc_aux(rb, event, vma->vm_pgoff, nr_pages,
				   event->attr.aux_watermark, flags);
		if (!ret)
			rb->aux_mmap_locked = extra;
	}

unlock:
	if (!ret) {
		atomic_long_add(user_extra, &user->locked_vm);
		vma->vm_mm->pinned_vm += extra;

		atomic_inc(&event->mmap_count);
	} else if (rb) {
		atomic_dec(&rb->mmap_count);
	}
aux_unlock:
	mutex_unlock(&event->mmap_mutex);

	/*
	 * Since pinned accounting is per vm, we cannot allow fork() to copy
	 * our vma.
	 */
	vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_ops = &perf_mmap_vmops;

	if (event->pmu->event_mapped)
		event->pmu->event_mapped(event, vma->vm_mm);

	return ret;
}

static int perf_fasync(int fd, struct file *filp, int on)
{
	struct inode *inode = file_inode(filp);
	struct perf_event *event = filp->private_data;
	int retval;

	inode_lock(inode);
	retval = fasync_helper(fd, filp, on, &event->fasync);
	inode_unlock(inode);

	if (retval < 0)
		return retval;

	return 0;
}

static const struct file_operations perf_fops = {
	.llseek			= no_llseek,
	.release		= perf_release,
	.read			= perf_read,
	.poll			= perf_poll,
	.unlocked_ioctl		= perf_ioctl,
	.compat_ioctl		= perf_compat_ioctl,
	.mmap			= perf_mmap,
	.fasync			= perf_fasync,
};

/*
 * Perf event wakeup
 *
 * If there's data, ensure we set the poll() state and publish everything
 * to user-space before waking everybody up.
 */

static inline struct fasync_struct **perf_event_fasync(struct perf_event *event)
{
	/* only the parent has fasync state */
	if (event->parent)
		event = event->parent;
	return &event->fasync;
}

void perf_event_wakeup(struct perf_event *event)
{
	ring_buffer_wakeup(event);

	if (event->pending_kill) {
		kill_fasync(perf_event_fasync(event), SIGIO, event->pending_kill);
		event->pending_kill = 0;
	}
}

static void perf_pending_event(struct irq_work *entry)
{
	struct perf_event *event = container_of(entry,
			struct perf_event, pending);
	int rctx;

	rctx = perf_swevent_get_recursion_context();
	/*
	 * If we 'fail' here, that's OK, it means recursion is already disabled
	 * and we won't recurse 'further'.
	 */

	if (event->pending_disable) {
		event->pending_disable = 0;
		perf_event_disable_local(event);
	}

	if (event->pending_wakeup) {
		event->pending_wakeup = 0;
		perf_event_wakeup(event);
	}

	if (rctx >= 0)
		perf_swevent_put_recursion_context(rctx);
}

/*
 * We assume there is only KVM supporting the callbacks.
 * Later on, we might change it to a list if there is
 * another virtualization implementation supporting the callbacks.
 */
struct perf_guest_info_callbacks *perf_guest_cbs;

int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
{
	perf_guest_cbs = cbs;
	return 0;
}
EXPORT_SYMBOL_GPL(perf_register_guest_info_callbacks);

int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
{
	perf_guest_cbs = NULL;
	return 0;
}
EXPORT_SYMBOL_GPL(perf_unregister_guest_info_callbacks);

static void
perf_output_sample_regs(struct perf_output_handle *handle,
			struct pt_regs *regs, u64 mask)
{
	int bit;
	DECLARE_BITMAP(_mask, 64);

	bitmap_from_u64(_mask, mask);
	for_each_set_bit(bit, _mask, sizeof(mask) * BITS_PER_BYTE) {
		u64 val;

		val = perf_reg_value(regs, bit);
		perf_output_put(handle, val);
	}
}

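/*
 * Pick the register set to dump for PERF_SAMPLE_REGS_USER: the live regs
 * when the sample fired in user mode, a copy reconstructed by
 * perf_get_regs_user() when it fired in the kernel on behalf of a user
 * task, or PERF_SAMPLE_REGS_ABI_NONE for kernel threads (no mm).
 */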
static void perf_sample_regs_user(struct perf_regs *regs_user,
				  struct pt_regs *regs,
				  struct pt_regs *regs_user_copy)
{
	if (user_mode(regs)) {
		regs_user->abi = perf_reg_abi(current);
		regs_user->regs = regs;
	} else if (current->mm) {
		perf_get_regs_user(regs_user, regs, regs_user_copy);
	} else {
		regs_user->abi = PERF_SAMPLE_REGS_ABI_NONE;
		regs_user->regs = NULL;
	}
}

static void perf_sample_regs_intr(struct perf_regs *regs_intr,
				  struct pt_regs *regs)
{
	regs_intr->regs = regs;
	regs_intr->abi = perf_reg_abi(current);
}


/*
 * Get the remaining task size from the user stack pointer.
 *
 * It'd be better to take the stack vma map and limit this more
 * precisely, but there's no way to get it safely under interrupt,
 * so use TASK_SIZE as the limit.
 */
static u64 perf_ustack_task_size(struct pt_regs *regs)
{
	unsigned long addr = perf_user_stack_pointer(regs);

	if (!addr || addr >= TASK_SIZE)
		return 0;

	return TASK_SIZE - addr;
}

static u16
perf_sample_ustack_size(u16 stack_size, u16 header_size,
			struct pt_regs *regs)
{
	u64 task_size;

	/* No regs, no stack pointer, no dump. */
	if (!regs)
		return 0;

	/*
	 * Check whether the requested stack size fits into:
	 * - TASK_SIZE
	 *   If it doesn't, limit the size to TASK_SIZE.
	 *
	 * - the remaining sample size
	 *   If it doesn't, shrink the stack size to fit into the
	 *   remaining sample size.
	 */

	task_size  = min((u64) USHRT_MAX, perf_ustack_task_size(regs));
	stack_size = min(stack_size, (u16) task_size);

	/* Current header size plus static size and dynamic size. */
	header_size += 2 * sizeof(u64);

	/* Do we fit in with the current stack dump size? */
	if ((u16) (header_size + stack_size) < header_size) {
		/*
		 * If we overflow the maximum size for the sample,
		 * we customize the stack dump size to fit in.
		 */
		stack_size = USHRT_MAX - header_size - sizeof(u64);
		stack_size = round_up(stack_size, sizeof(u64));
	}

	return stack_size;
}

static void
perf_output_sample_ustack(struct perf_output_handle *handle, u64 dump_size,
			  struct pt_regs *regs)
{
	/* Case of a kernel thread, nothing to dump */
	if (!regs) {
		u64 size = 0;
		perf_output_put(handle, size);
	} else {
		unsigned long sp;
		unsigned int rem;
		u64 dyn_size;

		/*
		 * We dump:
		 * static size
		 *   - the size requested by user or the best one we can fit
		 *     in to the sample max size
		 * data
		 *   - user stack dump data
		 * dynamic size
		 *   - the actual dumped size
		 */

		/* Static size. */
		perf_output_put(handle, dump_size);

		/* Data. */
		sp = perf_user_stack_pointer(regs);
		rem = __output_copy_user(handle, (void *) sp, dump_size);
		dyn_size = dump_size - rem;

		perf_output_skip(handle, rem);

		/* Dynamic size. */
		perf_output_put(handle, dyn_size);
	}
}

static void __perf_event_header__init_id(struct perf_event_header *header,
					 struct perf_sample_data *data,
					 struct perf_event *event)
{
	u64 sample_type = event->attr.sample_type;

	data->type = sample_type;
	header->size += event->id_header_size;

	if (sample_type & PERF_SAMPLE_TID) {
		/* namespace issues */
		data->tid_entry.pid = perf_event_pid(event, current);
		data->tid_entry.tid = perf_event_tid(event, current);
	}

	if (sample_type & PERF_SAMPLE_TIME)
		data->time = perf_event_clock(event);

	if (sample_type & (PERF_SAMPLE_ID | PERF_SAMPLE_IDENTIFIER))
		data->id = primary_event_id(event);

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		data->stream_id = event->id;

	if (sample_type & PERF_SAMPLE_CPU) {
		data->cpu_entry.cpu	 = raw_smp_processor_id();
		data->cpu_entry.reserved = 0;
	}
}

void perf_event_header__init_id(struct perf_event_header *header,
				struct perf_sample_data *data,
				struct perf_event *event)
{
	if (event->attr.sample_id_all)
		__perf_event_header__init_id(header, data, event);
}

static void __perf_event__output_id_sample(struct perf_output_handle *handle,
					   struct perf_sample_data *data)
{
	u64 sample_type = data->type;

	if (sample_type & PERF_SAMPLE_TID)
		perf_output_put(handle, data->tid_entry);

	if (sample_type & PERF_SAMPLE_TIME)
		perf_output_put(handle, data->time);

	if (sample_type & PERF_SAMPLE_ID)
		perf_output_put(handle, data->id);

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		perf_output_put(handle, data->stream_id);

	if (sample_type & PERF_SAMPLE_CPU)
		perf_output_put(handle, data->cpu_entry);

	if (sample_type & PERF_SAMPLE_IDENTIFIER)
		perf_output_put(handle, data->id);
}

void perf_event__output_id_sample(struct perf_event *event,
				  struct perf_output_handle *handle,
				  struct perf_sample_data *sample)
{
	if (event->attr.sample_id_all)
		__perf_event__output_id_sample(handle, sample);
}

static void perf_output_read_one(struct perf_output_handle *handle,
				 struct perf_event *event,
				 u64 enabled, u64 running)
{
	u64 read_format = event->attr.read_format;
	u64 values[4];
	int n = 0;

	values[n++] = perf_event_count(event);
	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
		values[n++] = enabled +
			atomic64_read(&event->child_total_time_enabled);
	}
	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
		values[n++] = running +
			atomic64_read(&event->child_total_time_running);
	}
	if (read_format & PERF_FORMAT_ID)
		values[n++] = primary_event_id(event);

	__output_copy(handle, values, n * sizeof(u64));
}

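/*
 * Emit the PERF_FORMAT_GROUP layout: the number of group members, the
 * optional enabled/running times, then a {count, id} pair for the leader
 * and for each sibling, reading each active member's PMU on the way.
 */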
static void perf_output_read_group(struct perf_output_handle *handle,
				   struct perf_event *event,
				   u64 enabled, u64 running)
{
	struct perf_event *leader = event->group_leader, *sub;
	u64 read_format = event->attr.read_format;
	u64 values[5];
	int n = 0;

	values[n++] = 1 + leader->nr_siblings;

	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		values[n++] = enabled;

	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		values[n++] = running;

	if (leader != event)
		leader->pmu->read(leader);

	values[n++] = perf_event_count(leader);
	if (read_format & PERF_FORMAT_ID)
		values[n++] = primary_event_id(leader);

	__output_copy(handle, values, n * sizeof(u64));

	list_for_each_entry(sub, &leader->sibling_list, group_entry) {
		n = 0;

		if ((sub != event) &&
		    (sub->state == PERF_EVENT_STATE_ACTIVE))
			sub->pmu->read(sub);

		values[n++] = perf_event_count(sub);
		if (read_format & PERF_FORMAT_ID)
			values[n++] = primary_event_id(sub);

		__output_copy(handle, values, n * sizeof(u64));
	}
}

#define PERF_FORMAT_TOTAL_TIMES (PERF_FORMAT_TOTAL_TIME_ENABLED|\
				 PERF_FORMAT_TOTAL_TIME_RUNNING)

/*
 * XXX PERF_SAMPLE_READ vs inherited events seems difficult.
 *
 * The problem is that it's both hard and excessively expensive to iterate
 * the child list, not to mention that it's impossible to IPI the children
 * running on another CPU, from interrupt/NMI context.
 */
static void perf_output_read(struct perf_output_handle *handle,
			     struct perf_event *event)
{
	u64 enabled = 0, running = 0, now;
	u64 read_format = event->attr.read_format;

	/*
	 * Compute total_time_enabled and total_time_running based on the
	 * snapshot values taken when the event was last scheduled in.
	 *
	 * We cannot simply call update_context_time() because of locking
	 * issues: we are called in NMI context.
	 */
	if (read_format & PERF_FORMAT_TOTAL_TIMES)
		calc_timer_values(event, &now, &enabled, &running);

	if (event->attr.read_format & PERF_FORMAT_GROUP)
		perf_output_read_group(handle, event, enabled, running);
	else
		perf_output_read_one(handle, event, enabled, running);
}

void perf_output_sample(struct perf_output_handle *handle,
			struct perf_event_header *header,
			struct perf_sample_data *data,
			struct perf_event *event)
{
	u64 sample_type = data->type;

	perf_output_put(handle, *header);

	if (sample_type & PERF_SAMPLE_IDENTIFIER)
		perf_output_put(handle, data->id);

	if (sample_type & PERF_SAMPLE_IP)
		perf_output_put(handle, data->ip);

	if (sample_type & PERF_SAMPLE_TID)
		perf_output_put(handle, data->tid_entry);

	if (sample_type & PERF_SAMPLE_TIME)
		perf_output_put(handle, data->time);

	if (sample_type & PERF_SAMPLE_ADDR)
		perf_output_put(handle, data->addr);

	if (sample_type & PERF_SAMPLE_ID)
		perf_output_put(handle, data->id);

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		perf_output_put(handle, data->stream_id);

	if (sample_type & PERF_SAMPLE_CPU)
		perf_output_put(handle, data->cpu_entry);

	if (sample_type & PERF_SAMPLE_PERIOD)
		perf_output_put(handle, data->period);

	if (sample_type & PERF_SAMPLE_READ)
		perf_output_read(handle, event);

	if (sample_type & PERF_SAMPLE_CALLCHAIN) {
		if (data->callchain) {
			int size = 1;

			if (data->callchain)
				size += data->callchain->nr;

			size *= sizeof(u64);

			__output_copy(handle, data->callchain, size);
		} else {
			u64 nr = 0;
			perf_output_put(handle, nr);
		}
	}

	if (sample_type & PERF_SAMPLE_RAW) {
		struct perf_raw_record *raw = data->raw;

		if (raw) {
			struct perf_raw_frag *frag = &raw->frag;

			perf_output_put(handle, raw->size);
			do {
				if (frag->copy) {
					__output_custom(handle, frag->copy,
							frag->data, frag->size);
				} else {
					__output_copy(handle, frag->data,
						      frag->size);
				}
				if (perf_raw_frag_last(frag))
					break;
				frag = frag->next;
			} while (1);
			if (frag->pad)
				__output_skip(handle, NULL, frag->pad);
		} else {
			struct {
				u32	size;
				u32	data;
			} raw = {
				.size = sizeof(u32),
				.data = 0,
			};
			perf_output_put(handle, raw);
		}
	}

	if (sample_type & PERF_SAMPLE_BRANCH_STACK) {
		if (data->br_stack) {
			size_t size;

			size = data->br_stack->nr
			     * sizeof(struct perf_branch_entry);

			perf_output_put(handle, data->br_stack->nr);
			perf_output_copy(handle, data->br_stack->entries, size);
		} else {
			/*
			 * we always store at least the value of nr
			 */
			u64 nr = 0;
			perf_output_put(handle, nr);
		}
	}

	if (sample_type & PERF_SAMPLE_REGS_USER) {
		u64 abi = data->regs_user.abi;

		/*
		 * If there are no regs to dump, notice it through
		 * first u64 being zero (PERF_SAMPLE_REGS_ABI_NONE).
		 */
		perf_output_put(handle, abi);

		if (abi) {
			u64 mask = event->attr.sample_regs_user;
			perf_output_sample_regs(handle,
						data->regs_user.regs,
						mask);
		}
	}

	if (sample_type & PERF_SAMPLE_STACK_USER) {
		perf_output_sample_ustack(handle,
					  data->stack_user_size,
					  data->regs_user.regs);
	}

	if (sample_type & PERF_SAMPLE_WEIGHT)
		perf_output_put(handle, data->weight);

	if (sample_type & PERF_SAMPLE_DATA_SRC)
		perf_output_put(handle, data->data_src.val);

	if (sample_type & PERF_SAMPLE_TRANSACTION)
		perf_output_put(handle, data->txn);

	if (sample_type & PERF_SAMPLE_REGS_INTR) {
		u64 abi = data->regs_intr.abi;
		/*
		 * If there are no regs to dump, notice it through
		 * first u64 being zero (PERF_SAMPLE_REGS_ABI_NONE).
		 */
		perf_output_put(handle, abi);

		if (abi) {
			u64 mask = event->attr.sample_regs_intr;

			perf_output_sample_regs(handle,
						data->regs_intr.regs,
						mask);
		}
	}

	if (sample_type & PERF_SAMPLE_PHYS_ADDR)
		perf_output_put(handle, data->phys_addr);

	if (!event->attr.watermark) {
		int wakeup_events = event->attr.wakeup_events;

		if (wakeup_events) {
			struct ring_buffer *rb = handle->rb;
			int events = local_inc_return(&rb->events);

			if (events >= wakeup_events) {
				local_sub(wakeup_events, &rb->events);
				local_inc(&rb->wakeup);
			}
		}
	}
}

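/*
 * Best-effort virtual-to-physical translation for PERF_SAMPLE_PHYS_ADDR.
 * This can run in NMI context, so it only uses IRQ-safe primitives and
 * returns 0 whenever the address cannot be translated safely.
 */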
static u64 perf_virt_to_phys(u64 virt)
{
	u64 phys_addr = 0;
	struct page *p = NULL;

	if (!virt)
		return 0;

	if (virt >= TASK_SIZE) {
		/* If it's vmalloc()d memory, leave phys_addr as 0 */
		if (virt_addr_valid((void *)(uintptr_t)virt) &&
		    !(virt >= VMALLOC_START && virt < VMALLOC_END))
			phys_addr = (u64)virt_to_phys((void *)(uintptr_t)virt);
	} else {
		/*
		 * Walk the page tables for a user address.
		 * Interrupts are disabled, which prevents any teardown
		 * of the page tables.
		 * Try the IRQ-safe __get_user_pages_fast() first;
		 * if that fails, leave phys_addr as 0.
		 */
		if ((current->mm != NULL) &&
		    (__get_user_pages_fast(virt, 1, 0, &p) == 1))
			phys_addr = page_to_phys(p) + virt % PAGE_SIZE;

		if (p)
			put_page(p);
	}

	return phys_addr;
}

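/*
 * Compute header->size for every field requested in attr.sample_type so
 * that perf_output_begin() can reserve the whole record up front. The
 * sizes computed here must match what perf_output_sample() later writes.
 */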
void perf_prepare_sample(struct perf_event_header *header,
			 struct perf_sample_data *data,
			 struct perf_event *event,
			 struct pt_regs *regs)
{
	u64 sample_type = event->attr.sample_type;

	header->type = PERF_RECORD_SAMPLE;
	header->size = sizeof(*header) + event->header_size;

	header->misc = 0;
	header->misc |= perf_misc_flags(regs);

	__perf_event_header__init_id(header, data, event);

	if (sample_type & PERF_SAMPLE_IP)
		data->ip = perf_instruction_pointer(regs);

	if (sample_type & PERF_SAMPLE_CALLCHAIN) {
		int size = 1;

		data->callchain = perf_callchain(event, regs);

		if (data->callchain)
			size += data->callchain->nr;

		header->size += size * sizeof(u64);
	}

	if (sample_type & PERF_SAMPLE_RAW) {
		struct perf_raw_record *raw = data->raw;
		int size;

		if (raw) {
			struct perf_raw_frag *frag = &raw->frag;
			u32 sum = 0;

			do {
				sum += frag->size;
				if (perf_raw_frag_last(frag))
					break;
				frag = frag->next;
			} while (1);

			size = round_up(sum + sizeof(u32), sizeof(u64));
			raw->size = size - sizeof(u32);
			frag->pad = raw->size - sum;
		} else {
			size = sizeof(u64);
		}

		header->size += size;
	}

	if (sample_type & PERF_SAMPLE_BRANCH_STACK) {
		int size = sizeof(u64); /* nr */
		if (data->br_stack) {
			size += data->br_stack->nr
			      * sizeof(struct perf_branch_entry);
		}
		header->size += size;
	}

	if (sample_type & (PERF_SAMPLE_REGS_USER | PERF_SAMPLE_STACK_USER))
		perf_sample_regs_user(&data->regs_user, regs,
				      &data->regs_user_copy);

	if (sample_type & PERF_SAMPLE_REGS_USER) {
		/* regs dump ABI info */
		int size = sizeof(u64);

		if (data->regs_user.regs) {
			u64 mask = event->attr.sample_regs_user;
			size += hweight64(mask) * sizeof(u64);
		}

		header->size += size;
	}

	if (sample_type & PERF_SAMPLE_STACK_USER) {
		/*
		 * Either the PERF_SAMPLE_STACK_USER bit needs to always be
		 * processed last, or an additional check must be added when
		 * a new sample type is introduced, because we could otherwise
		 * eat up the rest of the sample size.
		 */
		u16 stack_size = event->attr.sample_stack_user;
		u16 size = sizeof(u64);

		stack_size = perf_sample_ustack_size(stack_size, header->size,
						     data->regs_user.regs);

		/*
		 * If there is something to dump, add space for the dump
		 * itself and for the field that tells the dynamic size,
		 * which is how many have been actually dumped.
		 */
		if (stack_size)
			size += sizeof(u64) + stack_size;

		data->stack_user_size = stack_size;
		header->size += size;
	}

	if (sample_type & PERF_SAMPLE_REGS_INTR) {
		/* regs dump ABI info */
		int size = sizeof(u64);

		perf_sample_regs_intr(&data->regs_intr, regs);

		if (data->regs_intr.regs) {
			u64 mask = event->attr.sample_regs_intr;

			size += hweight64(mask) * sizeof(u64);
		}

		header->size += size;
	}

	if (sample_type & PERF_SAMPLE_PHYS_ADDR)
		data->phys_addr = perf_virt_to_phys(data->addr);
}

static void __always_inline
__perf_event_output(struct perf_event *event,
		    struct perf_sample_data *data,
		    struct pt_regs *regs,
		    int (*output_begin)(struct perf_output_handle *,
					struct perf_event *,
					unsigned int))
{
	struct perf_output_handle handle;
	struct perf_event_header header;

	/* protect the callchain buffers */
	rcu_read_lock();

	perf_prepare_sample(&header, data, event, regs);

	if (output_begin(&handle, event, header.size))
		goto exit;

	perf_output_sample(&handle, &header, data, event);

	perf_output_end(&handle);

exit:
	rcu_read_unlock();
}

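/*
 * These wrappers only differ in which direction the output handle moves
 * through the ring buffer: the backward variant serves events that
 * request backward writing (attr.write_backward), the forward variant
 * everything else.
 */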
void
perf_event_output_forward(struct perf_event *event,
			  struct perf_sample_data *data,
			  struct pt_regs *regs)
{
	__perf_event_output(event, data, regs, perf_output_begin_forward);
}

void
perf_event_output_backward(struct perf_event *event,
			   struct perf_sample_data *data,
			   struct pt_regs *regs)
{
	__perf_event_output(event, data, regs, perf_output_begin_backward);
}

void
perf_event_output(struct perf_event *event,
		  struct perf_sample_data *data,
		  struct pt_regs *regs)
{
	__perf_event_output(event, data, regs, perf_output_begin);
}

/*
 * read event_id
 */

struct perf_read_event {
	struct perf_event_header	header;

	u32				pid;
	u32				tid;
};

static void
perf_event_read_event(struct perf_event *event,
			struct task_struct *task)
{
	struct perf_output_handle handle;
	struct perf_sample_data sample;
	struct perf_read_event read_event = {
		.header = {
			.type = PERF_RECORD_READ,
			.misc = 0,
			.size = sizeof(read_event) + event->read_size,
		},
		.pid = perf_event_pid(event, task),
		.tid = perf_event_tid(event, task),
	};
	int ret;

	perf_event_header__init_id(&read_event.header, &sample, event);
	ret = perf_output_begin(&handle, event,
				read_event.header.size);
	if (ret)
		return;

	perf_output_put(&handle, read_event);
	perf_output_read(&handle, event);
	perf_event__output_id_sample(event, &handle, &sample);

	perf_output_end(&handle);
}

typedef void (perf_iterate_f)(struct perf_event *event, void *data);

static void
perf_iterate_ctx(struct perf_event_context *ctx,
		 perf_iterate_f output,
		 void *data, bool all)
{
	struct perf_event *event;

	list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
		if (!all) {
			if (event->state < PERF_EVENT_STATE_INACTIVE)
				continue;
			if (!event_filter_match(event))
				continue;
		}

		output(event, data);
	}
}

static void perf_iterate_sb_cpu(perf_iterate_f output, void *data)
{
	struct pmu_event_list *pel = this_cpu_ptr(&pmu_sb_events);
	struct perf_event *event;

	list_for_each_entry_rcu(event, &pel->list, sb_list) {
		/*
		 * Skip events that are not fully formed yet; ensure that
		 * if we observe event->ctx, both event and ctx will be
		 * complete enough. See perf_install_in_context().
		 */
		if (!smp_load_acquire(&event->ctx))
			continue;

		if (event->state < PERF_EVENT_STATE_INACTIVE)
			continue;
		if (!event_filter_match(event))
			continue;
		output(event, data);
	}
}

/*
 * Iterate all events that need to receive side-band events.
 *
 * For new callers: ensure that account_pmu_sb_event() includes
 * your event, otherwise it might not get delivered.
 */
static void
perf_iterate_sb(perf_iterate_f output, void *data,
	       struct perf_event_context *task_ctx)
{
	struct perf_event_context *ctx;
	int ctxn;

	rcu_read_lock();
	preempt_disable();

	/*
	 * If we have task_ctx != NULL we only notify the task context itself.
	 * The task_ctx is set only for EXIT events before releasing task
	 * context.
	 */
	if (task_ctx) {
		perf_iterate_ctx(task_ctx, output, data, false);
		goto done;
	}

	perf_iterate_sb_cpu(output, data);

	for_each_task_context_nr(ctxn) {
		ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
		if (ctx)
			perf_iterate_ctx(ctx, output, data, false);
	}
done:
	preempt_enable();
	rcu_read_unlock();
}

/*
 * Clear all file-based filters at exec; they'll have to be
 * re-instated when/if these objects are mmapped again.
 */
static void perf_event_addr_filters_exec(struct perf_event *event, void *data)
{
	struct perf_addr_filters_head *ifh = perf_event_addr_filters(event);
	struct perf_addr_filter *filter;
	unsigned int restart = 0, count = 0;
	unsigned long flags;

	if (!has_addr_filter(event))
		return;

	raw_spin_lock_irqsave(&ifh->lock, flags);
	list_for_each_entry(filter, &ifh->list, entry) {
		if (filter->inode) {
			event->addr_filters_offs[count] = 0;
			restart++;
		}

		count++;
	}

	if (restart)
		event->addr_filters_gen++;
	raw_spin_unlock_irqrestore(&ifh->lock, flags);

	if (restart)
		perf_event_stop(event, 1);
}

void perf_event_exec(void)
{
	struct perf_event_context *ctx;
	int ctxn;

	rcu_read_lock();
	for_each_task_context_nr(ctxn) {
		ctx = current->perf_event_ctxp[ctxn];
		if (!ctx)
			continue;

		perf_event_enable_on_exec(ctxn);

		perf_iterate_ctx(ctx, perf_event_addr_filters_exec, NULL,
				   true);
	}
	rcu_read_unlock();
}

struct remote_output {
	struct ring_buffer	*rb;
	int			err;
};

static void __perf_event_output_stop(struct perf_event *event, void *data)
{
	struct perf_event *parent = event->parent;
	struct remote_output *ro = data;
	struct ring_buffer *rb = ro->rb;
	struct stop_event_data sd = {
		.event	= event,
	};

	if (!has_aux(event))
		return;

	if (!parent)
		parent = event;

	/*
	 * In case of inheritance, it will be the parent that links to the
	 * ring-buffer, but it will be the child that's actually using it.
	 *
	 * We are using event::rb to determine if the event should be stopped;
	 * however, this may race with ring_buffer_attach() (through
	 * set_output), which would make us skip the event that actually
	 * needs to be stopped. So ring_buffer_attach() has to stop an aux
	 * event before re-assigning its rb pointer.
	 */
	if (rcu_dereference(parent->rb) == rb)
		ro->err = __perf_event_stop(&sd);
}

static int __perf_pmu_output_stop(void *info)
{
	struct perf_event *event = info;
	struct pmu *pmu = event->pmu;
	struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
	struct remote_output ro = {
		.rb	= event->rb,
	};

	rcu_read_lock();
	perf_iterate_ctx(&cpuctx->ctx, __perf_event_output_stop, &ro, false);
	if (cpuctx->task_ctx)
		perf_iterate_ctx(cpuctx->task_ctx, __perf_event_output_stop,
				   &ro, false);
	rcu_read_unlock();

	return ro.err;
}

static void perf_pmu_output_stop(struct perf_event *event)
{
	struct perf_event *iter;
	int err, cpu;

restart:
	rcu_read_lock();
	list_for_each_entry_rcu(iter, &event->rb->event_list, rb_entry) {
		/*
		 * For per-CPU events, we need to make sure that neither they
		 * nor their children are running; for cpu==-1 events it's
		 * sufficient to stop the event itself if it's active, since
		 * it can't have children.
		 */
		cpu = iter->cpu;
		if (cpu == -1)
			cpu = READ_ONCE(iter->oncpu);

		if (cpu == -1)
			continue;

		err = cpu_function_call(cpu, __perf_pmu_output_stop, event);
		if (err == -EAGAIN) {
			rcu_read_unlock();
			goto restart;
		}
	}
	rcu_read_unlock();
}

/*
 * task tracking -- fork/exit
 *
 * enabled by: attr.comm | attr.mmap | attr.mmap2 | attr.mmap_data | attr.task
 */

struct perf_task_event {
	struct task_struct		*task;
	struct perf_event_context	*task_ctx;

	struct {
		struct perf_event_header	header;

		u32				pid;
		u32				ppid;
		u32				tid;
		u32				ptid;
		u64				time;
	} event_id;
};

static int perf_event_task_match(struct perf_event *event)
{
	return event->attr.comm  || event->attr.mmap ||
	       event->attr.mmap2 || event->attr.mmap_data ||
	       event->attr.task;
}

static void perf_event_task_output(struct perf_event *event,
				   void *data)
{
	struct perf_task_event *task_event = data;
	struct perf_output_handle handle;
	struct perf_sample_data	sample;
	struct task_struct *task = task_event->task;
	int ret, size = task_event->event_id.header.size;

	if (!perf_event_task_match(event))
		return;

	perf_event_header__init_id(&task_event->event_id.header, &sample, event);

	ret = perf_output_begin(&handle, event,
				task_event->event_id.header.size);
	if (ret)
		goto out;

	task_event->event_id.pid = perf_event_pid(event, task);
	task_event->event_id.ppid = perf_event_pid(event, current);

	task_event->event_id.tid = perf_event_tid(event, task);
	task_event->event_id.ptid = perf_event_tid(event, current);

	task_event->event_id.time = perf_event_clock(event);

	perf_output_put(&handle, task_event->event_id);

	perf_event__output_id_sample(event, &handle, &sample);

	perf_output_end(&handle);
out:
	task_event->event_id.header.size = size;
}

static void perf_event_task(struct task_struct *task,
			      struct perf_event_context *task_ctx,
			      int new)
{
	struct perf_task_event task_event;

	if (!atomic_read(&nr_comm_events) &&
	    !atomic_read(&nr_mmap_events) &&
	    !atomic_read(&nr_task_events))
		return;

	task_event = (struct perf_task_event){
		.task	  = task,
		.task_ctx = task_ctx,
		.event_id    = {
			.header = {
				.type = new ? PERF_RECORD_FORK : PERF_RECORD_EXIT,
				.misc = 0,
				.size = sizeof(task_event.event_id),
			},
			/* .pid  */
			/* .ppid */
			/* .tid  */
			/* .ptid */
			/* .time */
		},
	};

	perf_iterate_sb(perf_event_task_output,
		       &task_event,
		       task_ctx);
}

void perf_event_fork(struct task_struct *task)
{
	perf_event_task(task, NULL, 1);
	perf_event_namespaces(task);
}

/*
 * comm tracking
 */

struct perf_comm_event {
	struct task_struct	*task;
	char			*comm;
	int			comm_size;

	struct {
		struct perf_event_header	header;

		u32				pid;
		u32				tid;
	} event_id;
};

static int perf_event_comm_match(struct perf_event *event)
{
	return event->attr.comm;
}

static void perf_event_comm_output(struct perf_event *event,
				   void *data)
{
	struct perf_comm_event *comm_event = data;
	struct perf_output_handle handle;
	struct perf_sample_data sample;
	int size = comm_event->event_id.header.size;
	int ret;

	if (!perf_event_comm_match(event))
		return;

	perf_event_header__init_id(&comm_event->event_id.header, &sample, event);
	ret = perf_output_begin(&handle, event,
				comm_event->event_id.header.size);

	if (ret)
		goto out;

	comm_event->event_id.pid = perf_event_pid(event, comm_event->task);
	comm_event->event_id.tid = perf_event_tid(event, comm_event->task);

	perf_output_put(&handle, comm_event->event_id);
	__output_copy(&handle, comm_event->comm,
				   comm_event->comm_size);

	perf_event__output_id_sample(event, &handle, &sample);

	perf_output_end(&handle);
out:
	comm_event->event_id.header.size = size;
}

static void perf_event_comm_event(struct perf_comm_event *comm_event)
{
	char comm[TASK_COMM_LEN];
	unsigned int size;

	memset(comm, 0, sizeof(comm));
	strlcpy(comm, comm_event->task->comm, sizeof(comm));
	size = ALIGN(strlen(comm)+1, sizeof(u64));

	comm_event->comm = comm;
	comm_event->comm_size = size;

	comm_event->event_id.header.size = sizeof(comm_event->event_id) + size;

	perf_iterate_sb(perf_event_comm_output,
		       comm_event,
		       NULL);
}

void perf_event_comm(struct task_struct *task, bool exec)
{
	struct perf_comm_event comm_event;

	if (!atomic_read(&nr_comm_events))
		return;

	comm_event = (struct perf_comm_event){
		.task	= task,
		/* .comm      */
		/* .comm_size */
		.event_id  = {
			.header = {
				.type = PERF_RECORD_COMM,
				.misc = exec ? PERF_RECORD_MISC_COMM_EXEC : 0,
				/* .size */
			},
			/* .pid */
			/* .tid */
		},
	};

	perf_event_comm_event(&comm_event);
}

/*
 * namespaces tracking
 */

struct perf_namespaces_event {
	struct task_struct		*task;

	struct {
		struct perf_event_header	header;

		u32				pid;
		u32				tid;
		u64				nr_namespaces;
		struct perf_ns_link_info	link_info[NR_NAMESPACES];
	} event_id;
};

static int perf_event_namespaces_match(struct perf_event *event)
{
	return event->attr.namespaces;
}

static void perf_event_namespaces_output(struct perf_event *event,
					 void *data)
{
	struct perf_namespaces_event *namespaces_event = data;
	struct perf_output_handle handle;
	struct perf_sample_data sample;
	u16 header_size = namespaces_event->event_id.header.size;
	int ret;

	if (!perf_event_namespaces_match(event))
		return;

	perf_event_header__init_id(&namespaces_event->event_id.header,
				   &sample, event);
	ret = perf_output_begin(&handle, event,
				namespaces_event->event_id.header.size);
	if (ret)
		goto out;

	namespaces_event->event_id.pid = perf_event_pid(event,
							namespaces_event->task);
	namespaces_event->event_id.tid = perf_event_tid(event,
							namespaces_event->task);

	perf_output_put(&handle, namespaces_event->event_id);

	perf_event__output_id_sample(event, &handle, &sample);

	perf_output_end(&handle);
out:
	namespaces_event->event_id.header.size = header_size;
}

6678static void perf_fill_ns_link_info(struct perf_ns_link_info *ns_link_info,
6679 struct task_struct *task,
6680 const struct proc_ns_operations *ns_ops)
6681{
6682 struct path ns_path;
6683 struct inode *ns_inode;
6684 void *error;
6685
6686 error = ns_get_path(&ns_path, task, ns_ops);
6687 if (!error) {
6688 ns_inode = ns_path.dentry->d_inode;
6689 ns_link_info->dev = new_encode_dev(ns_inode->i_sb->s_dev);
6690 ns_link_info->ino = ns_inode->i_ino;
Vasily Averin0e18dd12017-11-15 08:47:02 +03006691 path_put(&ns_path);
Hari Bathinie4222672017-03-08 02:11:36 +05306692 }
6693}
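/*
 * Annotation (illustrative, not from the original source): the
 * (dev, ino) pair filled in here is the same namespace identity that
 * userspace sees when it stat()s the /proc/<pid>/ns/ symlinks,
 * roughly:
 *
 *	struct stat st;
 *	stat("/proc/self/ns/mnt", &st);
 *	// st.st_ino corresponds to ns_link_info->ino, st.st_dev to the
 *	// device encoded via new_encode_dev() above
 *
 * so a consumer can match PERF_RECORD_NAMESPACES entries against the
 * namespaces of running tasks.
 */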
6694
6695void perf_event_namespaces(struct task_struct *task)
6696{
6697 struct perf_namespaces_event namespaces_event;
6698 struct perf_ns_link_info *ns_link_info;
6699
6700 if (!atomic_read(&nr_namespaces_events))
6701 return;
6702
6703 namespaces_event = (struct perf_namespaces_event){
6704 .task = task,
6705 .event_id = {
6706 .header = {
6707 .type = PERF_RECORD_NAMESPACES,
6708 .misc = 0,
6709 .size = sizeof(namespaces_event.event_id),
6710 },
6711 /* .pid */
6712 /* .tid */
6713 .nr_namespaces = NR_NAMESPACES,
6714 /* .link_info[NR_NAMESPACES] */
6715 },
6716 };
6717
6718 ns_link_info = namespaces_event.event_id.link_info;
6719
6720 perf_fill_ns_link_info(&ns_link_info[MNT_NS_INDEX],
6721 task, &mntns_operations);
6722
6723#ifdef CONFIG_USER_NS
6724 perf_fill_ns_link_info(&ns_link_info[USER_NS_INDEX],
6725 task, &userns_operations);
6726#endif
6727#ifdef CONFIG_NET_NS
6728 perf_fill_ns_link_info(&ns_link_info[NET_NS_INDEX],
6729 task, &netns_operations);
6730#endif
6731#ifdef CONFIG_UTS_NS
6732 perf_fill_ns_link_info(&ns_link_info[UTS_NS_INDEX],
6733 task, &utsns_operations);
6734#endif
6735#ifdef CONFIG_IPC_NS
6736 perf_fill_ns_link_info(&ns_link_info[IPC_NS_INDEX],
6737 task, &ipcns_operations);
6738#endif
6739#ifdef CONFIG_PID_NS
6740 perf_fill_ns_link_info(&ns_link_info[PID_NS_INDEX],
6741 task, &pidns_operations);
6742#endif
6743#ifdef CONFIG_CGROUPS
6744 perf_fill_ns_link_info(&ns_link_info[CGROUP_NS_INDEX],
6745 task, &cgroupns_operations);
6746#endif
6747
6748 perf_iterate_sb(perf_event_namespaces_output,
6749 &namespaces_event,
6750 NULL);
6751}
6752
6753/*
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006754 * mmap tracking
6755 */
6756
6757struct perf_mmap_event {
6758 struct vm_area_struct *vma;
6759
6760 const char *file_name;
6761 int file_size;
Stephane Eranian13d7a242013-08-21 12:10:24 +02006762 int maj, min;
6763 u64 ino;
6764 u64 ino_generation;
Peter Zijlstraf972eb62014-05-19 15:13:47 -04006765 u32 prot, flags;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006766
6767 struct {
6768 struct perf_event_header header;
6769
6770 u32 pid;
6771 u32 tid;
6772 u64 start;
6773 u64 len;
6774 u64 pgoff;
6775 } event_id;
6776};
6777
Jiri Olsa67516842013-07-09 18:56:31 +02006778static int perf_event_mmap_match(struct perf_event *event,
6779 void *data)
6780{
6781 struct perf_mmap_event *mmap_event = data;
6782 struct vm_area_struct *vma = mmap_event->vma;
6783 int executable = vma->vm_flags & VM_EXEC;
6784
6785 return (!executable && event->attr.mmap_data) ||
Stephane Eranian13d7a242013-08-21 12:10:24 +02006786 (executable && (event->attr.mmap || event->attr.mmap2));
Jiri Olsa67516842013-07-09 18:56:31 +02006787}
6788
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006789static void perf_event_mmap_output(struct perf_event *event,
Jiri Olsa52d857a2013-05-06 18:27:18 +02006790 void *data)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006791{
Jiri Olsa52d857a2013-05-06 18:27:18 +02006792 struct perf_mmap_event *mmap_event = data;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006793 struct perf_output_handle handle;
Arnaldo Carvalho de Meloc980d102010-12-04 23:02:20 -02006794 struct perf_sample_data sample;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006795 int size = mmap_event->event_id.header.size;
Arnaldo Carvalho de Meloc980d102010-12-04 23:02:20 -02006796 int ret;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006797
Jiri Olsa67516842013-07-09 18:56:31 +02006798 if (!perf_event_mmap_match(event, data))
6799 return;
6800
Stephane Eranian13d7a242013-08-21 12:10:24 +02006801 if (event->attr.mmap2) {
6802 mmap_event->event_id.header.type = PERF_RECORD_MMAP2;
6803 mmap_event->event_id.header.size += sizeof(mmap_event->maj);
6804 mmap_event->event_id.header.size += sizeof(mmap_event->min);
6805 mmap_event->event_id.header.size += sizeof(mmap_event->ino);
Arnaldo Carvalho de Melod008d522013-09-10 10:24:05 -03006806 mmap_event->event_id.header.size += sizeof(mmap_event->ino_generation);
Peter Zijlstraf972eb62014-05-19 15:13:47 -04006807 mmap_event->event_id.header.size += sizeof(mmap_event->prot);
6808 mmap_event->event_id.header.size += sizeof(mmap_event->flags);
Stephane Eranian13d7a242013-08-21 12:10:24 +02006809 }
6810
Arnaldo Carvalho de Meloc980d102010-12-04 23:02:20 -02006811 perf_event_header__init_id(&mmap_event->event_id.header, &sample, event);
6812 ret = perf_output_begin(&handle, event,
Peter Zijlstraa7ac67e2011-06-27 16:47:16 +02006813 mmap_event->event_id.header.size);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006814 if (ret)
Arnaldo Carvalho de Meloc980d102010-12-04 23:02:20 -02006815 goto out;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006816
6817 mmap_event->event_id.pid = perf_event_pid(event, current);
6818 mmap_event->event_id.tid = perf_event_tid(event, current);
6819
6820 perf_output_put(&handle, mmap_event->event_id);
Stephane Eranian13d7a242013-08-21 12:10:24 +02006821
6822 if (event->attr.mmap2) {
6823 perf_output_put(&handle, mmap_event->maj);
6824 perf_output_put(&handle, mmap_event->min);
6825 perf_output_put(&handle, mmap_event->ino);
6826 perf_output_put(&handle, mmap_event->ino_generation);
Peter Zijlstraf972eb62014-05-19 15:13:47 -04006827 perf_output_put(&handle, mmap_event->prot);
6828 perf_output_put(&handle, mmap_event->flags);
Stephane Eranian13d7a242013-08-21 12:10:24 +02006829 }
6830
Frederic Weisbecker76369132011-05-19 19:55:04 +02006831 __output_copy(&handle, mmap_event->file_name,
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006832 mmap_event->file_size);
Arnaldo Carvalho de Meloc980d102010-12-04 23:02:20 -02006833
6834 perf_event__output_id_sample(event, &handle, &sample);
6835
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006836 perf_output_end(&handle);
Arnaldo Carvalho de Meloc980d102010-12-04 23:02:20 -02006837out:
6838 mmap_event->event_id.header.size = size;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006839}
6840
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006841static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
6842{
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006843 struct vm_area_struct *vma = mmap_event->vma;
6844 struct file *file = vma->vm_file;
Stephane Eranian13d7a242013-08-21 12:10:24 +02006845 int maj = 0, min = 0;
6846 u64 ino = 0, gen = 0;
Peter Zijlstraf972eb62014-05-19 15:13:47 -04006847 u32 prot = 0, flags = 0;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006848 unsigned int size;
6849 char tmp[16];
6850 char *buf = NULL;
Peter Zijlstra2c42cfbf2013-10-17 00:06:46 +02006851 char *name;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006852
Peter Zijlstra0b3589b2017-01-26 23:15:08 +01006853 if (vma->vm_flags & VM_READ)
6854 prot |= PROT_READ;
6855 if (vma->vm_flags & VM_WRITE)
6856 prot |= PROT_WRITE;
6857 if (vma->vm_flags & VM_EXEC)
6858 prot |= PROT_EXEC;
6859
6860 if (vma->vm_flags & VM_MAYSHARE)
6861 flags = MAP_SHARED;
6862 else
6863 flags = MAP_PRIVATE;
6864
6865 if (vma->vm_flags & VM_DENYWRITE)
6866 flags |= MAP_DENYWRITE;
6867 if (vma->vm_flags & VM_MAYEXEC)
6868 flags |= MAP_EXECUTABLE;
6869 if (vma->vm_flags & VM_LOCKED)
6870 flags |= MAP_LOCKED;
6871 if (vma->vm_flags & VM_HUGETLB)
6872 flags |= MAP_HUGETLB;
6873
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006874 if (file) {
Stephane Eranian13d7a242013-08-21 12:10:24 +02006875 struct inode *inode;
6876 dev_t dev;
Oleg Nesterov3ea2f2b2013-10-16 22:10:04 +02006877
Peter Zijlstra2c42cfbf2013-10-17 00:06:46 +02006878 buf = kmalloc(PATH_MAX, GFP_KERNEL);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006879 if (!buf) {
Oleg Nesterovc7e548b2013-10-17 20:24:17 +02006880 name = "//enomem";
6881 goto cpy_name;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006882 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006883 /*
Oleg Nesterov3ea2f2b2013-10-16 22:10:04 +02006884 * d_path() fills the buffer from the end backwards, so we
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006885 * need to add enough zero bytes after the string to handle
6886 * the 64-bit alignment we do later.
6887 */
Miklos Szeredi9bf39ab2015-06-19 10:29:13 +02006888 name = file_path(file, buf, PATH_MAX - sizeof(u64));
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006889 if (IS_ERR(name)) {
Oleg Nesterovc7e548b2013-10-17 20:24:17 +02006890 name = "//toolong";
6891 goto cpy_name;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006892 }
Stephane Eranian13d7a242013-08-21 12:10:24 +02006893 inode = file_inode(vma->vm_file);
6894 dev = inode->i_sb->s_dev;
6895 ino = inode->i_ino;
6896 gen = inode->i_generation;
6897 maj = MAJOR(dev);
6898 min = MINOR(dev);
Peter Zijlstraf972eb62014-05-19 15:13:47 -04006899
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006900 goto got_name;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006901 } else {
Jiri Olsafbe26ab2014-07-14 17:57:19 +02006902 if (vma->vm_ops && vma->vm_ops->name) {
6903 name = (char *) vma->vm_ops->name(vma);
6904 if (name)
6905 goto cpy_name;
6906 }
6907
Peter Zijlstra2c42cfbf2013-10-17 00:06:46 +02006908 name = (char *)arch_vma_name(vma);
Oleg Nesterovc7e548b2013-10-17 20:24:17 +02006909 if (name)
6910 goto cpy_name;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006911
Oleg Nesterov32c5fb72013-10-16 22:09:45 +02006912 if (vma->vm_start <= vma->vm_mm->start_brk &&
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006913 vma->vm_end >= vma->vm_mm->brk) {
Oleg Nesterovc7e548b2013-10-17 20:24:17 +02006914 name = "[heap]";
6915 goto cpy_name;
Oleg Nesterov32c5fb72013-10-16 22:09:45 +02006916 }
6917 if (vma->vm_start <= vma->vm_mm->start_stack &&
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006918 vma->vm_end >= vma->vm_mm->start_stack) {
Oleg Nesterovc7e548b2013-10-17 20:24:17 +02006919 name = "[stack]";
6920 goto cpy_name;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006921 }
6922
Oleg Nesterovc7e548b2013-10-17 20:24:17 +02006923 name = "//anon";
6924 goto cpy_name;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006925 }
6926
Oleg Nesterovc7e548b2013-10-17 20:24:17 +02006927cpy_name:
6928 strlcpy(tmp, name, sizeof(tmp));
6929 name = tmp;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006930got_name:
Peter Zijlstra2c42cfbf2013-10-17 00:06:46 +02006931 /*
6932 * Since our buffer works in 8 byte units we need to align our string
6933 * size to a multiple of 8. However, we must guarantee the tail end is
6934 * zero'd out to avoid leaking random bits to userspace.
6935 */
6936 size = strlen(name)+1;
6937 while (!IS_ALIGNED(size, sizeof(u64)))
6938 name[size++] = '\0';
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006939
6940 mmap_event->file_name = name;
6941 mmap_event->file_size = size;
Stephane Eranian13d7a242013-08-21 12:10:24 +02006942 mmap_event->maj = maj;
6943 mmap_event->min = min;
6944 mmap_event->ino = ino;
6945 mmap_event->ino_generation = gen;
Peter Zijlstraf972eb62014-05-19 15:13:47 -04006946 mmap_event->prot = prot;
6947 mmap_event->flags = flags;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006948
Stephane Eranian2fe85422013-01-24 16:10:39 +01006949 if (!(vma->vm_flags & VM_EXEC))
6950 mmap_event->event_id.header.misc |= PERF_RECORD_MISC_MMAP_DATA;
6951
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006952 mmap_event->event_id.header.size = sizeof(mmap_event->event_id) + size;
6953
Peter Zijlstraaab5b712016-05-12 17:26:46 +02006954 perf_iterate_sb(perf_event_mmap_output,
Jiri Olsa52d857a2013-05-06 18:27:18 +02006955 mmap_event,
6956 NULL);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006957
6958 kfree(buf);
6959}
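/*
 * Annotation: the naming above falls back in a fixed order -- the
 * file path via file_path(), then vma->vm_ops->name() (e.g. "[vdso]"),
 * then arch_vma_name(), then the synthetic "[heap]" / "[stack]"
 * markers, and finally "//anon".  The double-slash names ("//enomem",
 * "//toolong", "//anon") cannot collide with real paths, since
 * resolved absolute paths never contain consecutive slashes.
 */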
6960
Alexander Shishkin375637b2016-04-27 18:44:46 +03006961/*
Alexander Shishkin375637b2016-04-27 18:44:46 +03006962 * Check whether inode and address range match filter criteria.
6963 */
6964static bool perf_addr_filter_match(struct perf_addr_filter *filter,
6965 struct file *file, unsigned long offset,
6966 unsigned long size)
6967{
Al Viro45063092016-12-04 18:24:56 -05006968 if (filter->inode != file_inode(file))
Alexander Shishkin375637b2016-04-27 18:44:46 +03006969 return false;
6970
6971 if (filter->offset > offset + size)
6972 return false;
6973
6974 if (filter->offset + filter->size < offset)
6975 return false;
6976
6977 return true;
6978}
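/*
 * Annotation (illustrative numbers, not from the original source):
 * the two early returns above implement a standard interval-overlap
 * test.  A filter at file offset 0x1000 with size 0x500 matches an
 * mmap covering file range [0x1200, 0x1300) because
 *
 *	filter->offset (0x1000)                <= offset + size (0x1300)
 *	filter->offset + filter->size (0x1500) >= offset        (0x1200)
 *
 * i.e. the filter range and the mapped file range intersect.
 */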
6979
6980static void __perf_addr_filters_adjust(struct perf_event *event, void *data)
6981{
6982 struct perf_addr_filters_head *ifh = perf_event_addr_filters(event);
6983 struct vm_area_struct *vma = data;
6984 unsigned long off = vma->vm_pgoff << PAGE_SHIFT, flags;
6985 struct file *file = vma->vm_file;
6986 struct perf_addr_filter *filter;
6987 unsigned int restart = 0, count = 0;
6988
6989 if (!has_addr_filter(event))
6990 return;
6991
6992 if (!file)
6993 return;
6994
6995 raw_spin_lock_irqsave(&ifh->lock, flags);
6996 list_for_each_entry(filter, &ifh->list, entry) {
6997 if (perf_addr_filter_match(filter, file, off,
6998 vma->vm_end - vma->vm_start)) {
6999 event->addr_filters_offs[count] = vma->vm_start;
7000 restart++;
7001 }
7002
7003 count++;
7004 }
7005
7006 if (restart)
7007 event->addr_filters_gen++;
7008 raw_spin_unlock_irqrestore(&ifh->lock, flags);
7009
7010 if (restart)
Alexander Shishkin767ae082016-09-06 16:23:49 +03007011 perf_event_stop(event, 1);
Alexander Shishkin375637b2016-04-27 18:44:46 +03007012}
7013
7014/*
7015 * Adjust all task's events' filters to the new vma
7016 */
7017static void perf_addr_filters_adjust(struct vm_area_struct *vma)
7018{
7019 struct perf_event_context *ctx;
7020 int ctxn;
7021
Mathieu Poirier12b40a22016-07-18 10:43:06 -06007022 /*
7023 * Data tracing isn't supported yet and as such there is no need
7024 * to keep track of anything that isn't related to executable code:
7025 */
7026 if (!(vma->vm_flags & VM_EXEC))
7027 return;
7028
Alexander Shishkin375637b2016-04-27 18:44:46 +03007029 rcu_read_lock();
7030 for_each_task_context_nr(ctxn) {
7031 ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
7032 if (!ctx)
7033 continue;
7034
Peter Zijlstraaab5b712016-05-12 17:26:46 +02007035 perf_iterate_ctx(ctx, __perf_addr_filters_adjust, vma, true);
Alexander Shishkin375637b2016-04-27 18:44:46 +03007036 }
7037 rcu_read_unlock();
7038}
7039
Eric B Munson3af9e852010-05-18 15:30:49 +01007040void perf_event_mmap(struct vm_area_struct *vma)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02007041{
7042 struct perf_mmap_event mmap_event;
7043
7044 if (!atomic_read(&nr_mmap_events))
7045 return;
7046
7047 mmap_event = (struct perf_mmap_event){
7048 .vma = vma,
7049 /* .file_name */
7050 /* .file_size */
7051 .event_id = {
7052 .header = {
7053 .type = PERF_RECORD_MMAP,
Zhang, Yanmin39447b32010-04-19 13:32:41 +08007054 .misc = PERF_RECORD_MISC_USER,
Ingo Molnarcdd6c482009-09-21 12:02:48 +02007055 /* .size */
7056 },
7057 /* .pid */
7058 /* .tid */
7059 .start = vma->vm_start,
7060 .len = vma->vm_end - vma->vm_start,
Peter Zijlstra3a0304e2010-02-26 10:33:41 +01007061 .pgoff = (u64)vma->vm_pgoff << PAGE_SHIFT,
Ingo Molnarcdd6c482009-09-21 12:02:48 +02007062 },
Stephane Eranian13d7a242013-08-21 12:10:24 +02007063 /* .maj (attr_mmap2 only) */
7064 /* .min (attr_mmap2 only) */
7065 /* .ino (attr_mmap2 only) */
7066 /* .ino_generation (attr_mmap2 only) */
Peter Zijlstraf972eb62014-05-19 15:13:47 -04007067 /* .prot (attr_mmap2 only) */
7068 /* .flags (attr_mmap2 only) */
Ingo Molnarcdd6c482009-09-21 12:02:48 +02007069 };
7070
Alexander Shishkin375637b2016-04-27 18:44:46 +03007071 perf_addr_filters_adjust(vma);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02007072 perf_event_mmap_event(&mmap_event);
7073}
7074
Alexander Shishkin68db7e92015-01-14 14:18:15 +02007075void perf_event_aux_event(struct perf_event *event, unsigned long head,
7076 unsigned long size, u64 flags)
7077{
7078 struct perf_output_handle handle;
7079 struct perf_sample_data sample;
7080 struct perf_aux_event {
7081 struct perf_event_header header;
7082 u64 offset;
7083 u64 size;
7084 u64 flags;
7085 } rec = {
7086 .header = {
7087 .type = PERF_RECORD_AUX,
7088 .misc = 0,
7089 .size = sizeof(rec),
7090 },
7091 .offset = head,
7092 .size = size,
7093 .flags = flags,
7094 };
7095 int ret;
7096
7097 perf_event_header__init_id(&rec.header, &sample, event);
7098 ret = perf_output_begin(&handle, event, rec.header.size);
7099
7100 if (ret)
7101 return;
7102
7103 perf_output_put(&handle, rec);
7104 perf_event__output_id_sample(event, &handle, &sample);
7105
7106 perf_output_end(&handle);
7107}
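/*
 * Annotation: PERF_RECORD_AUX describes a chunk of data the PMU wrote
 * into the AUX area (offset/size are positions in AUX space, not in
 * the regular ring buffer), and flags such as PERF_AUX_FLAG_TRUNCATED
 * tell the consumer that hardware dropped data for that chunk.
 */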
7108
Ingo Molnarcdd6c482009-09-21 12:02:48 +02007109/*
Kan Liangf38b0db2015-05-10 15:13:14 -04007110 * Lost/dropped samples logging
7111 */
7112void perf_log_lost_samples(struct perf_event *event, u64 lost)
7113{
7114 struct perf_output_handle handle;
7115 struct perf_sample_data sample;
7116 int ret;
7117
7118 struct {
7119 struct perf_event_header header;
7120 u64 lost;
7121 } lost_samples_event = {
7122 .header = {
7123 .type = PERF_RECORD_LOST_SAMPLES,
7124 .misc = 0,
7125 .size = sizeof(lost_samples_event),
7126 },
7127 .lost = lost,
7128 };
7129
7130 perf_event_header__init_id(&lost_samples_event.header, &sample, event);
7131
7132 ret = perf_output_begin(&handle, event,
7133 lost_samples_event.header.size);
7134 if (ret)
7135 return;
7136
7137 perf_output_put(&handle, lost_samples_event);
7138 perf_event__output_id_sample(event, &handle, &sample);
7139 perf_output_end(&handle);
7140}
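/*
 * Annotation: PMU drivers call this when hardware or the drain path
 * had to discard samples (the x86 PEBS buffer drain is one such
 * caller), so the consumer sees an explicit PERF_RECORD_LOST_SAMPLES
 * count instead of a silent gap in the stream.
 */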
7141
7142/*
Adrian Hunter45ac1402015-07-21 12:44:02 +03007143 * context_switch tracking
7144 */
7145
7146struct perf_switch_event {
7147 struct task_struct *task;
7148 struct task_struct *next_prev;
7149
7150 struct {
7151 struct perf_event_header header;
7152 u32 next_prev_pid;
7153 u32 next_prev_tid;
7154 } event_id;
7155};
7156
7157static int perf_event_switch_match(struct perf_event *event)
7158{
7159 return event->attr.context_switch;
7160}
7161
7162static void perf_event_switch_output(struct perf_event *event, void *data)
7163{
7164 struct perf_switch_event *se = data;
7165 struct perf_output_handle handle;
7166 struct perf_sample_data sample;
7167 int ret;
7168
7169 if (!perf_event_switch_match(event))
7170 return;
7171
7172 /* Only CPU-wide events are allowed to see next/prev pid/tid */
7173 if (event->ctx->task) {
7174 se->event_id.header.type = PERF_RECORD_SWITCH;
7175 se->event_id.header.size = sizeof(se->event_id.header);
7176 } else {
7177 se->event_id.header.type = PERF_RECORD_SWITCH_CPU_WIDE;
7178 se->event_id.header.size = sizeof(se->event_id);
7179 se->event_id.next_prev_pid =
7180 perf_event_pid(event, se->next_prev);
7181 se->event_id.next_prev_tid =
7182 perf_event_tid(event, se->next_prev);
7183 }
7184
7185 perf_event_header__init_id(&se->event_id.header, &sample, event);
7186
7187 ret = perf_output_begin(&handle, event, se->event_id.header.size);
7188 if (ret)
7189 return;
7190
7191 if (event->ctx->task)
7192 perf_output_put(&handle, se->event_id.header);
7193 else
7194 perf_output_put(&handle, se->event_id);
7195
7196 perf_event__output_id_sample(event, &handle, &sample);
7197
7198 perf_output_end(&handle);
7199}
7200
7201static void perf_event_switch(struct task_struct *task,
7202 struct task_struct *next_prev, bool sched_in)
7203{
7204 struct perf_switch_event switch_event;
7205
7206 /* N.B. caller checks nr_switch_events != 0 */
7207
7208 switch_event = (struct perf_switch_event){
7209 .task = task,
7210 .next_prev = next_prev,
7211 .event_id = {
7212 .header = {
7213 /* .type */
7214 .misc = sched_in ? 0 : PERF_RECORD_MISC_SWITCH_OUT,
7215 /* .size */
7216 },
7217 /* .next_prev_pid */
7218 /* .next_prev_tid */
7219 },
7220 };
7221
Peter Zijlstraaab5b712016-05-12 17:26:46 +02007222 perf_iterate_sb(perf_event_switch_output,
Adrian Hunter45ac1402015-07-21 12:44:02 +03007223 &switch_event,
7224 NULL);
7225}
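/*
 * Annotation: a consumer therefore sees one of two layouts --
 * PERF_RECORD_SWITCH, which is just the header (plus sample_id) for
 * per-task events, or PERF_RECORD_SWITCH_CPU_WIDE, which additionally
 * carries next_prev_pid/next_prev_tid.  Sched-in vs. sched-out is
 * distinguished by PERF_RECORD_MISC_SWITCH_OUT in header.misc.
 */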
7226
7227/*
Ingo Molnarcdd6c482009-09-21 12:02:48 +02007228 * IRQ throttle logging
7229 */
7230
7231static void perf_log_throttle(struct perf_event *event, int enable)
7232{
7233 struct perf_output_handle handle;
Arnaldo Carvalho de Meloc980d102010-12-04 23:02:20 -02007234 struct perf_sample_data sample;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02007235 int ret;
7236
7237 struct {
7238 struct perf_event_header header;
7239 u64 time;
7240 u64 id;
7241 u64 stream_id;
7242 } throttle_event = {
7243 .header = {
7244 .type = PERF_RECORD_THROTTLE,
7245 .misc = 0,
7246 .size = sizeof(throttle_event),
7247 },
Peter Zijlstra34f43922015-02-20 14:05:38 +01007248 .time = perf_event_clock(event),
Ingo Molnarcdd6c482009-09-21 12:02:48 +02007249 .id = primary_event_id(event),
7250 .stream_id = event->id,
7251 };
7252
7253 if (enable)
7254 throttle_event.header.type = PERF_RECORD_UNTHROTTLE;
7255
Arnaldo Carvalho de Meloc980d102010-12-04 23:02:20 -02007256 perf_event_header__init_id(&throttle_event.header, &sample, event);
7257
7258 ret = perf_output_begin(&handle, event,
Peter Zijlstraa7ac67e2011-06-27 16:47:16 +02007259 throttle_event.header.size);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02007260 if (ret)
7261 return;
7262
7263 perf_output_put(&handle, throttle_event);
Arnaldo Carvalho de Meloc980d102010-12-04 23:02:20 -02007264 perf_event__output_id_sample(event, &handle, &sample);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02007265 perf_output_end(&handle);
7266}
7267
Alexander Shishkin8d4e6c42017-03-30 18:39:56 +03007268void perf_event_itrace_started(struct perf_event *event)
7269{
7270 event->attach_state |= PERF_ATTACH_ITRACE;
7271}
7272
Alexander Shishkinec0d7722015-01-14 14:18:23 +02007273static void perf_log_itrace_start(struct perf_event *event)
7274{
7275 struct perf_output_handle handle;
7276 struct perf_sample_data sample;
7277 struct perf_aux_event {
7278 struct perf_event_header header;
7279 u32 pid;
7280 u32 tid;
7281 } rec;
7282 int ret;
7283
7284 if (event->parent)
7285 event = event->parent;
7286
7287 if (!(event->pmu->capabilities & PERF_PMU_CAP_ITRACE) ||
Alexander Shishkin8d4e6c42017-03-30 18:39:56 +03007288 event->attach_state & PERF_ATTACH_ITRACE)
Alexander Shishkinec0d7722015-01-14 14:18:23 +02007289 return;
7290
Alexander Shishkinec0d7722015-01-14 14:18:23 +02007291 rec.header.type = PERF_RECORD_ITRACE_START;
7292 rec.header.misc = 0;
7293 rec.header.size = sizeof(rec);
7294 rec.pid = perf_event_pid(event, current);
7295 rec.tid = perf_event_tid(event, current);
7296
7297 perf_event_header__init_id(&rec.header, &sample, event);
7298 ret = perf_output_begin(&handle, event, rec.header.size);
7299
7300 if (ret)
7301 return;
7302
7303 perf_output_put(&handle, rec);
7304 perf_event__output_id_sample(event, &handle, &sample);
7305
7306 perf_output_end(&handle);
7307}
7308
Jiri Olsa475113d2016-12-28 14:31:03 +01007309static int
7310__perf_event_account_interrupt(struct perf_event *event, int throttle)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02007311{
Ingo Molnarcdd6c482009-09-21 12:02:48 +02007312 struct hw_perf_event *hwc = &event->hw;
7313 int ret = 0;
Jiri Olsa475113d2016-12-28 14:31:03 +01007314 u64 seq;
Peter Zijlstra96398822010-11-24 18:55:29 +01007315
Stephane Eraniane050e3f2012-01-26 17:03:19 +01007316 seq = __this_cpu_read(perf_throttled_seq);
7317 if (seq != hwc->interrupts_seq) {
7318 hwc->interrupts_seq = seq;
7319 hwc->interrupts = 1;
7320 } else {
7321 hwc->interrupts++;
7322 if (unlikely(throttle
7323 && hwc->interrupts >= max_samples_per_tick)) {
7324 __this_cpu_inc(perf_throttled_count);
Frederic Weisbecker555e0c12015-07-16 17:42:29 +02007325 tick_dep_set_cpu(smp_processor_id(), TICK_DEP_BIT_PERF_EVENTS);
Peter Zijlstra163ec432011-02-16 11:22:34 +01007326 hwc->interrupts = MAX_INTERRUPTS;
7327 perf_log_throttle(event, 0);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02007328 ret = 1;
7329 }
Stephane Eraniane050e3f2012-01-26 17:03:19 +01007330 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02007331
7332 if (event->attr.freq) {
7333 u64 now = perf_clock();
Peter Zijlstraabd50712010-01-26 18:50:16 +01007334 s64 delta = now - hwc->freq_time_stamp;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02007335
Peter Zijlstraabd50712010-01-26 18:50:16 +01007336 hwc->freq_time_stamp = now;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02007337
Peter Zijlstraabd50712010-01-26 18:50:16 +01007338 if (delta > 0 && delta < 2*TICK_NSEC)
Stephane Eranianf39d47f2012-02-07 14:39:57 +01007339 perf_adjust_period(event, delta, hwc->last_period, true);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02007340 }
7341
Jiri Olsa475113d2016-12-28 14:31:03 +01007342 return ret;
7343}
7344
7345int perf_event_account_interrupt(struct perf_event *event)
7346{
7347 return __perf_event_account_interrupt(event, 1);
7348}
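/*
 * Annotation (illustrative numbers): perf_throttled_seq advances once
 * per timer tick, so hwc->interrupts effectively counts PMIs within
 * the current tick.  With max_samples_per_tick == 25, the 25th PMI in
 * one tick bumps perf_throttled_count, pins hwc->interrupts at
 * MAX_INTERRUPTS, logs PERF_RECORD_THROTTLE and returns 1, so the
 * caller can stop sampling until the tick-time unthrottle path resets
 * the event.
 */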
7349
7350/*
7351 * Generic event overflow handling, sampling.
7352 */
7353
7354static int __perf_event_overflow(struct perf_event *event,
7355 int throttle, struct perf_sample_data *data,
7356 struct pt_regs *regs)
7357{
7358 int events = atomic_read(&event->event_limit);
7359 int ret = 0;
7360
7361 /*
7362 * Non-sampling counters might still use the PMI to fold short
7363 * hardware counters; ignore those.
7364 */
7365 if (unlikely(!is_sampling_event(event)))
7366 return 0;
7367
7368 ret = __perf_event_account_interrupt(event, throttle);
7369
Ingo Molnarcdd6c482009-09-21 12:02:48 +02007370 /*
7371 * XXX event_limit might not quite work as expected on inherited
7372 * events
7373 */
7374
7375 event->pending_kill = POLL_IN;
7376 if (events && atomic_dec_and_test(&event->event_limit)) {
7377 ret = 1;
7378 event->pending_kill = POLL_HUP;
Jiri Olsa5aab90c2016-10-26 11:48:24 +02007379
7380 perf_event_disable_inatomic(event);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02007381 }
7382
Alexei Starovoitovaa6a5f32016-09-01 18:37:24 -07007383 READ_ONCE(event->overflow_handler)(event, data, regs);
Peter Zijlstra453f19e2009-11-20 22:19:43 +01007384
Peter Zijlstrafed66e2cd2015-06-11 10:32:01 +02007385 if (*perf_event_fasync(event) && event->pending_kill) {
Peter Zijlstraa8b0ca12011-06-27 14:41:57 +02007386 event->pending_wakeup = 1;
7387 irq_work_queue(&event->pending);
Peter Zijlstraf506b3d2011-05-26 17:02:53 +02007388 }
7389
Ingo Molnarcdd6c482009-09-21 12:02:48 +02007390 return ret;
7391}
7392
Peter Zijlstraa8b0ca12011-06-27 14:41:57 +02007393int perf_event_overflow(struct perf_event *event,
Ingo Molnarcdd6c482009-09-21 12:02:48 +02007394 struct perf_sample_data *data,
7395 struct pt_regs *regs)
7396{
Peter Zijlstraa8b0ca12011-06-27 14:41:57 +02007397 return __perf_event_overflow(event, 1, data, regs);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02007398}
7399
7400/*
7401 * Generic software event infrastructure
7402 */
7403
Peter Zijlstrab28ab832010-09-06 14:48:15 +02007404struct swevent_htable {
7405 struct swevent_hlist *swevent_hlist;
7406 struct mutex hlist_mutex;
7407 int hlist_refcount;
7408
7409 /* Recursion avoidance in each contexts */
7410 int recursion[PERF_NR_CONTEXTS];
7411};
7412
7413static DEFINE_PER_CPU(struct swevent_htable, swevent_htable);
7414
Ingo Molnarcdd6c482009-09-21 12:02:48 +02007415/*
7416 * We directly increment event->count and keep a second value in
7417 * event->hw.period_left to count intervals. This period value
7418 * is kept in the range [-sample_period, 0] so that we can use the
7419 * sign as the overflow trigger.
7420 */
7421
Jiri Olsaab573842013-05-01 17:25:44 +02007422u64 perf_swevent_set_period(struct perf_event *event)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02007423{
7424 struct hw_perf_event *hwc = &event->hw;
7425 u64 period = hwc->last_period;
7426 u64 nr, offset;
7427 s64 old, val;
7428
7429 hwc->last_period = hwc->sample_period;
7430
7431again:
Peter Zijlstrae7850592010-05-21 14:43:08 +02007432 old = val = local64_read(&hwc->period_left);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02007433 if (val < 0)
7434 return 0;
7435
7436 nr = div64_u64(period + val, period);
7437 offset = nr * period;
7438 val -= offset;
Peter Zijlstrae7850592010-05-21 14:43:08 +02007439 if (local64_cmpxchg(&hwc->period_left, old, val) != old)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02007440 goto again;
7441
7442 return nr;
7443}
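/*
 * Annotation (illustrative numbers, not from the original source):
 * with hwc->sample_period == 100 and period_left == 150, i.e. the
 * count ran 150 past the trigger point:
 *
 *	nr     = div64_u64(100 + 150, 100) = 2;	// two overflows owed
 *	offset = 2 * 100 = 200;
 *	val    = 150 - 200 = -50;		// next overflow in 50 events
 *
 * which puts period_left back into [-sample_period, 0) as the comment
 * above requires.
 */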
7444
Peter Zijlstra0cff7842009-11-20 22:19:44 +01007445static void perf_swevent_overflow(struct perf_event *event, u64 overflow,
Peter Zijlstraa8b0ca12011-06-27 14:41:57 +02007446 struct perf_sample_data *data,
Ingo Molnarcdd6c482009-09-21 12:02:48 +02007447 struct pt_regs *regs)
7448{
7449 struct hw_perf_event *hwc = &event->hw;
7450 int throttle = 0;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02007451
Peter Zijlstra0cff7842009-11-20 22:19:44 +01007452 if (!overflow)
7453 overflow = perf_swevent_set_period(event);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02007454
7455 if (hwc->interrupts == MAX_INTERRUPTS)
7456 return;
7457
7458 for (; overflow; overflow--) {
Peter Zijlstraa8b0ca12011-06-27 14:41:57 +02007459 if (__perf_event_overflow(event, throttle,
Ingo Molnarcdd6c482009-09-21 12:02:48 +02007460 data, regs)) {
7461 /*
7462 * We inhibit the overflow from happening when
7463 * hwc->interrupts == MAX_INTERRUPTS.
7464 */
7465 break;
7466 }
7467 throttle = 1;
7468 }
7469}
7470
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02007471static void perf_swevent_event(struct perf_event *event, u64 nr,
Peter Zijlstraa8b0ca12011-06-27 14:41:57 +02007472 struct perf_sample_data *data,
Ingo Molnarcdd6c482009-09-21 12:02:48 +02007473 struct pt_regs *regs)
7474{
7475 struct hw_perf_event *hwc = &event->hw;
7476
Peter Zijlstrae7850592010-05-21 14:43:08 +02007477 local64_add(nr, &event->count);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02007478
Ingo Molnarcdd6c482009-09-21 12:02:48 +02007479 if (!regs)
7480 return;
7481
Franck Bui-Huu6c7e5502010-11-23 16:21:43 +01007482 if (!is_sampling_event(event))
Peter Zijlstra0cff7842009-11-20 22:19:44 +01007483 return;
7484
Andrew Vagin5d81e5c2011-11-07 15:54:12 +03007485 if ((event->attr.sample_type & PERF_SAMPLE_PERIOD) && !event->attr.freq) {
7486 data->period = nr;
7487 return perf_swevent_overflow(event, 1, data, regs);
7488 } else
7489 data->period = event->hw.last_period;
7490
Peter Zijlstra0cff7842009-11-20 22:19:44 +01007491 if (nr == 1 && hwc->sample_period == 1 && !event->attr.freq)
Peter Zijlstraa8b0ca12011-06-27 14:41:57 +02007492 return perf_swevent_overflow(event, 1, data, regs);
Peter Zijlstra0cff7842009-11-20 22:19:44 +01007493
Peter Zijlstrae7850592010-05-21 14:43:08 +02007494 if (local64_add_negative(nr, &hwc->period_left))
Peter Zijlstra0cff7842009-11-20 22:19:44 +01007495 return;
7496
Peter Zijlstraa8b0ca12011-06-27 14:41:57 +02007497 perf_swevent_overflow(event, 0, data, regs);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02007498}
7499
Frederic Weisbeckerf5ffe022009-11-23 15:42:34 +01007500static int perf_exclude_event(struct perf_event *event,
7501 struct pt_regs *regs)
7502{
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02007503 if (event->hw.state & PERF_HES_STOPPED)
Frederic Weisbecker91b2f482011-03-07 21:27:08 +01007504 return 1;
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02007505
Frederic Weisbeckerf5ffe022009-11-23 15:42:34 +01007506 if (regs) {
7507 if (event->attr.exclude_user && user_mode(regs))
7508 return 1;
7509
7510 if (event->attr.exclude_kernel && !user_mode(regs))
7511 return 1;
7512 }
7513
7514 return 0;
7515}
7516
Ingo Molnarcdd6c482009-09-21 12:02:48 +02007517static int perf_swevent_match(struct perf_event *event,
7518 enum perf_type_id type,
Li Zefan6fb29152009-10-15 11:21:42 +08007519 u32 event_id,
7520 struct perf_sample_data *data,
7521 struct pt_regs *regs)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02007522{
Ingo Molnarcdd6c482009-09-21 12:02:48 +02007523 if (event->attr.type != type)
7524 return 0;
Frederic Weisbeckerf5ffe022009-11-23 15:42:34 +01007525
Ingo Molnarcdd6c482009-09-21 12:02:48 +02007526 if (event->attr.config != event_id)
7527 return 0;
7528
Frederic Weisbeckerf5ffe022009-11-23 15:42:34 +01007529 if (perf_exclude_event(event, regs))
7530 return 0;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02007531
7532 return 1;
7533}
7534
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02007535static inline u64 swevent_hash(u64 type, u32 event_id)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02007536{
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02007537 u64 val = event_id | (type << 32);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02007538
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02007539 return hash_64(val, SWEVENT_HLIST_BITS);
7540}
7541
Frederic Weisbecker49f135e2010-05-20 10:17:46 +02007542static inline struct hlist_head *
7543__find_swevent_head(struct swevent_hlist *hlist, u64 type, u32 event_id)
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02007544{
Frederic Weisbecker49f135e2010-05-20 10:17:46 +02007545 u64 hash = swevent_hash(type, event_id);
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02007546
Frederic Weisbecker49f135e2010-05-20 10:17:46 +02007547 return &hlist->heads[hash];
7548}
7549
7550/* For the read side: events when they trigger */
7551static inline struct hlist_head *
Peter Zijlstrab28ab832010-09-06 14:48:15 +02007552find_swevent_head_rcu(struct swevent_htable *swhash, u64 type, u32 event_id)
Frederic Weisbecker49f135e2010-05-20 10:17:46 +02007553{
7554 struct swevent_hlist *hlist;
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02007555
Peter Zijlstrab28ab832010-09-06 14:48:15 +02007556 hlist = rcu_dereference(swhash->swevent_hlist);
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02007557 if (!hlist)
7558 return NULL;
7559
Frederic Weisbecker49f135e2010-05-20 10:17:46 +02007560 return __find_swevent_head(hlist, type, event_id);
7561}
7562
7563/* For the event head insertion and removal in the hlist */
7564static inline struct hlist_head *
Peter Zijlstrab28ab832010-09-06 14:48:15 +02007565find_swevent_head(struct swevent_htable *swhash, struct perf_event *event)
Frederic Weisbecker49f135e2010-05-20 10:17:46 +02007566{
7567 struct swevent_hlist *hlist;
7568 u32 event_id = event->attr.config;
7569 u64 type = event->attr.type;
7570
7571 /*
7572 * Event scheduling is always serialized against hlist allocation
7573 * and release, which makes the protected version suitable here.
7574 * The context lock guarantees that.
7575 */
Peter Zijlstrab28ab832010-09-06 14:48:15 +02007576 hlist = rcu_dereference_protected(swhash->swevent_hlist,
Frederic Weisbecker49f135e2010-05-20 10:17:46 +02007577 lockdep_is_held(&event->ctx->lock));
7578 if (!hlist)
7579 return NULL;
7580
7581 return __find_swevent_head(hlist, type, event_id);
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02007582}
7583
7584static void do_perf_sw_event(enum perf_type_id type, u32 event_id,
Peter Zijlstraa8b0ca12011-06-27 14:41:57 +02007585 u64 nr,
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02007586 struct perf_sample_data *data,
7587 struct pt_regs *regs)
7588{
Christoph Lameter4a32fea2014-08-17 12:30:27 -05007589 struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable);
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02007590 struct perf_event *event;
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02007591 struct hlist_head *head;
7592
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02007593 rcu_read_lock();
Peter Zijlstrab28ab832010-09-06 14:48:15 +02007594 head = find_swevent_head_rcu(swhash, type, event_id);
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02007595 if (!head)
7596 goto end;
7597
Sasha Levinb67bfe02013-02-27 17:06:00 -08007598 hlist_for_each_entry_rcu(event, head, hlist_entry) {
Li Zefan6fb29152009-10-15 11:21:42 +08007599 if (perf_swevent_match(event, type, event_id, data, regs))
Peter Zijlstraa8b0ca12011-06-27 14:41:57 +02007600 perf_swevent_event(event, nr, data, regs);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02007601 }
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02007602end:
7603 rcu_read_unlock();
Ingo Molnarcdd6c482009-09-21 12:02:48 +02007604}
7605
Peter Zijlstra (Intel)86038c52014-12-16 12:47:34 +01007606DEFINE_PER_CPU(struct pt_regs, __perf_regs[4]);
7607
Peter Zijlstra4ed7c922009-11-23 11:37:29 +01007608int perf_swevent_get_recursion_context(void)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02007609{
Christoph Lameter4a32fea2014-08-17 12:30:27 -05007610 struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable);
Frederic Weisbeckerce71b9d2009-11-22 05:26:55 +01007611
Peter Zijlstrab28ab832010-09-06 14:48:15 +02007612 return get_recursion_context(swhash->recursion);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02007613}
Ingo Molnar645e8cc2009-11-22 12:20:19 +01007614EXPORT_SYMBOL_GPL(perf_swevent_get_recursion_context);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02007615
Alexei Starovoitov98b5c2c2016-04-06 18:43:25 -07007616void perf_swevent_put_recursion_context(int rctx)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02007617{
Christoph Lameter4a32fea2014-08-17 12:30:27 -05007618 struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable);
Frederic Weisbecker927c7a92010-07-01 16:20:36 +02007619
Peter Zijlstrab28ab832010-09-06 14:48:15 +02007620 put_recursion_context(swhash->recursion, rctx);
Frederic Weisbeckerce71b9d2009-11-22 05:26:55 +01007621}
Ingo Molnarcdd6c482009-09-21 12:02:48 +02007622
Peter Zijlstra (Intel)86038c52014-12-16 12:47:34 +01007623void ___perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02007624{
Ingo Molnara4234bf2009-11-23 10:57:59 +01007625 struct perf_sample_data data;
Peter Zijlstra (Intel)86038c52014-12-16 12:47:34 +01007626
7627 if (WARN_ON_ONCE(!regs))
7628 return;
7629
7630 perf_sample_data_init(&data, addr, 0);
7631 do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, &data, regs);
7632}
7633
7634void __perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
7635{
Peter Zijlstra4ed7c922009-11-23 11:37:29 +01007636 int rctx;
7637
Peter Zijlstra1c024eca2010-05-19 14:02:22 +02007638 preempt_disable_notrace();
Peter Zijlstra4ed7c922009-11-23 11:37:29 +01007639 rctx = perf_swevent_get_recursion_context();
Peter Zijlstra (Intel)86038c52014-12-16 12:47:34 +01007640 if (unlikely(rctx < 0))
7641 goto fail;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02007642
Peter Zijlstra (Intel)86038c52014-12-16 12:47:34 +01007643 ___perf_sw_event(event_id, nr, regs, addr);
Peter Zijlstra4ed7c922009-11-23 11:37:29 +01007644
7645 perf_swevent_put_recursion_context(rctx);
Peter Zijlstra (Intel)86038c52014-12-16 12:47:34 +01007646fail:
Peter Zijlstra1c024eca2010-05-19 14:02:22 +02007647 preempt_enable_notrace();
Ingo Molnarcdd6c482009-09-21 12:02:48 +02007648}
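/*
 * Annotation: callers elsewhere in the kernel reach this through the
 * perf_sw_event() wrapper; for example, arch page-fault handlers do
 * roughly
 *
 *	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 *
 * which lands here with event_id == PERF_COUNT_SW_PAGE_FAULTS and
 * nr == 1 (illustrative call site, not part of this file).
 */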
7649
7650static void perf_swevent_read(struct perf_event *event)
7651{
7652}
7653
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02007654static int perf_swevent_add(struct perf_event *event, int flags)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02007655{
Christoph Lameter4a32fea2014-08-17 12:30:27 -05007656 struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02007657 struct hw_perf_event *hwc = &event->hw;
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02007658 struct hlist_head *head;
7659
Franck Bui-Huu6c7e5502010-11-23 16:21:43 +01007660 if (is_sampling_event(event)) {
Ingo Molnarcdd6c482009-09-21 12:02:48 +02007661 hwc->last_period = hwc->sample_period;
7662 perf_swevent_set_period(event);
7663 }
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02007664
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02007665 hwc->state = !(flags & PERF_EF_START);
7666
Peter Zijlstrab28ab832010-09-06 14:48:15 +02007667 head = find_swevent_head(swhash, event);
Peter Zijlstra12ca6ad2015-12-15 13:49:05 +01007668 if (WARN_ON_ONCE(!head))
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02007669 return -EINVAL;
7670
7671 hlist_add_head_rcu(&event->hlist_entry, head);
Shaohua Li6a694a62015-02-05 15:55:32 -08007672 perf_event_update_userpage(event);
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02007673
Ingo Molnarcdd6c482009-09-21 12:02:48 +02007674 return 0;
7675}
7676
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02007677static void perf_swevent_del(struct perf_event *event, int flags)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02007678{
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02007679 hlist_del_rcu(&event->hlist_entry);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02007680}
7681
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02007682static void perf_swevent_start(struct perf_event *event, int flags)
Peter Zijlstrac6df8d52010-06-03 11:21:20 +02007683{
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02007684 event->hw.state = 0;
Peter Zijlstrac6df8d52010-06-03 11:21:20 +02007685}
7686
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02007687static void perf_swevent_stop(struct perf_event *event, int flags)
Peter Zijlstrac6df8d52010-06-03 11:21:20 +02007688{
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02007689 event->hw.state = PERF_HES_STOPPED;
Peter Zijlstrac6df8d52010-06-03 11:21:20 +02007690}
7691
Frederic Weisbecker49f135e2010-05-20 10:17:46 +02007692/* Deref the hlist from the update side */
7693static inline struct swevent_hlist *
Peter Zijlstrab28ab832010-09-06 14:48:15 +02007694swevent_hlist_deref(struct swevent_htable *swhash)
Frederic Weisbecker49f135e2010-05-20 10:17:46 +02007695{
Peter Zijlstrab28ab832010-09-06 14:48:15 +02007696 return rcu_dereference_protected(swhash->swevent_hlist,
7697 lockdep_is_held(&swhash->hlist_mutex));
Frederic Weisbecker49f135e2010-05-20 10:17:46 +02007698}
7699
Peter Zijlstrab28ab832010-09-06 14:48:15 +02007700static void swevent_hlist_release(struct swevent_htable *swhash)
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02007701{
Peter Zijlstrab28ab832010-09-06 14:48:15 +02007702 struct swevent_hlist *hlist = swevent_hlist_deref(swhash);
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02007703
Frederic Weisbecker49f135e2010-05-20 10:17:46 +02007704 if (!hlist)
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02007705 return;
7706
Andreea-Cristina Bernat70691d42014-08-22 16:26:05 +03007707 RCU_INIT_POINTER(swhash->swevent_hlist, NULL);
Lai Jiangshanfa4bbc42011-03-18 12:08:29 +08007708 kfree_rcu(hlist, rcu_head);
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02007709}
7710
Thomas Gleixner3b364d72016-02-09 20:11:40 +00007711static void swevent_hlist_put_cpu(int cpu)
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02007712{
Peter Zijlstrab28ab832010-09-06 14:48:15 +02007713 struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02007714
Peter Zijlstrab28ab832010-09-06 14:48:15 +02007715 mutex_lock(&swhash->hlist_mutex);
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02007716
Peter Zijlstrab28ab832010-09-06 14:48:15 +02007717 if (!--swhash->hlist_refcount)
7718 swevent_hlist_release(swhash);
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02007719
Peter Zijlstrab28ab832010-09-06 14:48:15 +02007720 mutex_unlock(&swhash->hlist_mutex);
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02007721}
7722
Thomas Gleixner3b364d72016-02-09 20:11:40 +00007723static void swevent_hlist_put(void)
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02007724{
7725 int cpu;
7726
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02007727 for_each_possible_cpu(cpu)
Thomas Gleixner3b364d72016-02-09 20:11:40 +00007728 swevent_hlist_put_cpu(cpu);
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02007729}
7730
Thomas Gleixner3b364d72016-02-09 20:11:40 +00007731static int swevent_hlist_get_cpu(int cpu)
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02007732{
Peter Zijlstrab28ab832010-09-06 14:48:15 +02007733 struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02007734 int err = 0;
7735
Peter Zijlstrab28ab832010-09-06 14:48:15 +02007736 mutex_lock(&swhash->hlist_mutex);
Thomas Gleixnera63fbed2017-05-24 10:15:34 +02007737 if (!swevent_hlist_deref(swhash) &&
7738 cpumask_test_cpu(cpu, perf_online_mask)) {
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02007739 struct swevent_hlist *hlist;
7740
7741 hlist = kzalloc(sizeof(*hlist), GFP_KERNEL);
7742 if (!hlist) {
7743 err = -ENOMEM;
7744 goto exit;
7745 }
Peter Zijlstrab28ab832010-09-06 14:48:15 +02007746 rcu_assign_pointer(swhash->swevent_hlist, hlist);
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02007747 }
Peter Zijlstrab28ab832010-09-06 14:48:15 +02007748 swhash->hlist_refcount++;
Peter Zijlstra9ed60602010-06-11 17:36:35 +02007749exit:
Peter Zijlstrab28ab832010-09-06 14:48:15 +02007750 mutex_unlock(&swhash->hlist_mutex);
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02007751
7752 return err;
7753}
7754
Thomas Gleixner3b364d72016-02-09 20:11:40 +00007755static int swevent_hlist_get(void)
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02007756{
Thomas Gleixner3b364d72016-02-09 20:11:40 +00007757 int err, cpu, failed_cpu;
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02007758
Thomas Gleixnera63fbed2017-05-24 10:15:34 +02007759 mutex_lock(&pmus_lock);
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02007760 for_each_possible_cpu(cpu) {
Thomas Gleixner3b364d72016-02-09 20:11:40 +00007761 err = swevent_hlist_get_cpu(cpu);
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02007762 if (err) {
7763 failed_cpu = cpu;
7764 goto fail;
7765 }
7766 }
Thomas Gleixnera63fbed2017-05-24 10:15:34 +02007767 mutex_unlock(&pmus_lock);
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02007768 return 0;
Peter Zijlstra9ed60602010-06-11 17:36:35 +02007769fail:
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02007770 for_each_possible_cpu(cpu) {
7771 if (cpu == failed_cpu)
7772 break;
Thomas Gleixner3b364d72016-02-09 20:11:40 +00007773 swevent_hlist_put_cpu(cpu);
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02007774 }
Thomas Gleixnera63fbed2017-05-24 10:15:34 +02007775 mutex_unlock(&pmus_lock);
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02007776 return err;
7777}
7778
Ingo Molnarc5905af2012-02-24 08:31:31 +01007779struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX];
Frederic Weisbecker95476b62010-04-14 23:42:18 +02007780
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02007781static void sw_perf_event_destroy(struct perf_event *event)
7782{
7783 u64 event_id = event->attr.config;
7784
7785 WARN_ON(event->parent);
7786
Ingo Molnarc5905af2012-02-24 08:31:31 +01007787 static_key_slow_dec(&perf_swevent_enabled[event_id]);
Thomas Gleixner3b364d72016-02-09 20:11:40 +00007788 swevent_hlist_put();
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02007789}
7790
7791static int perf_swevent_init(struct perf_event *event)
7792{
Tommi Rantala8176cce2013-04-13 22:49:14 +03007793 u64 event_id = event->attr.config;
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02007794
7795 if (event->attr.type != PERF_TYPE_SOFTWARE)
7796 return -ENOENT;
7797
Stephane Eranian2481c5f2012-02-09 23:20:59 +01007798 /*
7799 * no branch sampling for software events
7800 */
7801 if (has_branch_stack(event))
7802 return -EOPNOTSUPP;
7803
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02007804 switch (event_id) {
7805 case PERF_COUNT_SW_CPU_CLOCK:
7806 case PERF_COUNT_SW_TASK_CLOCK:
7807 return -ENOENT;
7808
7809 default:
7810 break;
7811 }
7812
Dan Carpenterce677832010-10-24 21:50:42 +02007813 if (event_id >= PERF_COUNT_SW_MAX)
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02007814 return -ENOENT;
7815
7816 if (!event->parent) {
7817 int err;
7818
Thomas Gleixner3b364d72016-02-09 20:11:40 +00007819 err = swevent_hlist_get();
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02007820 if (err)
7821 return err;
7822
Ingo Molnarc5905af2012-02-24 08:31:31 +01007823 static_key_slow_inc(&perf_swevent_enabled[event_id]);
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02007824 event->destroy = sw_perf_event_destroy;
7825 }
7826
7827 return 0;
7828}
7829
7830static struct pmu perf_swevent = {
Peter Zijlstra89a1e182010-09-07 17:34:50 +02007831 .task_ctx_nr = perf_sw_context,
7832
Peter Zijlstra34f43922015-02-20 14:05:38 +01007833 .capabilities = PERF_PMU_CAP_NO_NMI,
7834
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02007835 .event_init = perf_swevent_init,
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02007836 .add = perf_swevent_add,
7837 .del = perf_swevent_del,
7838 .start = perf_swevent_start,
7839 .stop = perf_swevent_stop,
Peter Zijlstra1c024eca2010-05-19 14:02:22 +02007840 .read = perf_swevent_read,
Peter Zijlstra1c024eca2010-05-19 14:02:22 +02007841};
Frederic Weisbecker95476b62010-04-14 23:42:18 +02007842
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02007843#ifdef CONFIG_EVENT_TRACING
7844
Peter Zijlstra1c024eca2010-05-19 14:02:22 +02007845static int perf_tp_filter_match(struct perf_event *event,
Frederic Weisbecker95476b62010-04-14 23:42:18 +02007846 struct perf_sample_data *data)
7847{
Daniel Borkmann7e3f9772016-07-14 18:08:03 +02007848 void *record = data->raw->frag.data;
Frederic Weisbecker95476b62010-04-14 23:42:18 +02007849
Peter Zijlstrab71b4372015-11-02 10:50:51 +01007850 /* only top level events have filters set */
7851 if (event->parent)
7852 event = event->parent;
7853
Frederic Weisbecker95476b62010-04-14 23:42:18 +02007854 if (likely(!event->filter) || filter_match_preds(event->filter, record))
7855 return 1;
7856 return 0;
7857}
7858
Peter Zijlstra1c024eca2010-05-19 14:02:22 +02007859static int perf_tp_event_match(struct perf_event *event,
7860 struct perf_sample_data *data,
7861 struct pt_regs *regs)
7862{
Frederic Weisbeckera0f7d0f2011-03-07 21:27:09 +01007863 if (event->hw.state & PERF_HES_STOPPED)
7864 return 0;
Peter Zijlstra580d6072010-05-20 20:54:31 +02007865 /*
7866 * All tracepoints are from kernel-space.
7867 */
7868 if (event->attr.exclude_kernel)
Peter Zijlstra1c024eca2010-05-19 14:02:22 +02007869 return 0;
7870
7871 if (!perf_tp_filter_match(event, data))
7872 return 0;
7873
7874 return 1;
7875}
7876
Alexei Starovoitov85b67bc2016-04-18 20:11:50 -07007877void perf_trace_run_bpf_submit(void *raw_data, int size, int rctx,
7878 struct trace_event_call *call, u64 count,
7879 struct pt_regs *regs, struct hlist_head *head,
7880 struct task_struct *task)
7881{
Yonghong Songe87c6bc2017-10-23 23:53:08 -07007882 if (bpf_prog_array_valid(call)) {
Alexei Starovoitov85b67bc2016-04-18 20:11:50 -07007883 *(struct pt_regs **)raw_data = regs;
Yonghong Songe87c6bc2017-10-23 23:53:08 -07007884 if (!trace_call_bpf(call, raw_data) || hlist_empty(head)) {
Alexei Starovoitov85b67bc2016-04-18 20:11:50 -07007885 perf_swevent_put_recursion_context(rctx);
7886 return;
7887 }
7888 }
7889 perf_tp_event(call->event.type, count, raw_data, size, regs, head,
Peter Zijlstra8fd0fbb2017-10-11 09:45:29 +02007890 rctx, task);
Alexei Starovoitov85b67bc2016-04-18 20:11:50 -07007891}
7892EXPORT_SYMBOL_GPL(perf_trace_run_bpf_submit);
7893
Alexei Starovoitov1e1dcd92016-04-06 18:43:24 -07007894void perf_tp_event(u16 event_type, u64 count, void *record, int entry_size,
Andrew Vagine6dab5f2012-07-11 18:14:58 +04007895 struct pt_regs *regs, struct hlist_head *head, int rctx,
Peter Zijlstra8fd0fbb2017-10-11 09:45:29 +02007896 struct task_struct *task)
Peter Zijlstra1c024eca2010-05-19 14:02:22 +02007897{
7898 struct perf_sample_data data;
Peter Zijlstra8fd0fbb2017-10-11 09:45:29 +02007899 struct perf_event *event;
Peter Zijlstra1c024eca2010-05-19 14:02:22 +02007900
7901 struct perf_raw_record raw = {
Daniel Borkmann7e3f9772016-07-14 18:08:03 +02007902 .frag = {
7903 .size = entry_size,
7904 .data = record,
7905 },
Peter Zijlstra1c024eca2010-05-19 14:02:22 +02007906 };
7907
Alexei Starovoitov1e1dcd92016-04-06 18:43:24 -07007908 perf_sample_data_init(&data, 0, 0);
Peter Zijlstra1c024eca2010-05-19 14:02:22 +02007909 data.raw = &raw;
7910
Alexei Starovoitov1e1dcd92016-04-06 18:43:24 -07007911 perf_trace_buf_update(record, event_type);
7912
Peter Zijlstra8fd0fbb2017-10-11 09:45:29 +02007913 hlist_for_each_entry_rcu(event, head, hlist_entry) {
Peter Zijlstra1c024eca2010-05-19 14:02:22 +02007914 if (perf_tp_event_match(event, &data, regs))
Peter Zijlstraa8b0ca12011-06-27 14:41:57 +02007915 perf_swevent_event(event, count, &data, regs);
Peter Zijlstra1c024eca2010-05-19 14:02:22 +02007916 }
Peter Zijlstraecc55f82010-05-21 15:11:34 +02007917
Andrew Vagine6dab5f2012-07-11 18:14:58 +04007918 /*
7919 * If we were given a target task, also iterate its context and
7920 * deliver this event there too.
7921 */
7922 if (task && task != current) {
7923 struct perf_event_context *ctx;
7924 struct trace_entry *entry = record;
7925
7926 rcu_read_lock();
7927 ctx = rcu_dereference(task->perf_event_ctxp[perf_sw_context]);
7928 if (!ctx)
7929 goto unlock;
7930
7931 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
7932 if (event->attr.type != PERF_TYPE_TRACEPOINT)
7933 continue;
7934 if (event->attr.config != entry->type)
7935 continue;
7936 if (perf_tp_event_match(event, &data, regs))
7937 perf_swevent_event(event, count, &data, regs);
7938 }
7939unlock:
7940 rcu_read_unlock();
7941 }
7942
Peter Zijlstraecc55f82010-05-21 15:11:34 +02007943 perf_swevent_put_recursion_context(rctx);
Peter Zijlstra1c024eca2010-05-19 14:02:22 +02007944}
7945EXPORT_SYMBOL_GPL(perf_tp_event);
7946
Ingo Molnarcdd6c482009-09-21 12:02:48 +02007947static void tp_perf_event_destroy(struct perf_event *event)
7948{
Peter Zijlstra1c024eca2010-05-19 14:02:22 +02007949 perf_trace_destroy(event);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02007950}
7951
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02007952static int perf_tp_event_init(struct perf_event *event)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02007953{
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02007954 int err;
7955
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02007956 if (event->attr.type != PERF_TYPE_TRACEPOINT)
7957 return -ENOENT;
7958
Stephane Eranian2481c5f2012-02-09 23:20:59 +01007959 /*
7960 * no branch sampling for tracepoint events
7961 */
7962 if (has_branch_stack(event))
7963 return -EOPNOTSUPP;
7964
Peter Zijlstra1c024eca2010-05-19 14:02:22 +02007965 err = perf_trace_init(event);
7966 if (err)
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02007967 return err;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02007968
7969 event->destroy = tp_perf_event_destroy;
7970
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02007971 return 0;
7972}
7973
7974static struct pmu perf_tracepoint = {
Peter Zijlstra89a1e182010-09-07 17:34:50 +02007975 .task_ctx_nr = perf_sw_context,
7976
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02007977 .event_init = perf_tp_event_init,
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02007978 .add = perf_trace_add,
7979 .del = perf_trace_del,
7980 .start = perf_swevent_start,
7981 .stop = perf_swevent_stop,
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02007982 .read = perf_swevent_read,
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02007983};
7984
7985static inline void perf_tp_register(void)
7986{
Peter Zijlstra2e80a822010-11-17 23:17:36 +01007987 perf_pmu_register(&perf_tracepoint, "tracepoint", PERF_TYPE_TRACEPOINT);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02007988}
Li Zefan6fb29152009-10-15 11:21:42 +08007989
Li Zefan6fb29152009-10-15 11:21:42 +08007990static void perf_event_free_filter(struct perf_event *event)
7991{
7992 ftrace_profile_free_filter(event);
7993}
7994
Alexei Starovoitovaa6a5f32016-09-01 18:37:24 -07007995#ifdef CONFIG_BPF_SYSCALL
7996static void bpf_overflow_handler(struct perf_event *event,
7997 struct perf_sample_data *data,
7998 struct pt_regs *regs)
7999{
8000 struct bpf_perf_event_data_kern ctx = {
8001 .data = data,
Yonghong Song7d9285e2017-10-05 09:19:19 -07008002 .event = event,
Alexei Starovoitovaa6a5f32016-09-01 18:37:24 -07008003 };
8004 int ret = 0;
8005
Hendrik Bruecknerc895f6f2017-12-04 10:56:44 +01008006 ctx.regs = perf_arch_bpf_user_pt_regs(regs);
Alexei Starovoitovaa6a5f32016-09-01 18:37:24 -07008007 preempt_disable();
8008 if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1))
8009 goto out;
8010 rcu_read_lock();
Daniel Borkmann88575192016-11-26 01:28:04 +01008011 ret = BPF_PROG_RUN(event->prog, &ctx);
Alexei Starovoitovaa6a5f32016-09-01 18:37:24 -07008012 rcu_read_unlock();
8013out:
8014 __this_cpu_dec(bpf_prog_active);
8015 preempt_enable();
8016 if (!ret)
8017 return;
8018
8019 event->orig_overflow_handler(event, data, regs);
8020}
8021
8022static int perf_event_set_bpf_handler(struct perf_event *event, u32 prog_fd)
8023{
8024 struct bpf_prog *prog;
8025
8026 if (event->overflow_handler_context)
8027 /* hw breakpoint or kernel counter */
8028 return -EINVAL;
8029
8030 if (event->prog)
8031 return -EEXIST;
8032
8033 prog = bpf_prog_get_type(prog_fd, BPF_PROG_TYPE_PERF_EVENT);
8034 if (IS_ERR(prog))
8035 return PTR_ERR(prog);
8036
8037 event->prog = prog;
8038 event->orig_overflow_handler = READ_ONCE(event->overflow_handler);
8039 WRITE_ONCE(event->overflow_handler, bpf_overflow_handler);
8040 return 0;
8041}
8042
8043static void perf_event_free_bpf_handler(struct perf_event *event)
8044{
8045 struct bpf_prog *prog = event->prog;
8046
8047 if (!prog)
8048 return;
8049
8050 WRITE_ONCE(event->overflow_handler, event->orig_overflow_handler);
8051 event->prog = NULL;
8052 bpf_prog_put(prog);
8053}
8054#else
8055static int perf_event_set_bpf_handler(struct perf_event *event, u32 prog_fd)
8056{
8057 return -EOPNOTSUPP;
8058}
8059static void perf_event_free_bpf_handler(struct perf_event *event)
8060{
8061}
8062#endif
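/*
 * Example (sketch; prog_fd is assumed to be an already-loaded
 * BPF_PROG_TYPE_PERF_EVENT program): userspace arms the overflow handler
 * above by attaching a program to a sampling event:
 *
 *	int fd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, 0);
 *	ioctl(fd, PERF_EVENT_IOC_SET_BPF, prog_fd);
 *
 * From then on bpf_overflow_handler() runs on every overflow, and the
 * original handler is only invoked when the program returns non-zero.
 */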
8063
Alexei Starovoitov25415172015-03-25 12:49:20 -07008064static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd)
8065{
Yonghong Songcf5f5ce2017-08-04 16:00:09 -07008066 bool is_kprobe, is_tracepoint, is_syscall_tp;
Alexei Starovoitov25415172015-03-25 12:49:20 -07008067 struct bpf_prog *prog;
Yonghong Songe87c6bc2017-10-23 23:53:08 -07008068 int ret;
Alexei Starovoitov25415172015-03-25 12:49:20 -07008069
8070 if (event->attr.type != PERF_TYPE_TRACEPOINT)
Alexei Starovoitovf91840a2017-06-02 21:03:52 -07008071 return perf_event_set_bpf_handler(event, prog_fd);
Alexei Starovoitov25415172015-03-25 12:49:20 -07008072
Alexei Starovoitov98b5c2c2016-04-06 18:43:25 -07008073 is_kprobe = event->tp_event->flags & TRACE_EVENT_FL_UKPROBE;
8074 is_tracepoint = event->tp_event->flags & TRACE_EVENT_FL_TRACEPOINT;
Yonghong Songcf5f5ce2017-08-04 16:00:09 -07008075 is_syscall_tp = is_syscall_trace_event(event->tp_event);
8076 if (!is_kprobe && !is_tracepoint && !is_syscall_tp)
Alexei Starovoitov98b5c2c2016-04-06 18:43:25 -07008077 /* bpf programs can only be attached to u/kprobe or tracepoint */
Alexei Starovoitov25415172015-03-25 12:49:20 -07008078 return -EINVAL;
8079
8080 prog = bpf_prog_get(prog_fd);
8081 if (IS_ERR(prog))
8082 return PTR_ERR(prog);
8083
Alexei Starovoitov98b5c2c2016-04-06 18:43:25 -07008084 if ((is_kprobe && prog->type != BPF_PROG_TYPE_KPROBE) ||
Yonghong Songcf5f5ce2017-08-04 16:00:09 -07008085 (is_tracepoint && prog->type != BPF_PROG_TYPE_TRACEPOINT) ||
8086 (is_syscall_tp && prog->type != BPF_PROG_TYPE_TRACEPOINT)) {
Alexei Starovoitov25415172015-03-25 12:49:20 -07008087 /* valid fd, but invalid bpf program type */
8088 bpf_prog_put(prog);
8089 return -EINVAL;
8090 }
8091
Yonghong Songcf5f5ce2017-08-04 16:00:09 -07008092 if (is_tracepoint || is_syscall_tp) {
Alexei Starovoitov32bbe002016-04-06 18:43:28 -07008093 int off = trace_event_get_offsets(event->tp_event);
8094
8095 if (prog->aux->max_ctx_offset > off) {
8096 bpf_prog_put(prog);
8097 return -EACCES;
8098 }
8099 }
Alexei Starovoitov25415172015-03-25 12:49:20 -07008100
Yonghong Songe87c6bc2017-10-23 23:53:08 -07008101 ret = perf_event_attach_bpf_prog(event, prog);
8102 if (ret)
8103 bpf_prog_put(prog);
8104 return ret;
Alexei Starovoitov25415172015-03-25 12:49:20 -07008105}
8106
8107static void perf_event_free_bpf_prog(struct perf_event *event)
8108{
Yonghong Song0b4c6842017-10-23 23:53:07 -07008109 if (event->attr.type != PERF_TYPE_TRACEPOINT) {
8110 perf_event_free_bpf_handler(event);
Alexei Starovoitov25415172015-03-25 12:49:20 -07008111 return;
Alexei Starovoitov25415172015-03-25 12:49:20 -07008112 }
Yonghong Songe87c6bc2017-10-23 23:53:08 -07008113 perf_event_detach_bpf_prog(event);
Alexei Starovoitov25415172015-03-25 12:49:20 -07008114}
8115
Ingo Molnarcdd6c482009-09-21 12:02:48 +02008116#else
Li Zefan6fb29152009-10-15 11:21:42 +08008117
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02008118static inline void perf_tp_register(void)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02008119{
Ingo Molnarcdd6c482009-09-21 12:02:48 +02008120}
Li Zefan6fb29152009-10-15 11:21:42 +08008121
Li Zefan6fb29152009-10-15 11:21:42 +08008122static void perf_event_free_filter(struct perf_event *event)
8123{
8124}
8125
Alexei Starovoitov25415172015-03-25 12:49:20 -07008126static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd)
8127{
8128 return -ENOENT;
8129}
8130
8131static void perf_event_free_bpf_prog(struct perf_event *event)
8132{
8133}
Li Zefan07b139c2009-12-21 14:27:35 +08008134#endif /* CONFIG_EVENT_TRACING */
Ingo Molnarcdd6c482009-09-21 12:02:48 +02008135
Frederic Weisbecker24f1e32c2009-09-09 19:22:48 +02008136#ifdef CONFIG_HAVE_HW_BREAKPOINT
Frederic Weisbeckerf5ffe022009-11-23 15:42:34 +01008137void perf_bp_event(struct perf_event *bp, void *data)
Frederic Weisbecker24f1e32c2009-09-09 19:22:48 +02008138{
Frederic Weisbeckerf5ffe022009-11-23 15:42:34 +01008139 struct perf_sample_data sample;
8140 struct pt_regs *regs = data;
8141
Robert Richterfd0d0002012-04-02 20:19:08 +02008142 perf_sample_data_init(&sample, bp->attr.bp_addr, 0);
Frederic Weisbeckerf5ffe022009-11-23 15:42:34 +01008143
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02008144 if (!bp->hw.state && !perf_exclude_event(bp, regs))
Peter Zijlstraa8b0ca12011-06-27 14:41:57 +02008145 perf_swevent_event(bp, 1, &sample, regs);
Frederic Weisbecker24f1e32c2009-09-09 19:22:48 +02008146}
8147#endif
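/*
 * Example (sketch): a hardware breakpoint event that lands here is opened
 * with the bp_* attr fields from uapi/linux/hw_breakpoint.h, e.g. a write
 * watchpoint on a 4-byte variable:
 *
 *	struct perf_event_attr attr = {
 *		.type		= PERF_TYPE_BREAKPOINT,
 *		.size		= sizeof(attr),
 *		.bp_type	= HW_BREAKPOINT_W,
 *		.bp_addr	= (__u64)(unsigned long)&watched,
 *		.bp_len		= HW_BREAKPOINT_LEN_4,
 *		.sample_period	= 1,
 *	};
 *
 * where "watched" stands for whatever variable is being monitored.
 */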
8148
Alexander Shishkin375637b2016-04-27 18:44:46 +03008149/*
8150 * Allocate a new address filter
8151 */
8152static struct perf_addr_filter *
8153perf_addr_filter_new(struct perf_event *event, struct list_head *filters)
8154{
8155 int node = cpu_to_node(event->cpu == -1 ? 0 : event->cpu);
8156 struct perf_addr_filter *filter;
8157
8158 filter = kzalloc_node(sizeof(*filter), GFP_KERNEL, node);
8159 if (!filter)
8160 return NULL;
8161
8162 INIT_LIST_HEAD(&filter->entry);
8163 list_add_tail(&filter->entry, filters);
8164
8165 return filter;
8166}
8167
8168static void free_filters_list(struct list_head *filters)
8169{
8170 struct perf_addr_filter *filter, *iter;
8171
8172 list_for_each_entry_safe(filter, iter, filters, entry) {
8173 if (filter->inode)
8174 iput(filter->inode);
8175 list_del(&filter->entry);
8176 kfree(filter);
8177 }
8178}
8179
8180/*
8181 * Free existing address filters and optionally install new ones
8182 */
8183static void perf_addr_filters_splice(struct perf_event *event,
8184 struct list_head *head)
8185{
8186 unsigned long flags;
8187 LIST_HEAD(list);
8188
8189 if (!has_addr_filter(event))
8190 return;
8191
8192 /* don't bother with children, they don't have their own filters */
8193 if (event->parent)
8194 return;
8195
8196 raw_spin_lock_irqsave(&event->addr_filters.lock, flags);
8197
8198 list_splice_init(&event->addr_filters.list, &list);
8199 if (head)
8200 list_splice(head, &event->addr_filters.list);
8201
8202 raw_spin_unlock_irqrestore(&event->addr_filters.lock, flags);
8203
8204 free_filters_list(&list);
8205}
8206
8207/*
8208 * Scan through mm's vmas and see if one of them matches the
8209 * @filter; if so, adjust filter's address range.
8210 * Called with mm::mmap_sem down for reading.
8211 */
8212static unsigned long perf_addr_filter_apply(struct perf_addr_filter *filter,
8213 struct mm_struct *mm)
8214{
8215 struct vm_area_struct *vma;
8216
8217 for (vma = mm->mmap; vma; vma = vma->vm_next) {
8218 struct file *file = vma->vm_file;
8219 unsigned long off = vma->vm_pgoff << PAGE_SHIFT;
8220 unsigned long vma_size = vma->vm_end - vma->vm_start;
8221
8222 if (!file)
8223 continue;
8224
8225 if (!perf_addr_filter_match(filter, file, off, vma_size))
8226 continue;
8227
8228 return vma->vm_start;
8229 }
8230
8231 return 0;
8232}
8233
8234/*
8235 * Update event's address range filters based on the
8236 * task's existing mappings, if any.
8237 */
8238static void perf_event_addr_filters_apply(struct perf_event *event)
8239{
8240 struct perf_addr_filters_head *ifh = perf_event_addr_filters(event);
8241 struct task_struct *task = READ_ONCE(event->ctx->task);
8242 struct perf_addr_filter *filter;
8243 struct mm_struct *mm = NULL;
8244 unsigned int count = 0;
8245 unsigned long flags;
8246
8247 /*
8248 * We may observe TASK_TOMBSTONE, which means that the event tear-down
8249 * will stop on the parent's child_mutex that our caller is also holding.
8250 */
8251 if (task == TASK_TOMBSTONE)
8252 return;
8253
Alexander Shishkin6ce77bf2017-01-26 11:40:57 +02008254 if (!ifh->nr_file_filters)
8255 return;
8256
Alexander Shishkin375637b2016-04-27 18:44:46 +03008257 mm = get_task_mm(event->ctx->task);
8258 if (!mm)
8259 goto restart;
8260
8261 down_read(&mm->mmap_sem);
8262
8263 raw_spin_lock_irqsave(&ifh->lock, flags);
8264 list_for_each_entry(filter, &ifh->list, entry) {
8265 event->addr_filters_offs[count] = 0;
8266
Mathieu Poirier99f5bc92016-07-18 10:43:07 -06008267 /*
8268 * Adjust the base offset if the filter is associated with a binary
8269 * that needs to be mapped:
8270 */
8271 if (filter->inode)
Alexander Shishkin375637b2016-04-27 18:44:46 +03008272 event->addr_filters_offs[count] =
8273 perf_addr_filter_apply(filter, mm);
8274
8275 count++;
8276 }
8277
8278 event->addr_filters_gen++;
8279 raw_spin_unlock_irqrestore(&ifh->lock, flags);
8280
8281 up_read(&mm->mmap_sem);
8282
8283 mmput(mm);
8284
8285restart:
Alexander Shishkin767ae082016-09-06 16:23:49 +03008286 perf_event_stop(event, 1);
Alexander Shishkin375637b2016-04-27 18:44:46 +03008287}
8288
8289/*
8290 * Address range filtering: limiting the data to certain
8291 * instruction address ranges. Filters are ioctl()ed to us from
8292 * userspace as ASCII strings.
8293 *
8294 * Filter string format:
8295 *
8296 * ACTION RANGE_SPEC
8297 * where ACTION is one of the following:
8298 * * "filter": limit the trace to this region
8299 * * "start": start tracing from this address
8300 * * "stop": stop tracing at this address/region;
8301 * RANGE_SPEC is
8302 * * for kernel addresses: <start address>[/<size>]
8303 * * for object files: <start address>[/<size>]@</path/to/object/file>
8304 *
8305 * if <size> is not specified, the range is treated as a single address.
8306 */
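/*
 * Examples (the addresses and the path are made up):
 *
 *	filter 0x1000/0x2000@/usr/bin/app  - trace 0x2000 bytes starting at
 *					     offset 0x1000 within app
 *	start 0xffffffff81000000/0x4000	   - start tracing in this kernel range
 *	stop 0xffffffff81004000		   - stop tracing at this kernel address
 *
 * Such strings are handed to perf_event_set_filter() below via the
 * PERF_EVENT_IOC_SET_FILTER ioctl.
 */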
8307enum {
Alexander Shishkine96271f2016-11-18 13:38:43 +02008308 IF_ACT_NONE = -1,
Alexander Shishkin375637b2016-04-27 18:44:46 +03008309 IF_ACT_FILTER,
8310 IF_ACT_START,
8311 IF_ACT_STOP,
8312 IF_SRC_FILE,
8313 IF_SRC_KERNEL,
8314 IF_SRC_FILEADDR,
8315 IF_SRC_KERNELADDR,
8316};
8317
8318enum {
8319 IF_STATE_ACTION = 0,
8320 IF_STATE_SOURCE,
8321 IF_STATE_END,
8322};
8323
8324static const match_table_t if_tokens = {
8325 { IF_ACT_FILTER, "filter" },
8326 { IF_ACT_START, "start" },
8327 { IF_ACT_STOP, "stop" },
8328 { IF_SRC_FILE, "%u/%u@%s" },
8329 { IF_SRC_KERNEL, "%u/%u" },
8330 { IF_SRC_FILEADDR, "%u@%s" },
8331 { IF_SRC_KERNELADDR, "%u" },
Alexander Shishkine96271f2016-11-18 13:38:43 +02008332 { IF_ACT_NONE, NULL },
Alexander Shishkin375637b2016-04-27 18:44:46 +03008333};
8334
8335/*
8336 * Address filter string parser
8337 */
8338static int
8339perf_event_parse_addr_filter(struct perf_event *event, char *fstr,
8340 struct list_head *filters)
8341{
8342 struct perf_addr_filter *filter = NULL;
8343 char *start, *orig, *filename = NULL;
8344 struct path path;
8345 substring_t args[MAX_OPT_ARGS];
8346 int state = IF_STATE_ACTION, token;
8347 unsigned int kernel = 0;
8348 int ret = -EINVAL;
8349
8350 orig = fstr = kstrdup(fstr, GFP_KERNEL);
8351 if (!fstr)
8352 return -ENOMEM;
8353
8354 while ((start = strsep(&fstr, " ,\n")) != NULL) {
8355 ret = -EINVAL;
8356
8357 if (!*start)
8358 continue;
8359
8360 /* filter definition begins */
8361 if (state == IF_STATE_ACTION) {
8362 filter = perf_addr_filter_new(event, filters);
8363 if (!filter)
8364 goto fail;
8365 }
8366
8367 token = match_token(start, if_tokens, args);
8368 switch (token) {
8369 case IF_ACT_FILTER:
8370 case IF_ACT_START:
8371 filter->filter = 1;
8372
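			/* fall through */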
8373 case IF_ACT_STOP:
8374 if (state != IF_STATE_ACTION)
8375 goto fail;
8376
8377 state = IF_STATE_SOURCE;
8378 break;
8379
8380 case IF_SRC_KERNELADDR:
8381 case IF_SRC_KERNEL:
8382 kernel = 1;
8383
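			/* fall through */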
8384 case IF_SRC_FILEADDR:
8385 case IF_SRC_FILE:
8386 if (state != IF_STATE_SOURCE)
8387 goto fail;
8388
8389 if (token == IF_SRC_FILE || token == IF_SRC_KERNEL)
8390 filter->range = 1;
8391
8392 *args[0].to = 0;
8393 ret = kstrtoul(args[0].from, 0, &filter->offset);
8394 if (ret)
8395 goto fail;
8396
8397 if (filter->range) {
8398 *args[1].to = 0;
8399 ret = kstrtoul(args[1].from, 0, &filter->size);
8400 if (ret)
8401 goto fail;
8402 }
8403
Mathieu Poirier4059ffd2016-07-18 10:43:05 -06008404 if (token == IF_SRC_FILE || token == IF_SRC_FILEADDR) {
8405 int fpos = filter->range ? 2 : 1;
8406
8407 filename = match_strdup(&args[fpos]);
Alexander Shishkin375637b2016-04-27 18:44:46 +03008408 if (!filename) {
8409 ret = -ENOMEM;
8410 goto fail;
8411 }
8412 }
8413
8414 state = IF_STATE_END;
8415 break;
8416
8417 default:
8418 goto fail;
8419 }
8420
8421 /*
8422 * The filter definition is fully parsed; validate and install it.
8423 * Make sure that it doesn't contradict itself or the event's
8424 * attribute.
8425 */
8426 if (state == IF_STATE_END) {
Alexander Shishkin9ccbfbb2017-01-26 11:40:56 +02008427 ret = -EINVAL;
Alexander Shishkin375637b2016-04-27 18:44:46 +03008428 if (kernel && event->attr.exclude_kernel)
8429 goto fail;
8430
8431 if (!kernel) {
8432 if (!filename)
8433 goto fail;
8434
Alexander Shishkin6ce77bf2017-01-26 11:40:57 +02008435 /*
8436 * For now, we only support file-based filters
8437 * in per-task events; doing so for CPU-wide
8438 * events requires additional context switching
8439 * trickery, since same object code will be
8440 * mapped at different virtual addresses in
8441 * different processes.
8442 */
8443 ret = -EOPNOTSUPP;
8444 if (!event->ctx->task)
8445 goto fail_free_name;
8446
Alexander Shishkin375637b2016-04-27 18:44:46 +03008447 /* look up the path and grab its inode */
8448 ret = kern_path(filename, LOOKUP_FOLLOW, &path);
8449 if (ret)
8450 goto fail_free_name;
8451
8452 filter->inode = igrab(d_inode(path.dentry));
8453 path_put(&path);
8454 kfree(filename);
8455 filename = NULL;
8456
8457 ret = -EINVAL;
8458 if (!filter->inode ||
8459 !S_ISREG(filter->inode->i_mode))
8460 /* free_filters_list() will iput() */
8461 goto fail;
Alexander Shishkin6ce77bf2017-01-26 11:40:57 +02008462
8463 event->addr_filters.nr_file_filters++;
Alexander Shishkin375637b2016-04-27 18:44:46 +03008464 }
8465
8466 /* ready to consume more filters */
8467 state = IF_STATE_ACTION;
8468 filter = NULL;
8469 }
8470 }
8471
8472 if (state != IF_STATE_ACTION)
8473 goto fail;
8474
8475 kfree(orig);
8476
8477 return 0;
8478
8479fail_free_name:
8480 kfree(filename);
8481fail:
8482 free_filters_list(filters);
8483 kfree(orig);
8484
8485 return ret;
8486}
8487
8488static int
8489perf_event_set_addr_filter(struct perf_event *event, char *filter_str)
8490{
8491 LIST_HEAD(filters);
8492 int ret;
8493
8494 /*
8495 * Since this is called in the perf_ioctl() path, we're already holding
8496 * ctx::mutex.
8497 */
8498 lockdep_assert_held(&event->ctx->mutex);
8499
8500 if (WARN_ON_ONCE(event->parent))
8501 return -EINVAL;
8502
Alexander Shishkin375637b2016-04-27 18:44:46 +03008503 ret = perf_event_parse_addr_filter(event, filter_str, &filters);
8504 if (ret)
Alexander Shishkin6ce77bf2017-01-26 11:40:57 +02008505 goto fail_clear_files;
Alexander Shishkin375637b2016-04-27 18:44:46 +03008506
8507 ret = event->pmu->addr_filters_validate(&filters);
Alexander Shishkin6ce77bf2017-01-26 11:40:57 +02008508 if (ret)
8509 goto fail_free_filters;
Alexander Shishkin375637b2016-04-27 18:44:46 +03008510
8511 /* remove existing filters, if any */
8512 perf_addr_filters_splice(event, &filters);
8513
8514 /* install new filters */
8515 perf_event_for_each_child(event, perf_event_addr_filters_apply);
8516
8517 return ret;
Alexander Shishkin6ce77bf2017-01-26 11:40:57 +02008518
8519fail_free_filters:
8520 free_filters_list(&filters);
8521
8522fail_clear_files:
8523 event->addr_filters.nr_file_filters = 0;
8524
8525 return ret;
Alexander Shishkin375637b2016-04-27 18:44:46 +03008526}
8527
Alexander Shishkinc796bbb2016-04-27 18:44:42 +03008528static int perf_event_set_filter(struct perf_event *event, void __user *arg)
8529{
8530 char *filter_str;
8531 int ret = -EINVAL;
8532
Alexander Shishkin375637b2016-04-27 18:44:46 +03008533 if ((event->attr.type != PERF_TYPE_TRACEPOINT ||
8534 !IS_ENABLED(CONFIG_EVENT_TRACING)) &&
8535 !has_addr_filter(event))
Alexander Shishkinc796bbb2016-04-27 18:44:42 +03008536 return -EINVAL;
8537
8538 filter_str = strndup_user(arg, PAGE_SIZE);
8539 if (IS_ERR(filter_str))
8540 return PTR_ERR(filter_str);
8541
8542 if (IS_ENABLED(CONFIG_EVENT_TRACING) &&
8543 event->attr.type == PERF_TYPE_TRACEPOINT)
8544 ret = ftrace_profile_set_filter(event, event->attr.config,
8545 filter_str);
Alexander Shishkin375637b2016-04-27 18:44:46 +03008546 else if (has_addr_filter(event))
8547 ret = perf_event_set_addr_filter(event, filter_str);
Alexander Shishkinc796bbb2016-04-27 18:44:42 +03008548
8549 kfree(filter_str);
8550 return ret;
8551}
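/*
 * Example (sketch): for a tracepoint event, the same ioctl takes an
 * ftrace-style predicate over the tracepoint's fields, e.g. on
 * sched:sched_switch:
 *
 *	ioctl(fd, PERF_EVENT_IOC_SET_FILTER, "prev_comm ~ \"bash\"");
 *
 * whereas events on a PMU with address filters take the address filter
 * strings documented above.
 */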
8552
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02008553/*
8554 * hrtimer based swevent callback
8555 */
Ingo Molnarcdd6c482009-09-21 12:02:48 +02008556
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02008557static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02008558{
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02008559 enum hrtimer_restart ret = HRTIMER_RESTART;
8560 struct perf_sample_data data;
8561 struct pt_regs *regs;
8562 struct perf_event *event;
8563 u64 period;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02008564
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02008565 event = container_of(hrtimer, struct perf_event, hw.hrtimer);
Peter Zijlstraba3dd362011-02-15 12:41:46 +01008566
8567 if (event->state != PERF_EVENT_STATE_ACTIVE)
8568 return HRTIMER_NORESTART;
8569
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02008570 event->pmu->read(event);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02008571
Robert Richterfd0d0002012-04-02 20:19:08 +02008572 perf_sample_data_init(&data, 0, event->hw.last_period);
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02008573 regs = get_irq_regs();
8574
8575 if (regs && !perf_exclude_event(event, regs)) {
Paul E. McKenney77aeeeb2011-11-10 16:02:52 -08008576 if (!(event->attr.exclude_idle && is_idle_task(current)))
Robert Richter33b07b82012-04-05 18:24:43 +02008577 if (__perf_event_overflow(event, 1, &data, regs))
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02008578 ret = HRTIMER_NORESTART;
8579 }
8580
8581 period = max_t(u64, 10000, event->hw.sample_period);
8582 hrtimer_forward_now(hrtimer, ns_to_ktime(period));
8583
8584 return ret;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02008585}
8586
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02008587static void perf_swevent_start_hrtimer(struct perf_event *event)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02008588{
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02008589 struct hw_perf_event *hwc = &event->hw;
Franck Bui-Huu5d508e82010-11-23 16:21:45 +01008590 s64 period;
8591
8592 if (!is_sampling_event(event))
8593 return;
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02008594
Franck Bui-Huu5d508e82010-11-23 16:21:45 +01008595 period = local64_read(&hwc->period_left);
8596 if (period) {
8597 if (period < 0)
8598 period = 10000;
Peter Zijlstrafa407f32010-06-24 12:35:12 +02008599
Franck Bui-Huu5d508e82010-11-23 16:21:45 +01008600 local64_set(&hwc->period_left, 0);
8601 } else {
8602 period = max_t(u64, 10000, hwc->sample_period);
8603 }
Thomas Gleixner3497d202015-04-14 21:09:03 +00008604 hrtimer_start(&hwc->hrtimer, ns_to_ktime(period),
8605 HRTIMER_MODE_REL_PINNED);
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02008606}
8607
8608static void perf_swevent_cancel_hrtimer(struct perf_event *event)
8609{
8610 struct hw_perf_event *hwc = &event->hw;
8611
Franck Bui-Huu6c7e5502010-11-23 16:21:43 +01008612 if (is_sampling_event(event)) {
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02008613 ktime_t remaining = hrtimer_get_remaining(&hwc->hrtimer);
Peter Zijlstrafa407f32010-06-24 12:35:12 +02008614 local64_set(&hwc->period_left, ktime_to_ns(remaining));
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02008615
8616 hrtimer_cancel(&hwc->hrtimer);
8617 }
8618}
8619
Peter Zijlstraba3dd362011-02-15 12:41:46 +01008620static void perf_swevent_init_hrtimer(struct perf_event *event)
8621{
8622 struct hw_perf_event *hwc = &event->hw;
8623
8624 if (!is_sampling_event(event))
8625 return;
8626
8627 hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
8628 hwc->hrtimer.function = perf_swevent_hrtimer;
8629
8630 /*
8631 * Since hrtimers have a fixed rate, we can do a static freq->period
8632 * mapping and avoid the whole period adjust feedback stuff.
8633 */
8634 if (event->attr.freq) {
8635 long freq = event->attr.sample_freq;
8636
8637 event->attr.sample_period = NSEC_PER_SEC / freq;
8638 hwc->sample_period = event->attr.sample_period;
8639 local64_set(&hwc->period_left, hwc->sample_period);
Namhyung Kim778141e2013-03-18 11:41:46 +09008640 hwc->last_period = hwc->sample_period;
Peter Zijlstraba3dd362011-02-15 12:41:46 +01008641 event->attr.freq = 0;
8642 }
8643}
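/*
 * Worked example of the static mapping above (numbers are illustrative):
 * attr.freq == 1 with attr.sample_freq == 4000 becomes a fixed period of
 * NSEC_PER_SEC / 4000 == 250000ns between hrtimer firings.
 */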
8644
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02008645/*
8646 * Software event: cpu wall time clock
8647 */
8648
8649static void cpu_clock_event_update(struct perf_event *event)
8650{
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02008651 s64 prev;
8652 u64 now;
8653
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02008654 now = local_clock();
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02008655 prev = local64_xchg(&event->hw.prev_count, now);
8656 local64_add(now - prev, &event->count);
8657}
8658
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02008659static void cpu_clock_event_start(struct perf_event *event, int flags)
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02008660{
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02008661 local64_set(&event->hw.prev_count, local_clock());
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02008662 perf_swevent_start_hrtimer(event);
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02008663}
8664
8665static void cpu_clock_event_stop(struct perf_event *event, int flags)
8666{
8667 perf_swevent_cancel_hrtimer(event);
8668 cpu_clock_event_update(event);
8669}
8670
8671static int cpu_clock_event_add(struct perf_event *event, int flags)
8672{
8673 if (flags & PERF_EF_START)
8674 cpu_clock_event_start(event, flags);
Shaohua Li6a694a62015-02-05 15:55:32 -08008675 perf_event_update_userpage(event);
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02008676
8677 return 0;
8678}
8679
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02008680static void cpu_clock_event_del(struct perf_event *event, int flags)
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02008681{
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02008682 cpu_clock_event_stop(event, flags);
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02008683}
8684
8685static void cpu_clock_event_read(struct perf_event *event)
8686{
8687 cpu_clock_event_update(event);
8688}
8689
8690static int cpu_clock_event_init(struct perf_event *event)
8691{
8692 if (event->attr.type != PERF_TYPE_SOFTWARE)
8693 return -ENOENT;
8694
8695 if (event->attr.config != PERF_COUNT_SW_CPU_CLOCK)
8696 return -ENOENT;
8697
Stephane Eranian2481c5f2012-02-09 23:20:59 +01008698 /*
8699 * no branch sampling for software events
8700 */
8701 if (has_branch_stack(event))
8702 return -EOPNOTSUPP;
8703
Peter Zijlstraba3dd362011-02-15 12:41:46 +01008704 perf_swevent_init_hrtimer(event);
8705
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02008706 return 0;
8707}
8708
8709static struct pmu perf_cpu_clock = {
Peter Zijlstra89a1e182010-09-07 17:34:50 +02008710 .task_ctx_nr = perf_sw_context,
8711
Peter Zijlstra34f43922015-02-20 14:05:38 +01008712 .capabilities = PERF_PMU_CAP_NO_NMI,
8713
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02008714 .event_init = cpu_clock_event_init,
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02008715 .add = cpu_clock_event_add,
8716 .del = cpu_clock_event_del,
8717 .start = cpu_clock_event_start,
8718 .stop = cpu_clock_event_stop,
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02008719 .read = cpu_clock_event_read,
8720};
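/*
 * Example (sketch): counting with this PMU from userspace; the read value
 * is nanoseconds accumulated while the event was active:
 *
 *	struct perf_event_attr attr = {
 *		.type	= PERF_TYPE_SOFTWARE,
 *		.size	= sizeof(attr),
 *		.config	= PERF_COUNT_SW_CPU_CLOCK,
 *	};
 *	int fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
 *	__u64 ns;
 *	read(fd, &ns, sizeof(ns));
 */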
8721
8722/*
8723 * Software event: task time clock
8724 */
8725
8726static void task_clock_event_update(struct perf_event *event, u64 now)
8727{
8728 u64 prev;
8729 s64 delta;
8730
8731 prev = local64_xchg(&event->hw.prev_count, now);
8732 delta = now - prev;
8733 local64_add(delta, &event->count);
8734}
8735
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02008736static void task_clock_event_start(struct perf_event *event, int flags)
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02008737{
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02008738 local64_set(&event->hw.prev_count, event->ctx->time);
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02008739 perf_swevent_start_hrtimer(event);
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02008740}
8741
8742static void task_clock_event_stop(struct perf_event *event, int flags)
8743{
8744 perf_swevent_cancel_hrtimer(event);
8745 task_clock_event_update(event, event->ctx->time);
8746}
8747
8748static int task_clock_event_add(struct perf_event *event, int flags)
8749{
8750 if (flags & PERF_EF_START)
8751 task_clock_event_start(event, flags);
Shaohua Li6a694a62015-02-05 15:55:32 -08008752 perf_event_update_userpage(event);
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02008753
8754 return 0;
8755}
8756
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02008757static void task_clock_event_del(struct perf_event *event, int flags)
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02008758{
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02008759 task_clock_event_stop(event, PERF_EF_UPDATE);
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02008760}
8761
8762static void task_clock_event_read(struct perf_event *event)
8763{
Peter Zijlstra768a06e2011-02-22 16:52:24 +01008764 u64 now = perf_clock();
8765 u64 delta = now - event->ctx->timestamp;
8766 u64 time = event->ctx->time + delta;
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02008767
8768 task_clock_event_update(event, time);
8769}
8770
8771static int task_clock_event_init(struct perf_event *event)
8772{
8773 if (event->attr.type != PERF_TYPE_SOFTWARE)
8774 return -ENOENT;
8775
8776 if (event->attr.config != PERF_COUNT_SW_TASK_CLOCK)
8777 return -ENOENT;
8778
Stephane Eranian2481c5f2012-02-09 23:20:59 +01008779 /*
8780 * no branch sampling for software events
8781 */
8782 if (has_branch_stack(event))
8783 return -EOPNOTSUPP;
8784
Peter Zijlstraba3dd362011-02-15 12:41:46 +01008785 perf_swevent_init_hrtimer(event);
8786
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02008787 return 0;
8788}
8789
8790static struct pmu perf_task_clock = {
Peter Zijlstra89a1e182010-09-07 17:34:50 +02008791 .task_ctx_nr = perf_sw_context,
8792
Peter Zijlstra34f43922015-02-20 14:05:38 +01008793 .capabilities = PERF_PMU_CAP_NO_NMI,
8794
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02008795 .event_init = task_clock_event_init,
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02008796 .add = task_clock_event_add,
8797 .del = task_clock_event_del,
8798 .start = task_clock_event_start,
8799 .stop = task_clock_event_stop,
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02008800 .read = task_clock_event_read,
8801};
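/*
 * The same sketch as for perf_cpu_clock applies here with
 * .config = PERF_COUNT_SW_TASK_CLOCK; the difference is that this clock
 * follows ctx->time, i.e. it only advances while the monitored task's
 * context is scheduled in, rather than tracking the raw local clock.
 */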
8802
Peter Zijlstraad5133b2010-06-15 12:22:39 +02008803static void perf_pmu_nop_void(struct pmu *pmu)
8804{
8805}
8806
Sukadev Bhattiprolufbbe0702015-09-03 20:07:45 -07008807static void perf_pmu_nop_txn(struct pmu *pmu, unsigned int flags)
8808{
8809}
8810
Peter Zijlstraad5133b2010-06-15 12:22:39 +02008811static int perf_pmu_nop_int(struct pmu *pmu)
8812{
8813 return 0;
8814}
8815
Geliang Tang18ab2cd2015-09-27 23:25:50 +08008816static DEFINE_PER_CPU(unsigned int, nop_txn_flags);
Sukadev Bhattiprolufbbe0702015-09-03 20:07:45 -07008817
8818static void perf_pmu_start_txn(struct pmu *pmu, unsigned int flags)
Peter Zijlstraad5133b2010-06-15 12:22:39 +02008819{
Sukadev Bhattiprolufbbe0702015-09-03 20:07:45 -07008820 __this_cpu_write(nop_txn_flags, flags);
8821
8822 if (flags & ~PERF_PMU_TXN_ADD)
8823 return;
8824
Peter Zijlstraad5133b2010-06-15 12:22:39 +02008825 perf_pmu_disable(pmu);
8826}
8827
8828static int perf_pmu_commit_txn(struct pmu *pmu)
8829{
Sukadev Bhattiprolufbbe0702015-09-03 20:07:45 -07008830 unsigned int flags = __this_cpu_read(nop_txn_flags);
8831
8832 __this_cpu_write(nop_txn_flags, 0);
8833
8834 if (flags & ~PERF_PMU_TXN_ADD)
8835 return 0;
8836
Peter Zijlstraad5133b2010-06-15 12:22:39 +02008837 perf_pmu_enable(pmu);
8838 return 0;
8839}
8840
8841static void perf_pmu_cancel_txn(struct pmu *pmu)
8842{
Sukadev Bhattiprolufbbe0702015-09-03 20:07:45 -07008843 unsigned int flags = __this_cpu_read(nop_txn_flags);
8844
8845 __this_cpu_write(nop_txn_flags, 0);
8846
8847 if (flags & ~PERF_PMU_TXN_ADD)
8848 return;
8849
Peter Zijlstraad5133b2010-06-15 12:22:39 +02008850 perf_pmu_enable(pmu);
8851}
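/*
 * Rough shape of the protocol these stubs stand in for (pseudo-code along
 * the lines of group_sched_in()):
 *
 *	pmu->start_txn(pmu, PERF_PMU_TXN_ADD);
 *	// ... pmu->add() every event in the group ...
 *	if (pmu->commit_txn(pmu)) {
 *		// schedule the group back out
 *		pmu->cancel_txn(pmu);
 *	}
 */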
8852
Peter Zijlstra35edc2a2011-11-20 20:36:02 +01008853static int perf_event_idx_default(struct perf_event *event)
8854{
Peter Zijlstrac719f562014-10-21 11:10:21 +02008855 return 0;
Peter Zijlstra35edc2a2011-11-20 20:36:02 +01008856}
8857
Peter Zijlstra8dc85d5472010-09-02 16:50:03 +02008858/*
8859 * Ensures all contexts with the same task_ctx_nr have the same
8860 * pmu_cpu_context too.
8861 */
Mark Rutland9e317042014-02-10 17:44:18 +00008862static struct perf_cpu_context __percpu *find_pmu_context(int ctxn)
Peter Zijlstra8dc85d5472010-09-02 16:50:03 +02008863{
8864 struct pmu *pmu;
8865
8866 if (ctxn < 0)
8867 return NULL;
8868
8869 list_for_each_entry(pmu, &pmus, entry) {
8870 if (pmu->task_ctx_nr == ctxn)
8871 return pmu->pmu_cpu_context;
8872 }
8873
8874 return NULL;
8875}
8876
Peter Zijlstra51676952010-12-07 14:18:20 +01008877static void free_pmu_context(struct pmu *pmu)
8878{
Will Deacondf0062b2017-10-03 15:20:50 +01008879 /*
8880 * Static contexts such as perf_sw_context have a global lifetime
8881 * and may be shared between different PMUs. Avoid freeing them
8882 * when a single PMU is going away.
8883 */
8884 if (pmu->task_ctx_nr > perf_invalid_context)
8885 return;
8886
Peter Zijlstra8dc85d5472010-09-02 16:50:03 +02008887 mutex_lock(&pmus_lock);
Peter Zijlstra51676952010-12-07 14:18:20 +01008888 free_percpu(pmu->pmu_cpu_context);
Peter Zijlstra8dc85d5472010-09-02 16:50:03 +02008889 mutex_unlock(&pmus_lock);
8890}
Alexander Shishkin6e855cd2016-04-27 18:44:48 +03008891
8892/*
8893 * Let userspace know that this PMU supports address range filtering:
8894 */
8895static ssize_t nr_addr_filters_show(struct device *dev,
8896 struct device_attribute *attr,
8897 char *page)
8898{
8899 struct pmu *pmu = dev_get_drvdata(dev);
8900
8901 return snprintf(page, PAGE_SIZE - 1, "%d\n", pmu->nr_addr_filters);
8902}
8903DEVICE_ATTR_RO(nr_addr_filters);
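/*
 * E.g. (intel_pt used as an example of a filtering PMU; the value is
 * hardware dependent):
 *
 *	# cat /sys/bus/event_source/devices/intel_pt/nr_addr_filters
 *	2
 */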
8904
Peter Zijlstra2e80a822010-11-17 23:17:36 +01008905static struct idr pmu_idr;
Peter Zijlstra8dc85d5472010-09-02 16:50:03 +02008906
Peter Zijlstraabe43402010-11-17 23:17:37 +01008907static ssize_t
8908type_show(struct device *dev, struct device_attribute *attr, char *page)
8909{
8910 struct pmu *pmu = dev_get_drvdata(dev);
8911
8912 return snprintf(page, PAGE_SIZE-1, "%d\n", pmu->type);
8913}
Greg Kroah-Hartman90826ca2013-08-23 14:24:40 -07008914static DEVICE_ATTR_RO(type);
Peter Zijlstraabe43402010-11-17 23:17:37 +01008915
Stephane Eranian62b85632013-04-03 14:21:34 +02008916static ssize_t
8917perf_event_mux_interval_ms_show(struct device *dev,
8918 struct device_attribute *attr,
8919 char *page)
8920{
8921 struct pmu *pmu = dev_get_drvdata(dev);
8922
8923 return snprintf(page, PAGE_SIZE-1, "%d\n", pmu->hrtimer_interval_ms);
8924}
8925
Peter Zijlstra272325c2015-04-15 11:41:58 +02008926static DEFINE_MUTEX(mux_interval_mutex);
8927
Stephane Eranian62b85632013-04-03 14:21:34 +02008928static ssize_t
8929perf_event_mux_interval_ms_store(struct device *dev,
8930 struct device_attribute *attr,
8931 const char *buf, size_t count)
8932{
8933 struct pmu *pmu = dev_get_drvdata(dev);
8934 int timer, cpu, ret;
8935
8936 ret = kstrtoint(buf, 0, &timer);
8937 if (ret)
8938 return ret;
8939
8940 if (timer < 1)
8941 return -EINVAL;
8942
8943 /* same value, nothing to do */
8944 if (timer == pmu->hrtimer_interval_ms)
8945 return count;
8946
Peter Zijlstra272325c2015-04-15 11:41:58 +02008947 mutex_lock(&mux_interval_mutex);
Stephane Eranian62b85632013-04-03 14:21:34 +02008948 pmu->hrtimer_interval_ms = timer;
8949
8950 /* update all cpuctx for this PMU */
Thomas Gleixnera63fbed2017-05-24 10:15:34 +02008951 cpus_read_lock();
Peter Zijlstra272325c2015-04-15 11:41:58 +02008952 for_each_online_cpu(cpu) {
Stephane Eranian62b85632013-04-03 14:21:34 +02008953 struct perf_cpu_context *cpuctx;
8954 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
8955 cpuctx->hrtimer_interval = ns_to_ktime(NSEC_PER_MSEC * timer);
8956
Peter Zijlstra272325c2015-04-15 11:41:58 +02008957 cpu_function_call(cpu,
8958 (remote_function_f)perf_mux_hrtimer_restart, cpuctx);
Stephane Eranian62b85632013-04-03 14:21:34 +02008959 }
Thomas Gleixnera63fbed2017-05-24 10:15:34 +02008960 cpus_read_unlock();
Peter Zijlstra272325c2015-04-15 11:41:58 +02008961 mutex_unlock(&mux_interval_mutex);
Stephane Eranian62b85632013-04-03 14:21:34 +02008962
8963 return count;
8964}
Greg Kroah-Hartman90826ca2013-08-23 14:24:40 -07008965static DEVICE_ATTR_RW(perf_event_mux_interval_ms);
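/*
 * These device attributes live under /sys/bus/event_source/devices/<pmu>/;
 * e.g. setting a 2ms multiplexing interval on the core PMU (sketch):
 *
 *	# echo 2 > /sys/bus/event_source/devices/cpu/perf_event_mux_interval_ms
 */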
Stephane Eranian62b85632013-04-03 14:21:34 +02008966
Greg Kroah-Hartman90826ca2013-08-23 14:24:40 -07008967static struct attribute *pmu_dev_attrs[] = {
8968 &dev_attr_type.attr,
8969 &dev_attr_perf_event_mux_interval_ms.attr,
8970 NULL,
Peter Zijlstraabe43402010-11-17 23:17:37 +01008971};
Greg Kroah-Hartman90826ca2013-08-23 14:24:40 -07008972ATTRIBUTE_GROUPS(pmu_dev);
Peter Zijlstraabe43402010-11-17 23:17:37 +01008973
8974static int pmu_bus_running;
8975static struct bus_type pmu_bus = {
8976 .name = "event_source",
Greg Kroah-Hartman90826ca2013-08-23 14:24:40 -07008977 .dev_groups = pmu_dev_groups,
Peter Zijlstraabe43402010-11-17 23:17:37 +01008978};
8979
8980static void pmu_dev_release(struct device *dev)
8981{
8982 kfree(dev);
8983}
8984
8985static int pmu_dev_alloc(struct pmu *pmu)
8986{
8987 int ret = -ENOMEM;
8988
8989 pmu->dev = kzalloc(sizeof(struct device), GFP_KERNEL);
8990 if (!pmu->dev)
8991 goto out;
8992
Peter Zijlstra0c9d42e2011-11-20 23:30:47 +01008993 pmu->dev->groups = pmu->attr_groups;
Peter Zijlstraabe43402010-11-17 23:17:37 +01008994 device_initialize(pmu->dev);
8995 ret = dev_set_name(pmu->dev, "%s", pmu->name);
8996 if (ret)
8997 goto free_dev;
8998
8999 dev_set_drvdata(pmu->dev, pmu);
9000 pmu->dev->bus = &pmu_bus;
9001 pmu->dev->release = pmu_dev_release;
9002 ret = device_add(pmu->dev);
9003 if (ret)
9004 goto free_dev;
9005
Alexander Shishkin6e855cd2016-04-27 18:44:48 +03009006 /* For PMUs with address filters, throw in an extra attribute: */
9007 if (pmu->nr_addr_filters)
9008 ret = device_create_file(pmu->dev, &dev_attr_nr_addr_filters);
9009
9010 if (ret)
9011 goto del_dev;
9012
Peter Zijlstraabe43402010-11-17 23:17:37 +01009013out:
9014 return ret;
9015
Alexander Shishkin6e855cd2016-04-27 18:44:48 +03009016del_dev:
9017 device_del(pmu->dev);
9018
Peter Zijlstraabe43402010-11-17 23:17:37 +01009019free_dev:
9020 put_device(pmu->dev);
9021 goto out;
9022}
9023
Peter Zijlstra547e9fd2011-01-19 12:51:39 +01009024static struct lock_class_key cpuctx_mutex;
Peter Zijlstrafacc4302011-04-09 21:17:42 +02009025static struct lock_class_key cpuctx_lock;
Peter Zijlstra547e9fd2011-01-19 12:51:39 +01009026
Mischa Jonker03d8e802013-06-04 11:45:48 +02009027int perf_pmu_register(struct pmu *pmu, const char *name, int type)
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02009028{
Peter Zijlstra108b02c2010-09-06 14:32:03 +02009029 int cpu, ret;
Peter Zijlstra33696fc2010-06-14 08:49:00 +02009030
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02009031 mutex_lock(&pmus_lock);
Peter Zijlstra33696fc2010-06-14 08:49:00 +02009032 ret = -ENOMEM;
9033 pmu->pmu_disable_count = alloc_percpu(int);
9034 if (!pmu->pmu_disable_count)
9035 goto unlock;
Peter Zijlstraad5133b2010-06-15 12:22:39 +02009036
Peter Zijlstra2e80a822010-11-17 23:17:36 +01009037 pmu->type = -1;
9038 if (!name)
9039 goto skip_type;
9040 pmu->name = name;
9041
9042 if (type < 0) {
Tejun Heo0e9c3be2013-02-27 17:04:55 -08009043 type = idr_alloc(&pmu_idr, pmu, PERF_TYPE_MAX, 0, GFP_KERNEL);
9044 if (type < 0) {
9045 ret = type;
Peter Zijlstra2e80a822010-11-17 23:17:36 +01009046 goto free_pdc;
9047 }
9048 }
9049 pmu->type = type;
9050
Peter Zijlstraabe43402010-11-17 23:17:37 +01009051 if (pmu_bus_running) {
9052 ret = pmu_dev_alloc(pmu);
9053 if (ret)
9054 goto free_idr;
9055 }
9056
Peter Zijlstra2e80a822010-11-17 23:17:36 +01009057skip_type:
Peter Zijlstra26657842016-03-22 22:09:18 +01009058 if (pmu->task_ctx_nr == perf_hw_context) {
9059 static int hw_context_taken = 0;
9060
Mark Rutland5101ef22016-04-26 11:33:46 +01009061 /*
9062 * Other than on systems with heterogeneous CPUs, it never makes
9063 * sense for two PMUs to share perf_hw_context. Uncore PMUs must
9064 * use perf_invalid_context instead.
9065 */
9066 if (WARN_ON_ONCE(hw_context_taken &&
9067 !(pmu->capabilities & PERF_PMU_CAP_HETEROGENEOUS_CPUS)))
Peter Zijlstra26657842016-03-22 22:09:18 +01009068 pmu->task_ctx_nr = perf_invalid_context;
9069
9070 hw_context_taken = 1;
9071 }
9072
Peter Zijlstra8dc85d5472010-09-02 16:50:03 +02009073 pmu->pmu_cpu_context = find_pmu_context(pmu->task_ctx_nr);
9074 if (pmu->pmu_cpu_context)
9075 goto got_cpu_context;
9076
Wei Yongjunc4814202013-04-12 11:05:54 +08009077 ret = -ENOMEM;
Peter Zijlstra108b02c2010-09-06 14:32:03 +02009078 pmu->pmu_cpu_context = alloc_percpu(struct perf_cpu_context);
9079 if (!pmu->pmu_cpu_context)
Peter Zijlstraabe43402010-11-17 23:17:37 +01009080 goto free_dev;
Peter Zijlstra108b02c2010-09-06 14:32:03 +02009081
9082 for_each_possible_cpu(cpu) {
9083 struct perf_cpu_context *cpuctx;
9084
9085 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
Peter Zijlstraeb184472010-09-07 15:55:13 +02009086 __perf_event_init_context(&cpuctx->ctx);
Peter Zijlstra547e9fd2011-01-19 12:51:39 +01009087 lockdep_set_class(&cpuctx->ctx.mutex, &cpuctx_mutex);
Peter Zijlstrafacc4302011-04-09 21:17:42 +02009088 lockdep_set_class(&cpuctx->ctx.lock, &cpuctx_lock);
Peter Zijlstra108b02c2010-09-06 14:32:03 +02009089 cpuctx->ctx.pmu = pmu;
Thomas Gleixnera63fbed2017-05-24 10:15:34 +02009090 cpuctx->online = cpumask_test_cpu(cpu, perf_online_mask);
Stephane Eranian9e630202013-04-03 14:21:33 +02009091
Peter Zijlstra272325c2015-04-15 11:41:58 +02009092 __perf_mux_hrtimer_init(cpuctx, cpu);
Peter Zijlstra108b02c2010-09-06 14:32:03 +02009093 }
9094
Peter Zijlstra8dc85d5472010-09-02 16:50:03 +02009095got_cpu_context:
Peter Zijlstraad5133b2010-06-15 12:22:39 +02009096 if (!pmu->start_txn) {
9097 if (pmu->pmu_enable) {
9098 /*
9099 * If we have pmu_enable/pmu_disable calls, install
9100 * transaction stubs that use that to try and batch
9101 * hardware accesses.
9102 */
9103 pmu->start_txn = perf_pmu_start_txn;
9104 pmu->commit_txn = perf_pmu_commit_txn;
9105 pmu->cancel_txn = perf_pmu_cancel_txn;
9106 } else {
Sukadev Bhattiprolufbbe0702015-09-03 20:07:45 -07009107 pmu->start_txn = perf_pmu_nop_txn;
Peter Zijlstraad5133b2010-06-15 12:22:39 +02009108 pmu->commit_txn = perf_pmu_nop_int;
9109 pmu->cancel_txn = perf_pmu_nop_void;
9110 }
9111 }
9112
9113 if (!pmu->pmu_enable) {
9114 pmu->pmu_enable = perf_pmu_nop_void;
9115 pmu->pmu_disable = perf_pmu_nop_void;
9116 }
9117
Peter Zijlstra35edc2a2011-11-20 20:36:02 +01009118 if (!pmu->event_idx)
9119 pmu->event_idx = perf_event_idx_default;
9120
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02009121 list_add_rcu(&pmu->entry, &pmus);
Alexander Shishkinbed5b252015-01-30 12:31:06 +02009122 atomic_set(&pmu->exclusive_cnt, 0);
Peter Zijlstra33696fc2010-06-14 08:49:00 +02009123 ret = 0;
9124unlock:
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02009125 mutex_unlock(&pmus_lock);
9126
Peter Zijlstra33696fc2010-06-14 08:49:00 +02009127 return ret;
Peter Zijlstra108b02c2010-09-06 14:32:03 +02009128
Peter Zijlstraabe43402010-11-17 23:17:37 +01009129free_dev:
9130 device_del(pmu->dev);
9131 put_device(pmu->dev);
9132
Peter Zijlstra2e80a822010-11-17 23:17:36 +01009133free_idr:
9134 if (pmu->type >= PERF_TYPE_MAX)
9135 idr_remove(&pmu_idr, pmu->type);
9136
Peter Zijlstra108b02c2010-09-06 14:32:03 +02009137free_pdc:
9138 free_percpu(pmu->pmu_disable_count);
9139 goto unlock;
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02009140}
Yan, Zhengc464c762014-03-18 16:56:41 +08009141EXPORT_SYMBOL_GPL(perf_pmu_register);
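/*
 * Example (sketch of a hypothetical driver; the my_* callbacks are
 * assumptions): an uncore driver asks for a dynamically allocated type
 * by passing -1:
 *
 *	static struct pmu my_pmu = {
 *		.task_ctx_nr	= perf_invalid_context,
 *		.event_init	= my_event_init,
 *		.add		= my_add,
 *		.del		= my_del,
 *		.start		= my_start,
 *		.stop		= my_stop,
 *		.read		= my_read,
 *	};
 *
 *	ret = perf_pmu_register(&my_pmu, "my_pmu", -1);
 */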
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02009142
9143void perf_pmu_unregister(struct pmu *pmu)
9144{
Jiri Olsa09338402016-10-20 13:10:11 +02009145 int remove_device;
9146
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02009147 mutex_lock(&pmus_lock);
Jiri Olsa09338402016-10-20 13:10:11 +02009148 remove_device = pmu_bus_running;
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02009149 list_del_rcu(&pmu->entry);
9150 mutex_unlock(&pmus_lock);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02009151
9152 /*
Peter Zijlstracde8e882010-09-13 11:06:55 +02009153 * We dereference the pmu list under both SRCU and regular RCU, so
9154 * synchronize against both of those.
Ingo Molnarcdd6c482009-09-21 12:02:48 +02009155 */
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02009156 synchronize_srcu(&pmus_srcu);
Peter Zijlstracde8e882010-09-13 11:06:55 +02009157 synchronize_rcu();
Ingo Molnarcdd6c482009-09-21 12:02:48 +02009158
Peter Zijlstra33696fc2010-06-14 08:49:00 +02009159 free_percpu(pmu->pmu_disable_count);
Peter Zijlstra2e80a822010-11-17 23:17:36 +01009160 if (pmu->type >= PERF_TYPE_MAX)
9161 idr_remove(&pmu_idr, pmu->type);
Jiri Olsa09338402016-10-20 13:10:11 +02009162 if (remove_device) {
9163 if (pmu->nr_addr_filters)
9164 device_remove_file(pmu->dev, &dev_attr_nr_addr_filters);
9165 device_del(pmu->dev);
9166 put_device(pmu->dev);
9167 }
Peter Zijlstra51676952010-12-07 14:18:20 +01009168 free_pmu_context(pmu);
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02009169}
Yan, Zhengc464c762014-03-18 16:56:41 +08009170EXPORT_SYMBOL_GPL(perf_pmu_unregister);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02009171
Mark Rutlandcc34b982015-01-07 14:56:51 +00009172static int perf_try_init_event(struct pmu *pmu, struct perf_event *event)
9173{
Peter Zijlstraccd41c82015-02-25 15:56:04 +01009174 struct perf_event_context *ctx = NULL;
Mark Rutlandcc34b982015-01-07 14:56:51 +00009175 int ret;
9176
9177 if (!try_module_get(pmu->module))
9178 return -ENODEV;
Peter Zijlstraccd41c82015-02-25 15:56:04 +01009179
9180 if (event->group_leader != event) {
Peter Zijlstra8b10c5e2015-05-01 16:08:46 +02009181 /*
9182 * This ctx->mutex can nest when we're called through
9183 * inheritance. See the perf_event_ctx_lock_nested() comment.
9184 */
9185 ctx = perf_event_ctx_lock_nested(event->group_leader,
9186 SINGLE_DEPTH_NESTING);
Peter Zijlstraccd41c82015-02-25 15:56:04 +01009187 BUG_ON(!ctx);
9188 }
9189
Mark Rutlandcc34b982015-01-07 14:56:51 +00009190 event->pmu = pmu;
9191 ret = pmu->event_init(event);
Peter Zijlstraccd41c82015-02-25 15:56:04 +01009192
9193 if (ctx)
9194 perf_event_ctx_unlock(event->group_leader, ctx);
9195
Mark Rutlandcc34b982015-01-07 14:56:51 +00009196 if (ret)
9197 module_put(pmu->module);
9198
9199 return ret;
9200}
9201
Geliang Tang18ab2cd2015-09-27 23:25:50 +08009202static struct pmu *perf_init_event(struct perf_event *event)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02009203{
Dan Carpenter85c617a2017-05-22 12:03:49 +03009204 struct pmu *pmu;
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02009205 int idx;
Lin Ming940c5b22011-02-27 21:13:31 +08009206 int ret;
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02009207
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02009208 idx = srcu_read_lock(&pmus_srcu);
Peter Zijlstra2e80a822010-11-17 23:17:36 +01009209
Kan Liang40999312017-01-18 08:21:01 -05009210 /* Try parent's PMU first: */
9211 if (event->parent && event->parent->pmu) {
9212 pmu = event->parent->pmu;
9213 ret = perf_try_init_event(pmu, event);
9214 if (!ret)
9215 goto unlock;
9216 }
9217
Peter Zijlstra2e80a822010-11-17 23:17:36 +01009218 rcu_read_lock();
9219 pmu = idr_find(&pmu_idr, event->attr.type);
9220 rcu_read_unlock();
Lin Ming940c5b22011-02-27 21:13:31 +08009221 if (pmu) {
Mark Rutlandcc34b982015-01-07 14:56:51 +00009222 ret = perf_try_init_event(pmu, event);
Lin Ming940c5b22011-02-27 21:13:31 +08009223 if (ret)
9224 pmu = ERR_PTR(ret);
Peter Zijlstra2e80a822010-11-17 23:17:36 +01009225 goto unlock;
Lin Ming940c5b22011-02-27 21:13:31 +08009226 }
Peter Zijlstra2e80a822010-11-17 23:17:36 +01009227
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02009228 list_for_each_entry_rcu(pmu, &pmus, entry) {
Mark Rutlandcc34b982015-01-07 14:56:51 +00009229 ret = perf_try_init_event(pmu, event);
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02009230 if (!ret)
Peter Zijlstrae5f4d332010-09-10 17:38:06 +02009231 goto unlock;
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02009232
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02009233 if (ret != -ENOENT) {
9234 pmu = ERR_PTR(ret);
Peter Zijlstrae5f4d332010-09-10 17:38:06 +02009235 goto unlock;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02009236 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02009237 }
Peter Zijlstrae5f4d332010-09-10 17:38:06 +02009238 pmu = ERR_PTR(-ENOENT);
9239unlock:
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02009240 srcu_read_unlock(&pmus_srcu, idx);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02009241
9242 return pmu;
9243}
9244
Kan Liangf2fb6be2016-03-23 11:24:37 -07009245static void attach_sb_event(struct perf_event *event)
9246{
9247 struct pmu_event_list *pel = per_cpu_ptr(&pmu_sb_events, event->cpu);
9248
9249 raw_spin_lock(&pel->lock);
9250 list_add_rcu(&event->sb_list, &pel->list);
9251 raw_spin_unlock(&pel->lock);
9252}
9253
Peter Zijlstraaab5b712016-05-12 17:26:46 +02009254/*
9255 * We keep a list of all !task (and therefore per-cpu) events
9256 * that need to receive side-band records.
9257 *
9258 * This avoids having to scan all the various PMU per-cpu contexts
9259 * looking for them.
9260 */
Kan Liangf2fb6be2016-03-23 11:24:37 -07009261static void account_pmu_sb_event(struct perf_event *event)
9262{
David Carrillo-Cisnerosa4f144e2016-06-01 12:33:05 -07009263 if (is_sb_event(event))
Kan Liangf2fb6be2016-03-23 11:24:37 -07009264 attach_sb_event(event);
9265}
9266
Frederic Weisbecker4beb31f2013-07-23 02:31:02 +02009267static void account_event_cpu(struct perf_event *event, int cpu)
9268{
9269 if (event->parent)
9270 return;
9271
Frederic Weisbecker4beb31f2013-07-23 02:31:02 +02009272 if (is_cgroup_event(event))
9273 atomic_inc(&per_cpu(perf_cgroup_events, cpu));
9274}
9275
Frederic Weisbecker555e0c12015-07-16 17:42:29 +02009276/* Freq events need the tick to stay alive (see perf_event_task_tick). */
9277static void account_freq_event_nohz(void)
9278{
9279#ifdef CONFIG_NO_HZ_FULL
9280 /* Lock so we don't race with concurrent unaccount */
9281 spin_lock(&nr_freq_lock);
9282 if (atomic_inc_return(&nr_freq_events) == 1)
9283 tick_nohz_dep_set(TICK_DEP_BIT_PERF_EVENTS);
9284 spin_unlock(&nr_freq_lock);
9285#endif
9286}
9287
9288static void account_freq_event(void)
9289{
9290 if (tick_nohz_full_enabled())
9291 account_freq_event_nohz();
9292 else
9293 atomic_inc(&nr_freq_events);
9294}
9295
9296
Frederic Weisbecker766d6c02013-07-23 02:31:01 +02009297static void account_event(struct perf_event *event)
9298{
Peter Zijlstra25432ae2016-01-08 11:05:09 +01009299 bool inc = false;
9300
Frederic Weisbecker4beb31f2013-07-23 02:31:02 +02009301 if (event->parent)
9302 return;
9303
Frederic Weisbecker766d6c02013-07-23 02:31:01 +02009304 if (event->attach_state & PERF_ATTACH_TASK)
Peter Zijlstra25432ae2016-01-08 11:05:09 +01009305 inc = true;
Frederic Weisbecker766d6c02013-07-23 02:31:01 +02009306 if (event->attr.mmap || event->attr.mmap_data)
9307 atomic_inc(&nr_mmap_events);
9308 if (event->attr.comm)
9309 atomic_inc(&nr_comm_events);
Hari Bathinie4222672017-03-08 02:11:36 +05309310 if (event->attr.namespaces)
9311 atomic_inc(&nr_namespaces_events);
Frederic Weisbecker766d6c02013-07-23 02:31:01 +02009312 if (event->attr.task)
9313 atomic_inc(&nr_task_events);
Frederic Weisbecker555e0c12015-07-16 17:42:29 +02009314 if (event->attr.freq)
9315 account_freq_event();
Adrian Hunter45ac1402015-07-21 12:44:02 +03009316 if (event->attr.context_switch) {
9317 atomic_inc(&nr_switch_events);
Peter Zijlstra25432ae2016-01-08 11:05:09 +01009318 inc = true;
Adrian Hunter45ac1402015-07-21 12:44:02 +03009319 }
Frederic Weisbecker4beb31f2013-07-23 02:31:02 +02009320 if (has_branch_stack(event))
Peter Zijlstra25432ae2016-01-08 11:05:09 +01009321 inc = true;
Frederic Weisbecker4beb31f2013-07-23 02:31:02 +02009322 if (is_cgroup_event(event))
Peter Zijlstra25432ae2016-01-08 11:05:09 +01009323 inc = true;
9324
Peter Zijlstra9107c892016-02-24 18:45:45 +01009325 if (inc) {
Alexander Shishkin5bce9db2017-08-29 17:01:03 +03009326 /*
9327 * We need the mutex here because static_branch_enable()
9328 * must complete *before* the perf_sched_count increment
9329 * becomes visible.
9330 */
Peter Zijlstra9107c892016-02-24 18:45:45 +01009331 if (atomic_inc_not_zero(&perf_sched_count))
9332 goto enabled;
9333
9334 mutex_lock(&perf_sched_mutex);
9335 if (!atomic_read(&perf_sched_count)) {
9336 static_branch_enable(&perf_sched_events);
9337 /*
9338 * Guarantee that all CPUs observe the key change and
9339 * call the perf scheduling hooks before proceeding to
9340 * install events that need them.
9341 */
9342 synchronize_sched();
9343 }
9344 /*
9345 * Now that we have waited for the sync_sched(), allow further
9346 * increments to by-pass the mutex.
9347 */
9348 atomic_inc(&perf_sched_count);
9349 mutex_unlock(&perf_sched_mutex);
9350 }
9351enabled:
Frederic Weisbecker766d6c02013-07-23 02:31:01 +02009352
Frederic Weisbecker4beb31f2013-07-23 02:31:02 +02009353 account_event_cpu(event, event->cpu);
Kan Liangf2fb6be2016-03-23 11:24:37 -07009354
9355 account_pmu_sb_event(event);
Frederic Weisbecker766d6c02013-07-23 02:31:01 +02009356}
9357
Ingo Molnarcdd6c482009-09-21 12:02:48 +02009358/*
9359 * Allocate and initialize an event structure
9360 */
9361static struct perf_event *
Peter Zijlstrac3f00c72010-08-18 14:37:15 +02009362perf_event_alloc(struct perf_event_attr *attr, int cpu,
Peter Zijlstrad580ff82010-10-14 17:43:23 +02009363 struct task_struct *task,
9364 struct perf_event *group_leader,
9365 struct perf_event *parent_event,
Avi Kivity4dc0da82011-06-29 18:42:35 +03009366 perf_overflow_handler_t overflow_handler,
Matt Fleming79dff512015-01-23 18:45:42 +00009367 void *context, int cgroup_fd)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02009368{
Peter Zijlstra51b0fe32010-06-11 13:35:57 +02009369 struct pmu *pmu;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02009370 struct perf_event *event;
9371 struct hw_perf_event *hwc;
Frederic Weisbecker90983b12013-07-23 02:31:00 +02009372 long err = -EINVAL;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02009373
Oleg Nesterov66832eb2011-01-18 17:10:32 +01009374 if ((unsigned)cpu >= nr_cpu_ids) {
9375 if (!task || cpu != -1)
9376 return ERR_PTR(-EINVAL);
9377 }
9378
Peter Zijlstrac3f00c72010-08-18 14:37:15 +02009379 event = kzalloc(sizeof(*event), GFP_KERNEL);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02009380 if (!event)
9381 return ERR_PTR(-ENOMEM);
9382
9383 /*
9384 * Single events are their own group leaders, with an
9385 * empty sibling list:
9386 */
9387 if (!group_leader)
9388 group_leader = event;
9389
9390 mutex_init(&event->child_mutex);
9391 INIT_LIST_HEAD(&event->child_list);
9392
9393 INIT_LIST_HEAD(&event->group_entry);
9394 INIT_LIST_HEAD(&event->event_entry);
9395 INIT_LIST_HEAD(&event->sibling_list);
Peter Zijlstra10c6db12011-11-26 02:47:31 +01009396 INIT_LIST_HEAD(&event->rb_entry);
Stephane Eranian71ad88e2013-11-12 17:58:48 +01009397 INIT_LIST_HEAD(&event->active_entry);
Alexander Shishkin375637b2016-04-27 18:44:46 +03009398 INIT_LIST_HEAD(&event->addr_filters.list);
Stephane Eranianf3ae75d2014-01-08 11:15:52 +01009399 INIT_HLIST_NODE(&event->hlist_entry);
9400
Peter Zijlstra10c6db12011-11-26 02:47:31 +01009401
Ingo Molnarcdd6c482009-09-21 12:02:48 +02009402 init_waitqueue_head(&event->waitq);
Peter Zijlstrae360adb2010-10-14 14:01:34 +08009403 init_irq_work(&event->pending, perf_pending_event);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02009404
9405 mutex_init(&event->mmap_mutex);
Alexander Shishkin375637b2016-04-27 18:44:46 +03009406 raw_spin_lock_init(&event->addr_filters.lock);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02009407
Al Viroa6fa9412012-08-20 14:59:25 +01009408 atomic_long_set(&event->refcount, 1);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02009409 event->cpu = cpu;
9410 event->attr = *attr;
9411 event->group_leader = group_leader;
9412 event->pmu = NULL;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02009413 event->oncpu = -1;
9414
9415 event->parent = parent_event;
9416
Eric W. Biederman17cf22c2010-03-02 14:51:53 -08009417 event->ns = get_pid_ns(task_active_pid_ns(current));
Ingo Molnarcdd6c482009-09-21 12:02:48 +02009418 event->id = atomic64_inc_return(&perf_event_id);
9419
9420 event->state = PERF_EVENT_STATE_INACTIVE;
9421
Peter Zijlstrad580ff82010-10-14 17:43:23 +02009422 if (task) {
9423 event->attach_state = PERF_ATTACH_TASK;
Peter Zijlstrad580ff82010-10-14 17:43:23 +02009424 /*
Peter Zijlstra50f16a82015-03-05 22:10:19 +01009425 * XXX pmu::event_init needs to know what task to account to
9426 * and we cannot use the ctx information because we need the
9427 * pmu before we get a ctx.
Peter Zijlstrad580ff82010-10-14 17:43:23 +02009428 */
Peter Zijlstra50f16a82015-03-05 22:10:19 +01009429 event->hw.target = task;
Peter Zijlstrad580ff82010-10-14 17:43:23 +02009430 }
9431
Peter Zijlstra34f43922015-02-20 14:05:38 +01009432 event->clock = &local_clock;
9433 if (parent_event)
9434 event->clock = parent_event->clock;
9435
Avi Kivity4dc0da82011-06-29 18:42:35 +03009436 if (!overflow_handler && parent_event) {
Frederic Weisbeckerb326e952009-12-05 09:44:31 +01009437 overflow_handler = parent_event->overflow_handler;
Avi Kivity4dc0da82011-06-29 18:42:35 +03009438 context = parent_event->overflow_handler_context;
Arnd Bergmannf1e4ba52016-09-06 15:10:22 +02009439#if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_EVENT_TRACING)
Alexei Starovoitovaa6a5f32016-09-01 18:37:24 -07009440 if (overflow_handler == bpf_overflow_handler) {
9441 struct bpf_prog *prog = bpf_prog_inc(parent_event->prog);
9442
9443 if (IS_ERR(prog)) {
9444 err = PTR_ERR(prog);
9445 goto err_ns;
9446 }
9447 event->prog = prog;
9448 event->orig_overflow_handler =
9449 parent_event->orig_overflow_handler;
9450 }
9451#endif
Avi Kivity4dc0da82011-06-29 18:42:35 +03009452 }
Oleg Nesterov66832eb2011-01-18 17:10:32 +01009453
Wang Nan18794452016-03-28 06:41:30 +00009454 if (overflow_handler) {
9455 event->overflow_handler = overflow_handler;
9456 event->overflow_handler_context = context;
Wang Nan9ecda412016-04-05 14:11:18 +00009457 } else if (is_write_backward(event)) {
9458 event->overflow_handler = perf_event_output_backward;
9459 event->overflow_handler_context = NULL;
Wang Nan18794452016-03-28 06:41:30 +00009460 } else {
Wang Nan9ecda412016-04-05 14:11:18 +00009461 event->overflow_handler = perf_event_output_forward;
Wang Nan18794452016-03-28 06:41:30 +00009462 event->overflow_handler_context = NULL;
9463 }
Frederic Weisbecker97eaf532009-10-18 15:33:50 +02009464
Jiri Olsa0231bb52013-02-01 11:23:45 +01009465 perf_event__state_init(event);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02009466
9467 pmu = NULL;
9468
9469 hwc = &event->hw;
9470 hwc->sample_period = attr->sample_period;
9471 if (attr->freq && attr->sample_freq)
9472 hwc->sample_period = 1;
9473 hwc->last_period = hwc->sample_period;
9474
Peter Zijlstrae7850592010-05-21 14:43:08 +02009475 local64_set(&hwc->period_left, hwc->sample_period);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02009476
9477 /*
Peter Zijlstraba5213a2017-05-30 11:45:12 +02009478 * We currently do not support PERF_SAMPLE_READ on inherited events.
9479 * See perf_output_read().
Ingo Molnarcdd6c482009-09-21 12:02:48 +02009480 */
Peter Zijlstraba5213a2017-05-30 11:45:12 +02009481 if (attr->inherit && (attr->sample_type & PERF_SAMPLE_READ))
Frederic Weisbecker90983b12013-07-23 02:31:00 +02009482 goto err_ns;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02009483
Yan, Zhenga46a2302014-11-04 21:56:06 -05009484 if (!has_branch_stack(event))
9485 event->attr.branch_sample_type = 0;
9486
Matt Fleming79dff512015-01-23 18:45:42 +00009487 if (cgroup_fd != -1) {
9488 err = perf_cgroup_connect(cgroup_fd, event, attr, group_leader);
9489 if (err)
9490 goto err_ns;
9491 }
9492
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02009493 pmu = perf_init_event(event);
Dan Carpenter85c617a2017-05-22 12:03:49 +03009494 if (IS_ERR(pmu)) {
Ingo Molnarcdd6c482009-09-21 12:02:48 +02009495 err = PTR_ERR(pmu);
Frederic Weisbecker90983b12013-07-23 02:31:00 +02009496 goto err_ns;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02009497 }
9498
Alexander Shishkinbed5b252015-01-30 12:31:06 +02009499 err = exclusive_event_init(event);
9500 if (err)
9501 goto err_pmu;
9502
Alexander Shishkin375637b2016-04-27 18:44:46 +03009503 if (has_addr_filter(event)) {
9504 event->addr_filters_offs = kcalloc(pmu->nr_addr_filters,
9505 sizeof(unsigned long),
9506 GFP_KERNEL);
Dan Carpenter36cc2b92017-05-22 12:04:18 +03009507 if (!event->addr_filters_offs) {
9508 err = -ENOMEM;
Alexander Shishkin375637b2016-04-27 18:44:46 +03009509 goto err_per_task;
Dan Carpenter36cc2b92017-05-22 12:04:18 +03009510 }
Alexander Shishkin375637b2016-04-27 18:44:46 +03009511
9512 /* force hw sync on the address filters */
9513 event->addr_filters_gen = 1;
9514 }
9515
Ingo Molnarcdd6c482009-09-21 12:02:48 +02009516 if (!event->parent) {
Frederic Weisbecker927c7a92010-07-01 16:20:36 +02009517 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) {
Arnaldo Carvalho de Melo97c79a32016-04-28 13:16:33 -03009518 err = get_callchain_buffers(attr->sample_max_stack);
Frederic Weisbecker90983b12013-07-23 02:31:00 +02009519 if (err)
Alexander Shishkin375637b2016-04-27 18:44:46 +03009520 goto err_addr_filters;
Stephane Eraniand010b332012-02-09 23:21:00 +01009521 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02009522 }
9523
Alexander Shishkin927a5572016-03-02 13:24:14 +02009524 /* symmetric to unaccount_event() in _free_event() */
9525 account_event(event);
9526
Ingo Molnarcdd6c482009-09-21 12:02:48 +02009527 return event;
Frederic Weisbecker90983b12013-07-23 02:31:00 +02009528
Alexander Shishkin375637b2016-04-27 18:44:46 +03009529err_addr_filters:
9530 kfree(event->addr_filters_offs);
9531
Alexander Shishkinbed5b252015-01-30 12:31:06 +02009532err_per_task:
9533 exclusive_event_destroy(event);
9534
Frederic Weisbecker90983b12013-07-23 02:31:00 +02009535err_pmu:
9536 if (event->destroy)
9537 event->destroy(event);
Yan, Zhengc464c762014-03-18 16:56:41 +08009538 module_put(pmu->module);
Frederic Weisbecker90983b12013-07-23 02:31:00 +02009539err_ns:
Matt Fleming79dff512015-01-23 18:45:42 +00009540 if (is_cgroup_event(event))
9541 perf_detach_cgroup(event);
Frederic Weisbecker90983b12013-07-23 02:31:00 +02009542 if (event->ns)
9543 put_pid_ns(event->ns);
9544 kfree(event);
9545
9546 return ERR_PTR(err);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02009547}
9548
9549static int perf_copy_attr(struct perf_event_attr __user *uattr,
9550 struct perf_event_attr *attr)
9551{
9552 u32 size;
9553 int ret;
9554
9555 if (!access_ok(VERIFY_WRITE, uattr, PERF_ATTR_SIZE_VER0))
9556 return -EFAULT;
9557
9558 /*
 9559	 * zero the full structure, so that a short copy leaves the rest zeroed.
9560 */
9561 memset(attr, 0, sizeof(*attr));
9562
9563 ret = get_user(size, &uattr->size);
9564 if (ret)
9565 return ret;
9566
9567 if (size > PAGE_SIZE) /* silly large */
9568 goto err_size;
9569
9570 if (!size) /* abi compat */
9571 size = PERF_ATTR_SIZE_VER0;
9572
9573 if (size < PERF_ATTR_SIZE_VER0)
9574 goto err_size;
9575
9576 /*
9577 * If we're handed a bigger struct than we know of,
9578 * ensure all the unknown bits are 0 - i.e. new
9579 * user-space does not rely on any kernel feature
 9580	 * extensions we don't know about yet.
9581 */
9582 if (size > sizeof(*attr)) {
9583 unsigned char __user *addr;
9584 unsigned char __user *end;
9585 unsigned char val;
9586
9587 addr = (void __user *)uattr + sizeof(*attr);
9588 end = (void __user *)uattr + size;
9589
9590 for (; addr < end; addr++) {
9591 ret = get_user(val, addr);
9592 if (ret)
9593 return ret;
9594 if (val)
9595 goto err_size;
9596 }
9597 size = sizeof(*attr);
9598 }
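	/*
	 * Worked example of the ABI rule above (illustrative): an old binary
	 * may pass size == PERF_ATTR_SIZE_VER0 (64 bytes) and relies on the
	 * memset() above to zero the fields it never knew about; a newer
	 * binary may pass a size larger than sizeof(*attr), which is accepted
	 * only if every byte beyond what this kernel understands is zero.
	 */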
9599
9600 ret = copy_from_user(attr, uattr, size);
9601 if (ret)
9602 return -EFAULT;
9603
Meng Xuf12f42a2017-08-23 17:07:50 -04009604 attr->size = size;
9605
Mahesh Salgaonkarcd757642010-01-30 10:25:18 +05309606 if (attr->__reserved_1)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02009607 return -EINVAL;
9608
9609 if (attr->sample_type & ~(PERF_SAMPLE_MAX-1))
9610 return -EINVAL;
9611
9612 if (attr->read_format & ~(PERF_FORMAT_MAX-1))
9613 return -EINVAL;
9614
Stephane Eranianbce38cd2012-02-09 23:20:51 +01009615 if (attr->sample_type & PERF_SAMPLE_BRANCH_STACK) {
9616 u64 mask = attr->branch_sample_type;
9617
9618 /* only using defined bits */
9619 if (mask & ~(PERF_SAMPLE_BRANCH_MAX-1))
9620 return -EINVAL;
9621
9622 /* at least one branch bit must be set */
9623 if (!(mask & ~PERF_SAMPLE_BRANCH_PLM_ALL))
9624 return -EINVAL;
9625
Stephane Eranianbce38cd2012-02-09 23:20:51 +01009626 /* propagate priv level, when not set for branch */
9627 if (!(mask & PERF_SAMPLE_BRANCH_PLM_ALL)) {
9628
9629 /* exclude_kernel checked on syscall entry */
9630 if (!attr->exclude_kernel)
9631 mask |= PERF_SAMPLE_BRANCH_KERNEL;
9632
9633 if (!attr->exclude_user)
9634 mask |= PERF_SAMPLE_BRANCH_USER;
9635
9636 if (!attr->exclude_hv)
9637 mask |= PERF_SAMPLE_BRANCH_HV;
9638 /*
9639 * adjust user setting (for HW filter setup)
9640 */
9641 attr->branch_sample_type = mask;
9642 }
Stephane Eraniane7122092013-06-06 11:02:04 +02009643 /* privileged levels capture (kernel, hv): check permissions */
9644 if ((mask & PERF_SAMPLE_BRANCH_PERM_PLM)
Stephane Eranian2b923c82013-05-21 12:53:37 +02009645 && perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
9646 return -EACCES;
Stephane Eranianbce38cd2012-02-09 23:20:51 +01009647 }
Jiri Olsa40189942012-08-07 15:20:37 +02009648
Jiri Olsac5ebced2012-08-07 15:20:40 +02009649 if (attr->sample_type & PERF_SAMPLE_REGS_USER) {
Jiri Olsa40189942012-08-07 15:20:37 +02009650 ret = perf_reg_validate(attr->sample_regs_user);
Jiri Olsac5ebced2012-08-07 15:20:40 +02009651 if (ret)
9652 return ret;
9653 }
9654
9655 if (attr->sample_type & PERF_SAMPLE_STACK_USER) {
9656 if (!arch_perf_have_user_stack_dump())
9657 return -ENOSYS;
9658
9659 /*
9660 * We have __u32 type for the size, but so far
9661 * we can only use __u16 as maximum due to the
9662 * __u16 sample size limit.
9663 */
9664 if (attr->sample_stack_user >= USHRT_MAX)
9665 ret = -EINVAL;
9666 else if (!IS_ALIGNED(attr->sample_stack_user, sizeof(u64)))
9667 ret = -EINVAL;
9668 }
Jiri Olsa40189942012-08-07 15:20:37 +02009669
Stephane Eranian60e23642014-09-24 13:48:37 +02009670 if (attr->sample_type & PERF_SAMPLE_REGS_INTR)
9671 ret = perf_reg_validate(attr->sample_regs_intr);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02009672out:
9673 return ret;
9674
9675err_size:
9676 put_user(sizeof(*attr), &uattr->size);
9677 ret = -E2BIG;
9678 goto out;
9679}
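/*
 * Illustrative userspace counterpart of perf_copy_attr() (hypothetical
 * sketch, not part of this file): a minimal attribute block that passes
 * the validation above:
 *
 *	struct perf_event_attr attr;
 *
 *	memset(&attr, 0, sizeof(attr));
 *	attr.size = sizeof(attr);
 *	attr.type = PERF_TYPE_HARDWARE;
 *	attr.config = PERF_COUNT_HW_INSTRUCTIONS;
 *	attr.disabled = 1;
 *	attr.exclude_kernel = 1;
 */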
9680
Peter Zijlstraac9721f2010-05-27 12:54:41 +02009681static int
9682perf_event_set_output(struct perf_event *event, struct perf_event *output_event)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02009683{
Peter Zijlstrab69cf532014-03-14 10:50:33 +01009684 struct ring_buffer *rb = NULL;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02009685 int ret = -EINVAL;
9686
Peter Zijlstraac9721f2010-05-27 12:54:41 +02009687 if (!output_event)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02009688 goto set;
9689
Peter Zijlstraac9721f2010-05-27 12:54:41 +02009690 /* don't allow circular references */
9691 if (event == output_event)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02009692 goto out;
9693
Peter Zijlstra0f139302010-05-20 14:35:15 +02009694 /*
9695 * Don't allow cross-cpu buffers
9696 */
9697 if (output_event->cpu != event->cpu)
9698 goto out;
9699
9700 /*
Frederic Weisbecker76369132011-05-19 19:55:04 +02009701	 * If it's not a per-cpu rb, it must be the same task.
Peter Zijlstra0f139302010-05-20 14:35:15 +02009702 */
9703 if (output_event->cpu == -1 && output_event->ctx != event->ctx)
9704 goto out;
9705
Peter Zijlstra34f43922015-02-20 14:05:38 +01009706 /*
9707 * Mixing clocks in the same buffer is trouble you don't need.
9708 */
9709 if (output_event->clock != event->clock)
9710 goto out;
9711
Peter Zijlstra45bfb2e2015-01-14 14:18:11 +02009712 /*
Wang Nan9ecda412016-04-05 14:11:18 +00009713	 * The ring buffer is written either from the beginning or from the end.
 9714	 * Mixing the two directions is not allowed.
9715 */
9716 if (is_write_backward(output_event) != is_write_backward(event))
9717 goto out;
9718
9719 /*
Peter Zijlstra45bfb2e2015-01-14 14:18:11 +02009720 * If both events generate aux data, they must be on the same PMU
9721 */
9722 if (has_aux(event) && has_aux(output_event) &&
9723 event->pmu != output_event->pmu)
9724 goto out;
9725
Ingo Molnarcdd6c482009-09-21 12:02:48 +02009726set:
9727 mutex_lock(&event->mmap_mutex);
Peter Zijlstraac9721f2010-05-27 12:54:41 +02009728 /* Can't redirect output if we've got an active mmap() */
9729 if (atomic_read(&event->mmap_count))
9730 goto unlock;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02009731
Peter Zijlstraac9721f2010-05-27 12:54:41 +02009732 if (output_event) {
Frederic Weisbecker76369132011-05-19 19:55:04 +02009733 /* get the rb we want to redirect to */
9734 rb = ring_buffer_get(output_event);
9735 if (!rb)
Peter Zijlstraac9721f2010-05-27 12:54:41 +02009736 goto unlock;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02009737 }
9738
Peter Zijlstrab69cf532014-03-14 10:50:33 +01009739 ring_buffer_attach(event, rb);
Peter Zijlstra9bb5d402013-06-04 10:44:21 +02009740
Ingo Molnarcdd6c482009-09-21 12:02:48 +02009741 ret = 0;
Peter Zijlstraac9721f2010-05-27 12:54:41 +02009742unlock:
9743 mutex_unlock(&event->mmap_mutex);
9744
Ingo Molnarcdd6c482009-09-21 12:02:48 +02009745out:
Ingo Molnarcdd6c482009-09-21 12:02:48 +02009746 return ret;
9747}
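/*
 * Sketch of the usual path into perf_event_set_output() (illustrative):
 * userspace redirects one event's samples into another event's ring
 * buffer with
 *
 *	ioctl(event_fd, PERF_EVENT_IOC_SET_OUTPUT, output_fd);
 *
 * subject to the same-cpu/same-task, clock and direction checks above.
 */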
9748
Peter Zijlstraf63a8da2015-01-23 12:24:14 +01009749static void mutex_lock_double(struct mutex *a, struct mutex *b)
9750{
9751 if (b < a)
9752 swap(a, b);
9753
9754 mutex_lock(a);
9755 mutex_lock_nested(b, SINGLE_DEPTH_NESTING);
9756}
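/*
 * Taking the two mutexes in address order yields a single global lock
 * order, so two concurrent callers locking the same pair of contexts in
 * opposite argument order cannot ABBA-deadlock each other.
 */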
9757
Peter Zijlstra34f43922015-02-20 14:05:38 +01009758static int perf_event_set_clock(struct perf_event *event, clockid_t clk_id)
9759{
9760 bool nmi_safe = false;
9761
9762 switch (clk_id) {
9763 case CLOCK_MONOTONIC:
9764 event->clock = &ktime_get_mono_fast_ns;
9765 nmi_safe = true;
9766 break;
9767
9768 case CLOCK_MONOTONIC_RAW:
9769 event->clock = &ktime_get_raw_fast_ns;
9770 nmi_safe = true;
9771 break;
9772
9773 case CLOCK_REALTIME:
9774 event->clock = &ktime_get_real_ns;
9775 break;
9776
9777 case CLOCK_BOOTTIME:
9778 event->clock = &ktime_get_boot_ns;
9779 break;
9780
9781 case CLOCK_TAI:
9782 event->clock = &ktime_get_tai_ns;
9783 break;
9784
9785 default:
9786 return -EINVAL;
9787 }
9788
9789 if (!nmi_safe && !(event->pmu->capabilities & PERF_PMU_CAP_NO_NMI))
9790 return -EINVAL;
9791
9792 return 0;
9793}
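/*
 * Illustrative userspace usage (hypothetical): selecting one of the
 * clocks above before calling sys_perf_event_open():
 *
 *	attr.use_clockid = 1;
 *	attr.clockid = CLOCK_MONOTONIC_RAW;
 *
 * Clocks that are not NMI-safe are rejected for PMUs that can deliver
 * events from NMI context (i.e. those without PERF_PMU_CAP_NO_NMI).
 */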
9794
Peter Zijlstra321027c2017-01-11 21:09:50 +01009795/*
9796 * Variation on perf_event_ctx_lock_nested(), except we take two context
9797 * mutexes.
9798 */
9799static struct perf_event_context *
9800__perf_event_ctx_lock_double(struct perf_event *group_leader,
9801 struct perf_event_context *ctx)
9802{
9803 struct perf_event_context *gctx;
9804
9805again:
9806 rcu_read_lock();
9807 gctx = READ_ONCE(group_leader->ctx);
9808 if (!atomic_inc_not_zero(&gctx->refcount)) {
9809 rcu_read_unlock();
9810 goto again;
9811 }
9812 rcu_read_unlock();
9813
9814 mutex_lock_double(&gctx->mutex, &ctx->mutex);
9815
9816 if (group_leader->ctx != gctx) {
9817 mutex_unlock(&ctx->mutex);
9818 mutex_unlock(&gctx->mutex);
9819 put_ctx(gctx);
9820 goto again;
9821 }
9822
9823 return gctx;
9824}
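/*
 * Note on the retry loop above: group_leader->ctx can change under us
 * (see the move_group handling in sys_perf_event_open()), so the ctx
 * pointer is re-checked after both mutexes are held, and we drop and
 * retry if it moved in the meantime.
 */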
9825
Ingo Molnarcdd6c482009-09-21 12:02:48 +02009826/**
9827 * sys_perf_event_open - open a performance event, associate it to a task/cpu
9828 *
9829 * @attr_uptr: event_id type attributes for monitoring/sampling
9830 * @pid: target pid
9831 * @cpu: target cpu
9832 * @group_fd: group leader event fd
9833 */
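/*
 * Hypothetical userspace sketch: there is no libc wrapper for this
 * syscall, so callers typically do
 *
 *	syscall(__NR_perf_event_open, &attr, pid, cpu, group_fd, flags);
 *
 * with group_fd == -1 when no group leader is wanted, and flags such as
 * PERF_FLAG_FD_CLOEXEC.
 */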
9834SYSCALL_DEFINE5(perf_event_open,
9835 struct perf_event_attr __user *, attr_uptr,
9836 pid_t, pid, int, cpu, int, group_fd, unsigned long, flags)
9837{
Peter Zijlstrab04243e2010-09-17 11:28:48 +02009838 struct perf_event *group_leader = NULL, *output_event = NULL;
9839 struct perf_event *event, *sibling;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02009840 struct perf_event_attr attr;
Peter Zijlstraf63a8da2015-01-23 12:24:14 +01009841 struct perf_event_context *ctx, *uninitialized_var(gctx);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02009842 struct file *event_file = NULL;
Al Viro2903ff02012-08-28 12:52:22 -04009843 struct fd group = {NULL, 0};
Matt Helsley38a81da2010-09-13 13:01:20 -07009844 struct task_struct *task = NULL;
Peter Zijlstra89a1e182010-09-07 17:34:50 +02009845 struct pmu *pmu;
Al Viroea635c62010-05-26 17:40:29 -04009846 int event_fd;
Peter Zijlstrab04243e2010-09-17 11:28:48 +02009847 int move_group = 0;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02009848 int err;
Yann Droneauda21b0b32014-01-05 21:36:33 +01009849 int f_flags = O_RDWR;
Matt Fleming79dff512015-01-23 18:45:42 +00009850 int cgroup_fd = -1;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02009851
9852 /* for future expandability... */
Stephane Eraniane5d13672011-02-14 11:20:01 +02009853 if (flags & ~PERF_FLAG_ALL)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02009854 return -EINVAL;
9855
9856 err = perf_copy_attr(attr_uptr, &attr);
9857 if (err)
9858 return err;
9859
9860 if (!attr.exclude_kernel) {
9861 if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
9862 return -EACCES;
9863 }
9864
Hari Bathinie4222672017-03-08 02:11:36 +05309865 if (attr.namespaces) {
9866 if (!capable(CAP_SYS_ADMIN))
9867 return -EACCES;
9868 }
9869
Ingo Molnarcdd6c482009-09-21 12:02:48 +02009870 if (attr.freq) {
9871 if (attr.sample_freq > sysctl_perf_event_sample_rate)
9872 return -EINVAL;
Peter Zijlstra0819b2e2014-05-15 20:23:48 +02009873 } else {
9874 if (attr.sample_period & (1ULL << 63))
9875 return -EINVAL;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02009876 }
9877
Kan Liangfc7ce9c2017-08-28 20:52:49 -04009878 /* Only privileged users can get physical addresses */
9879 if ((attr.sample_type & PERF_SAMPLE_PHYS_ADDR) &&
9880 perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
9881 return -EACCES;
9882
Arnaldo Carvalho de Melo97c79a32016-04-28 13:16:33 -03009883 if (!attr.sample_max_stack)
9884 attr.sample_max_stack = sysctl_perf_event_max_stack;
9885
Stephane Eraniane5d13672011-02-14 11:20:01 +02009886 /*
9887 * In cgroup mode, the pid argument is used to pass the fd
9888 * opened to the cgroup directory in cgroupfs. The cpu argument
9889 * designates the cpu on which to monitor threads from that
9890 * cgroup.
9891 */
9892 if ((flags & PERF_FLAG_PID_CGROUP) && (pid == -1 || cpu == -1))
9893 return -EINVAL;
9894
Yann Droneauda21b0b32014-01-05 21:36:33 +01009895 if (flags & PERF_FLAG_FD_CLOEXEC)
9896 f_flags |= O_CLOEXEC;
9897
9898 event_fd = get_unused_fd_flags(f_flags);
Al Viroea635c62010-05-26 17:40:29 -04009899 if (event_fd < 0)
9900 return event_fd;
9901
Peter Zijlstraac9721f2010-05-27 12:54:41 +02009902 if (group_fd != -1) {
Al Viro2903ff02012-08-28 12:52:22 -04009903 err = perf_fget_light(group_fd, &group);
9904 if (err)
Stephane Eraniand14b12d2010-09-17 11:28:47 +02009905 goto err_fd;
Al Viro2903ff02012-08-28 12:52:22 -04009906 group_leader = group.file->private_data;
Peter Zijlstraac9721f2010-05-27 12:54:41 +02009907 if (flags & PERF_FLAG_FD_OUTPUT)
9908 output_event = group_leader;
9909 if (flags & PERF_FLAG_FD_NO_GROUP)
9910 group_leader = NULL;
9911 }
9912
Stephane Eraniane5d13672011-02-14 11:20:01 +02009913 if (pid != -1 && !(flags & PERF_FLAG_PID_CGROUP)) {
Peter Zijlstrac6be5a52010-10-14 16:59:46 +02009914 task = find_lively_task_by_vpid(pid);
9915 if (IS_ERR(task)) {
9916 err = PTR_ERR(task);
9917 goto err_group_fd;
9918 }
9919 }
9920
Peter Zijlstra1f4ee502014-05-06 09:59:34 +02009921 if (task && group_leader &&
9922 group_leader->attr.inherit != attr.inherit) {
9923 err = -EINVAL;
9924 goto err_task;
9925 }
9926
Peter Zijlstra79c9ce52016-04-26 11:36:53 +02009927 if (task) {
9928 err = mutex_lock_interruptible(&task->signal->cred_guard_mutex);
9929 if (err)
Alexander Levine5aeee52017-06-03 03:39:13 +00009930 goto err_task;
Peter Zijlstra79c9ce52016-04-26 11:36:53 +02009931
9932 /*
9933 * Reuse ptrace permission checks for now.
9934 *
9935 * We must hold cred_guard_mutex across this and any potential
9936 * perf_install_in_context() call for this new event to
9937 * serialize against exec() altering our credentials (and the
9938 * perf_event_exit_task() that could imply).
9939 */
9940 err = -EACCES;
9941 if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS))
9942 goto err_cred;
9943 }
9944
Matt Fleming79dff512015-01-23 18:45:42 +00009945 if (flags & PERF_FLAG_PID_CGROUP)
9946 cgroup_fd = pid;
9947
Avi Kivity4dc0da82011-06-29 18:42:35 +03009948 event = perf_event_alloc(&attr, cpu, task, group_leader, NULL,
Matt Fleming79dff512015-01-23 18:45:42 +00009949 NULL, NULL, cgroup_fd);
Stephane Eraniand14b12d2010-09-17 11:28:47 +02009950 if (IS_ERR(event)) {
9951 err = PTR_ERR(event);
Peter Zijlstra79c9ce52016-04-26 11:36:53 +02009952 goto err_cred;
Stephane Eraniand14b12d2010-09-17 11:28:47 +02009953 }
9954
Vince Weaver53b25332014-05-16 17:12:12 -04009955 if (is_sampling_event(event)) {
9956 if (event->pmu->capabilities & PERF_PMU_CAP_NO_INTERRUPT) {
Vineet Guptaa1396552016-05-09 15:07:40 +05309957 err = -EOPNOTSUPP;
Vince Weaver53b25332014-05-16 17:12:12 -04009958 goto err_alloc;
9959 }
9960 }
9961
Ingo Molnarcdd6c482009-09-21 12:02:48 +02009962 /*
Peter Zijlstra89a1e182010-09-07 17:34:50 +02009963 * Special case software events and allow them to be part of
9964 * any hardware group.
9965 */
9966 pmu = event->pmu;
Peter Zijlstrab04243e2010-09-17 11:28:48 +02009967
Peter Zijlstra34f43922015-02-20 14:05:38 +01009968 if (attr.use_clockid) {
9969 err = perf_event_set_clock(event, attr.clockid);
9970 if (err)
9971 goto err_alloc;
9972 }
9973
David Carrillo-Cisneros4ff6a8d2016-08-17 13:55:05 -07009974 if (pmu->task_ctx_nr == perf_sw_context)
9975 event->event_caps |= PERF_EV_CAP_SOFTWARE;
9976
Peter Zijlstrab04243e2010-09-17 11:28:48 +02009977 if (group_leader &&
9978 (is_software_event(event) != is_software_event(group_leader))) {
9979 if (is_software_event(event)) {
9980 /*
9981 * If event and group_leader are not both a software
9982 * event, and event is, then group leader is not.
9983 *
9984 * Allow the addition of software events to !software
9985 * groups, this is safe because software events never
9986 * fail to schedule.
9987 */
9988 pmu = group_leader->pmu;
9989 } else if (is_software_event(group_leader) &&
David Carrillo-Cisneros4ff6a8d2016-08-17 13:55:05 -07009990 (group_leader->group_caps & PERF_EV_CAP_SOFTWARE)) {
Peter Zijlstrab04243e2010-09-17 11:28:48 +02009991 /*
9992 * In case the group is a pure software group, and we
9993 * try to add a hardware event, move the whole group to
9994 * the hardware context.
9995 */
9996 move_group = 1;
9997 }
9998 }
Peter Zijlstra89a1e182010-09-07 17:34:50 +02009999
10000 /*
10001 * Get the target context (task or percpu):
10002 */
Yan, Zheng4af57ef2014-11-04 21:56:01 -050010003 ctx = find_get_context(pmu, task, event);
Peter Zijlstra89a1e182010-09-07 17:34:50 +020010004 if (IS_ERR(ctx)) {
10005 err = PTR_ERR(ctx);
Peter Zijlstrac6be5a52010-10-14 16:59:46 +020010006 goto err_alloc;
Peter Zijlstra89a1e182010-09-07 17:34:50 +020010007 }
10008
Alexander Shishkinbed5b252015-01-30 12:31:06 +020010009 if ((pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE) && group_leader) {
10010 err = -EBUSY;
10011 goto err_context;
10012 }
10013
Ingo Molnarcdd6c482009-09-21 12:02:48 +020010014 /*
10015 * Look up the group leader (we will attach this event to it):
10016 */
Peter Zijlstraac9721f2010-05-27 12:54:41 +020010017 if (group_leader) {
Ingo Molnarcdd6c482009-09-21 12:02:48 +020010018 err = -EINVAL;
Ingo Molnarcdd6c482009-09-21 12:02:48 +020010019
Ingo Molnarcdd6c482009-09-21 12:02:48 +020010020 /*
10021 * Do not allow a recursive hierarchy (this new sibling
10022 * becoming part of another group-sibling):
10023 */
10024 if (group_leader->group_leader != group_leader)
Peter Zijlstrac3f00c72010-08-18 14:37:15 +020010025 goto err_context;
Peter Zijlstra34f43922015-02-20 14:05:38 +010010026
10027 /* All events in a group should have the same clock */
10028 if (group_leader->clock != event->clock)
10029 goto err_context;
10030
Ingo Molnarcdd6c482009-09-21 12:02:48 +020010031 /*
Mark Rutland64aee2a2017-06-22 15:41:38 +010010032		 * Make sure both events are for the same CPU;
 10033		 * grouping events for different CPUs is broken, since
 10034		 * you can never schedule them concurrently anyhow.
Ingo Molnarcdd6c482009-09-21 12:02:48 +020010035 */
Mark Rutland64aee2a2017-06-22 15:41:38 +010010036 if (group_leader->cpu != event->cpu)
10037 goto err_context;
Peter Zijlstrac3c87e72015-01-23 11:19:48 +010010038
Mark Rutland64aee2a2017-06-22 15:41:38 +010010039 /*
 10040		 * Make sure both events are on the same task, or both
 10041		 * are per-CPU events.
10042 */
10043 if (group_leader->ctx->task != ctx->task)
10044 goto err_context;
10045
10046 /*
 10047		 * Do not allow attaching to a group in a different task
10048 * or CPU context. If we're moving SW events, we'll fix
10049 * this up later, so allow that.
10050 */
10051 if (!move_group && group_leader->ctx != ctx)
10052 goto err_context;
Peter Zijlstrab04243e2010-09-17 11:28:48 +020010053
Ingo Molnarcdd6c482009-09-21 12:02:48 +020010054 /*
10055 * Only a group leader can be exclusive or pinned
10056 */
10057 if (attr.exclusive || attr.pinned)
Peter Zijlstrac3f00c72010-08-18 14:37:15 +020010058 goto err_context;
Peter Zijlstraac9721f2010-05-27 12:54:41 +020010059 }
10060
10061 if (output_event) {
10062 err = perf_event_set_output(event, output_event);
10063 if (err)
Peter Zijlstrac3f00c72010-08-18 14:37:15 +020010064 goto err_context;
Peter Zijlstraac9721f2010-05-27 12:54:41 +020010065 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +020010066
Yann Droneauda21b0b32014-01-05 21:36:33 +010010067 event_file = anon_inode_getfile("[perf_event]", &perf_fops, event,
10068 f_flags);
Al Viroea635c62010-05-26 17:40:29 -040010069 if (IS_ERR(event_file)) {
10070 err = PTR_ERR(event_file);
Alexander Shishkin201c2f82016-03-21 10:02:42 +020010071 event_file = NULL;
Peter Zijlstrac3f00c72010-08-18 14:37:15 +020010072 goto err_context;
Al Viroea635c62010-05-26 17:40:29 -040010073 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +020010074
Peter Zijlstrab04243e2010-09-17 11:28:48 +020010075 if (move_group) {
Peter Zijlstra321027c2017-01-11 21:09:50 +010010076 gctx = __perf_event_ctx_lock_double(group_leader, ctx);
10077
Peter Zijlstra84c4e622016-02-24 18:45:40 +010010078 if (gctx->task == TASK_TOMBSTONE) {
10079 err = -ESRCH;
10080 goto err_locked;
10081 }
Peter Zijlstra321027c2017-01-11 21:09:50 +010010082
10083 /*
10084 * Check if we raced against another sys_perf_event_open() call
10085 * moving the software group underneath us.
10086 */
10087 if (!(group_leader->group_caps & PERF_EV_CAP_SOFTWARE)) {
10088 /*
10089 * If someone moved the group out from under us, check
10090 * if this new event wound up on the same ctx, if so
 10091			 * it's the regular !move_group case, otherwise fail.
10092 */
10093 if (gctx != ctx) {
10094 err = -EINVAL;
10095 goto err_locked;
10096 } else {
10097 perf_event_ctx_unlock(group_leader, gctx);
10098 move_group = 0;
10099 }
10100 }
Peter Zijlstraf55fc2a2015-09-09 19:06:33 +020010101 } else {
10102 mutex_lock(&ctx->mutex);
10103 }
Peter Zijlstrab04243e2010-09-17 11:28:48 +020010104
Peter Zijlstra84c4e622016-02-24 18:45:40 +010010105 if (ctx->task == TASK_TOMBSTONE) {
10106 err = -ESRCH;
10107 goto err_locked;
10108 }
10109
Peter Zijlstraa7239682015-09-09 19:06:33 +020010110 if (!perf_event_validate_size(event)) {
10111 err = -E2BIG;
10112 goto err_locked;
10113 }
10114
Thomas Gleixnera63fbed2017-05-24 10:15:34 +020010115 if (!task) {
10116 /*
10117 * Check if the @cpu we're creating an event for is online.
10118 *
10119 * We use the perf_cpu_context::ctx::mutex to serialize against
10120 * the hotplug notifiers. See perf_event_{init,exit}_cpu().
10121 */
10122 struct perf_cpu_context *cpuctx =
10123 container_of(ctx, struct perf_cpu_context, ctx);
10124
10125 if (!cpuctx->online) {
10126 err = -ENODEV;
10127 goto err_locked;
10128 }
10129 }
 10130
Peter Zijlstraf55fc2a2015-09-09 19:06:33 +020010132 /*
10133 * Must be under the same ctx::mutex as perf_install_in_context(),
10134 * because we need to serialize with concurrent event creation.
10135 */
10136 if (!exclusive_event_installable(event, ctx)) {
10137 /* exclusive and group stuff are assumed mutually exclusive */
10138 WARN_ON_ONCE(move_group);
10139
10140 err = -EBUSY;
10141 goto err_locked;
10142 }
10143
10144 WARN_ON_ONCE(ctx->parent_ctx);
10145
Peter Zijlstra79c9ce52016-04-26 11:36:53 +020010146 /*
 10147	 * This is the point of no return; we cannot fail hereafter. This is
10148 * where we start modifying current state.
10149 */
10150
Peter Zijlstraf55fc2a2015-09-09 19:06:33 +020010151 if (move_group) {
Peter Zijlstraf63a8da2015-01-23 12:24:14 +010010152 /*
10153 * See perf_event_ctx_lock() for comments on the details
10154 * of swizzling perf_event::ctx.
10155 */
Peter Zijlstra45a0e072016-01-26 13:09:48 +010010156 perf_remove_from_context(group_leader, 0);
Peter Zijlstra279b5162017-02-16 10:28:37 +010010157 put_ctx(gctx);
Jiri Olsa0231bb52013-02-01 11:23:45 +010010158
Peter Zijlstrab04243e2010-09-17 11:28:48 +020010159 list_for_each_entry(sibling, &group_leader->sibling_list,
10160 group_entry) {
Peter Zijlstra45a0e072016-01-26 13:09:48 +010010161 perf_remove_from_context(sibling, 0);
Peter Zijlstrab04243e2010-09-17 11:28:48 +020010162 put_ctx(gctx);
10163 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +020010164
Peter Zijlstraf63a8da2015-01-23 12:24:14 +010010165 /*
10166 * Wait for everybody to stop referencing the events through
10167 * the old lists, before installing it on new lists.
10168 */
Yan, Zheng0cda4c02012-06-15 14:31:33 +080010169 synchronize_rcu();
Peter Zijlstraf63a8da2015-01-23 12:24:14 +010010170
Peter Zijlstra (Intel)8f95b432015-01-27 11:53:12 +010010171 /*
10172 * Install the group siblings before the group leader.
10173 *
10174 * Because a group leader will try and install the entire group
 10175		 * (through the sibling list, which is still intact), we can
10176 * end up with siblings installed in the wrong context.
10177 *
 10178		 * Installing the siblings first is a NO-OP because they're not
 10179		 * yet reachable through the group lists.
10180 */
Peter Zijlstrab04243e2010-09-17 11:28:48 +020010181 list_for_each_entry(sibling, &group_leader->sibling_list,
10182 group_entry) {
Peter Zijlstra (Intel)8f95b432015-01-27 11:53:12 +010010183 perf_event__state_init(sibling);
Jiri Olsa9fc81d82014-12-10 21:23:51 +010010184 perf_install_in_context(ctx, sibling, sibling->cpu);
Peter Zijlstrab04243e2010-09-17 11:28:48 +020010185 get_ctx(ctx);
10186 }
Peter Zijlstra (Intel)8f95b432015-01-27 11:53:12 +010010187
10188 /*
 10189		 * Removing from the context leaves the event
 10190		 * disabled. What we want here is an event in the initial
 10191		 * startup state, ready to be added into the new context.
10192 */
10193 perf_event__state_init(group_leader);
10194 perf_install_in_context(ctx, group_leader, group_leader->cpu);
10195 get_ctx(ctx);
Peter Zijlstrab04243e2010-09-17 11:28:48 +020010196 }
10197
Peter Zijlstraf73e22a2015-09-09 20:48:22 +020010198 /*
10199 * Precalculate sample_data sizes; do while holding ctx::mutex such
10200 * that we're serialized against further additions and before
10201 * perf_install_in_context() which is the point the event is active and
10202 * can use these values.
10203 */
10204 perf_event__header_size(event);
10205 perf_event__id_header_size(event);
Alexander Shishkinbed5b252015-01-30 12:31:06 +020010206
Peter Zijlstra78cd2c72016-01-25 14:08:45 +010010207 event->owner = current;
10208
Yan, Zhenge2d37cd2012-06-15 14:31:32 +080010209 perf_install_in_context(ctx, event, event->cpu);
Peter Zijlstrafe4b04f2011-02-02 13:19:09 +010010210 perf_unpin_context(ctx);
Peter Zijlstraf63a8da2015-01-23 12:24:14 +010010211
Peter Zijlstraf55fc2a2015-09-09 19:06:33 +020010212 if (move_group)
Peter Zijlstra321027c2017-01-11 21:09:50 +010010213 perf_event_ctx_unlock(group_leader, gctx);
Ingo Molnarcdd6c482009-09-21 12:02:48 +020010214 mutex_unlock(&ctx->mutex);
10215
Peter Zijlstra79c9ce52016-04-26 11:36:53 +020010216 if (task) {
10217 mutex_unlock(&task->signal->cred_guard_mutex);
10218 put_task_struct(task);
10219 }
10220
Ingo Molnarcdd6c482009-09-21 12:02:48 +020010221 mutex_lock(&current->perf_event_mutex);
10222 list_add_tail(&event->owner_entry, &current->perf_event_list);
10223 mutex_unlock(&current->perf_event_mutex);
10224
Peter Zijlstra8a495422010-05-27 15:47:49 +020010225 /*
10226 * Drop the reference on the group_event after placing the
10227 * new event on the sibling_list. This ensures destruction
10228 * of the group leader will find the pointer to itself in
10229 * perf_group_detach().
10230 */
Al Viro2903ff02012-08-28 12:52:22 -040010231 fdput(group);
Al Viroea635c62010-05-26 17:40:29 -040010232 fd_install(event_fd, event_file);
10233 return event_fd;
Ingo Molnarcdd6c482009-09-21 12:02:48 +020010234
Peter Zijlstraf55fc2a2015-09-09 19:06:33 +020010235err_locked:
10236 if (move_group)
Peter Zijlstra321027c2017-01-11 21:09:50 +010010237 perf_event_ctx_unlock(group_leader, gctx);
Peter Zijlstraf55fc2a2015-09-09 19:06:33 +020010238 mutex_unlock(&ctx->mutex);
10239/* err_file: */
10240 fput(event_file);
Peter Zijlstrac3f00c72010-08-18 14:37:15 +020010241err_context:
Peter Zijlstrafe4b04f2011-02-02 13:19:09 +010010242 perf_unpin_context(ctx);
Al Viroea635c62010-05-26 17:40:29 -040010243 put_ctx(ctx);
Peter Zijlstrac6be5a52010-10-14 16:59:46 +020010244err_alloc:
Peter Zijlstra13005622016-02-24 18:45:41 +010010245 /*
10246 * If event_file is set, the fput() above will have called ->release()
10247 * and that will take care of freeing the event.
10248 */
10249 if (!event_file)
10250 free_event(event);
Peter Zijlstra79c9ce52016-04-26 11:36:53 +020010251err_cred:
10252 if (task)
10253 mutex_unlock(&task->signal->cred_guard_mutex);
Peter Zijlstra1f4ee502014-05-06 09:59:34 +020010254err_task:
Peter Zijlstrae7d0bc02010-10-14 16:54:51 +020010255 if (task)
10256 put_task_struct(task);
Peter Zijlstra89a1e182010-09-07 17:34:50 +020010257err_group_fd:
Al Viro2903ff02012-08-28 12:52:22 -040010258 fdput(group);
Al Viroea635c62010-05-26 17:40:29 -040010259err_fd:
10260 put_unused_fd(event_fd);
Ingo Molnarcdd6c482009-09-21 12:02:48 +020010261 return err;
10262}
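/*
 * Illustrative group usage (hypothetical): create a leader first, then
 * pass its fd as group_fd so the sibling schedules atomically with it:
 *
 *	leader = syscall(__NR_perf_event_open, &attr1, 0, -1, -1, 0);
 *	member = syscall(__NR_perf_event_open, &attr2, 0, -1, leader, 0);
 */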
10263
Arjan van de Venfb0459d2009-09-25 12:25:56 +020010264/**
10265 * perf_event_create_kernel_counter
10266 *
10267 * @attr: attributes of the counter to create
 10268 * @cpu: cpu on which the counter is bound
Matt Helsley38a81da2010-09-13 13:01:20 -070010269 * @task: task to profile (NULL for percpu)
 * @overflow_handler: callback to trigger when we hit the event
 * @context: context data passed to the overflow handler
Arjan van de Venfb0459d2009-09-25 12:25:56 +020010270 */
10271struct perf_event *
10272perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
Matt Helsley38a81da2010-09-13 13:01:20 -070010273 struct task_struct *task,
Avi Kivity4dc0da82011-06-29 18:42:35 +030010274 perf_overflow_handler_t overflow_handler,
10275 void *context)
Arjan van de Venfb0459d2009-09-25 12:25:56 +020010276{
Arjan van de Venfb0459d2009-09-25 12:25:56 +020010277 struct perf_event_context *ctx;
Peter Zijlstrac3f00c72010-08-18 14:37:15 +020010278 struct perf_event *event;
Arjan van de Venfb0459d2009-09-25 12:25:56 +020010279 int err;
10280
10281 /*
10282 * Get the target context (task or percpu):
10283 */
10284
Avi Kivity4dc0da82011-06-29 18:42:35 +030010285 event = perf_event_alloc(attr, cpu, task, NULL, NULL,
Matt Fleming79dff512015-01-23 18:45:42 +000010286 overflow_handler, context, -1);
Frederic Weisbeckerc6567f62009-11-26 05:35:41 +010010287 if (IS_ERR(event)) {
10288 err = PTR_ERR(event);
Peter Zijlstrac3f00c72010-08-18 14:37:15 +020010289 goto err;
10290 }
10291
Jiri Olsaf8697762014-08-01 14:33:01 +020010292 /* Mark owner so we could distinguish it from user events. */
Peter Zijlstra63b6da32016-01-14 16:05:37 +010010293 event->owner = TASK_TOMBSTONE;
Jiri Olsaf8697762014-08-01 14:33:01 +020010294
Yan, Zheng4af57ef2014-11-04 21:56:01 -050010295 ctx = find_get_context(event->pmu, task, event);
Arjan van de Venfb0459d2009-09-25 12:25:56 +020010296 if (IS_ERR(ctx)) {
10297 err = PTR_ERR(ctx);
Peter Zijlstrac3f00c72010-08-18 14:37:15 +020010298 goto err_free;
Frederic Weisbeckerc6567f62009-11-26 05:35:41 +010010299 }
Arjan van de Venfb0459d2009-09-25 12:25:56 +020010300
Arjan van de Venfb0459d2009-09-25 12:25:56 +020010301 WARN_ON_ONCE(ctx->parent_ctx);
10302 mutex_lock(&ctx->mutex);
Peter Zijlstra84c4e622016-02-24 18:45:40 +010010303 if (ctx->task == TASK_TOMBSTONE) {
10304 err = -ESRCH;
10305 goto err_unlock;
10306 }
10307
Thomas Gleixnera63fbed2017-05-24 10:15:34 +020010308 if (!task) {
10309 /*
10310 * Check if the @cpu we're creating an event for is online.
10311 *
10312 * We use the perf_cpu_context::ctx::mutex to serialize against
10313 * the hotplug notifiers. See perf_event_{init,exit}_cpu().
10314 */
10315 struct perf_cpu_context *cpuctx =
10316 container_of(ctx, struct perf_cpu_context, ctx);
10317 if (!cpuctx->online) {
10318 err = -ENODEV;
10319 goto err_unlock;
10320 }
10321 }
10322
Alexander Shishkinbed5b252015-01-30 12:31:06 +020010323 if (!exclusive_event_installable(event, ctx)) {
Alexander Shishkinbed5b252015-01-30 12:31:06 +020010324 err = -EBUSY;
Peter Zijlstra84c4e622016-02-24 18:45:40 +010010325 goto err_unlock;
Alexander Shishkinbed5b252015-01-30 12:31:06 +020010326 }
10327
Arjan van de Venfb0459d2009-09-25 12:25:56 +020010328 perf_install_in_context(ctx, event, cpu);
Peter Zijlstrafe4b04f2011-02-02 13:19:09 +010010329 perf_unpin_context(ctx);
Arjan van de Venfb0459d2009-09-25 12:25:56 +020010330 mutex_unlock(&ctx->mutex);
10331
Arjan van de Venfb0459d2009-09-25 12:25:56 +020010332 return event;
10333
Peter Zijlstra84c4e622016-02-24 18:45:40 +010010334err_unlock:
10335 mutex_unlock(&ctx->mutex);
10336 perf_unpin_context(ctx);
10337 put_ctx(ctx);
Peter Zijlstrac3f00c72010-08-18 14:37:15 +020010338err_free:
10339 free_event(event);
10340err:
Frederic Weisbeckerc6567f62009-11-26 05:35:41 +010010341 return ERR_PTR(err);
Arjan van de Venfb0459d2009-09-25 12:25:56 +020010342}
10343EXPORT_SYMBOL_GPL(perf_event_create_kernel_counter);
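/*
 * Minimal in-kernel usage sketch (hypothetical caller; the handler name
 * is made up for illustration):
 *
 *	struct perf_event_attr attr = {
 *		.type	= PERF_TYPE_HARDWARE,
 *		.config	= PERF_COUNT_HW_CPU_CYCLES,
 *		.size	= sizeof(attr),
 *	};
 *	struct perf_event *event;
 *
 *	event = perf_event_create_kernel_counter(&attr, cpu, NULL,
 *						 my_overflow_handler, NULL);
 *	if (IS_ERR(event))
 *		return PTR_ERR(event);
 */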
10344
Yan, Zheng0cda4c02012-06-15 14:31:33 +080010345void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu)
10346{
10347 struct perf_event_context *src_ctx;
10348 struct perf_event_context *dst_ctx;
10349 struct perf_event *event, *tmp;
10350 LIST_HEAD(events);
10351
10352 src_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, src_cpu)->ctx;
10353 dst_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, dst_cpu)->ctx;
10354
Peter Zijlstraf63a8da2015-01-23 12:24:14 +010010355 /*
10356 * See perf_event_ctx_lock() for comments on the details
10357 * of swizzling perf_event::ctx.
10358 */
10359 mutex_lock_double(&src_ctx->mutex, &dst_ctx->mutex);
Yan, Zheng0cda4c02012-06-15 14:31:33 +080010360 list_for_each_entry_safe(event, tmp, &src_ctx->event_list,
10361 event_entry) {
Peter Zijlstra45a0e072016-01-26 13:09:48 +010010362 perf_remove_from_context(event, 0);
Frederic Weisbecker9a545de2013-07-23 02:31:03 +020010363 unaccount_event_cpu(event, src_cpu);
Yan, Zheng0cda4c02012-06-15 14:31:33 +080010364 put_ctx(src_ctx);
Peter Zijlstra98861672013-10-03 16:02:23 +020010365 list_add(&event->migrate_entry, &events);
Yan, Zheng0cda4c02012-06-15 14:31:33 +080010366 }
Yan, Zheng0cda4c02012-06-15 14:31:33 +080010367
Peter Zijlstra (Intel)8f95b432015-01-27 11:53:12 +010010368 /*
10369 * Wait for the events to quiesce before re-instating them.
10370 */
Yan, Zheng0cda4c02012-06-15 14:31:33 +080010371 synchronize_rcu();
10372
Peter Zijlstra (Intel)8f95b432015-01-27 11:53:12 +010010373 /*
10374 * Re-instate events in 2 passes.
10375 *
10376 * Skip over group leaders and only install siblings on this first
10377 * pass, siblings will not get enabled without a leader, however a
10378 * leader will enable its siblings, even if those are still on the old
10379 * context.
10380 */
10381 list_for_each_entry_safe(event, tmp, &events, migrate_entry) {
10382 if (event->group_leader == event)
10383 continue;
10384
10385 list_del(&event->migrate_entry);
10386 if (event->state >= PERF_EVENT_STATE_OFF)
10387 event->state = PERF_EVENT_STATE_INACTIVE;
10388 account_event_cpu(event, dst_cpu);
10389 perf_install_in_context(dst_ctx, event, dst_cpu);
10390 get_ctx(dst_ctx);
10391 }
10392
10393 /*
10394 * Once all the siblings are setup properly, install the group leaders
10395 * to make it go.
10396 */
Peter Zijlstra98861672013-10-03 16:02:23 +020010397 list_for_each_entry_safe(event, tmp, &events, migrate_entry) {
10398 list_del(&event->migrate_entry);
Yan, Zheng0cda4c02012-06-15 14:31:33 +080010399 if (event->state >= PERF_EVENT_STATE_OFF)
10400 event->state = PERF_EVENT_STATE_INACTIVE;
Frederic Weisbecker9a545de2013-07-23 02:31:03 +020010401 account_event_cpu(event, dst_cpu);
Yan, Zheng0cda4c02012-06-15 14:31:33 +080010402 perf_install_in_context(dst_ctx, event, dst_cpu);
10403 get_ctx(dst_ctx);
10404 }
10405 mutex_unlock(&dst_ctx->mutex);
Peter Zijlstraf63a8da2015-01-23 12:24:14 +010010406 mutex_unlock(&src_ctx->mutex);
Yan, Zheng0cda4c02012-06-15 14:31:33 +080010407}
10408EXPORT_SYMBOL_GPL(perf_pmu_migrate_context);
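/*
 * Typical (illustrative) caller: a PMU driver's CPU-hotplug callback
 * moving its per-package events off a CPU that is going down, roughly
 *
 *	perf_pmu_migrate_context(pmu, dying_cpu, target_cpu);
 *
 * Both CPU contexts must belong to the same PMU for the swizzling above
 * to make sense.
 */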
10409
Ingo Molnarcdd6c482009-09-21 12:02:48 +020010410static void sync_child_event(struct perf_event *child_event,
10411 struct task_struct *child)
10412{
10413 struct perf_event *parent_event = child_event->parent;
10414 u64 child_val;
10415
10416 if (child_event->attr.inherit_stat)
10417 perf_event_read_event(child_event, child);
10418
Peter Zijlstrab5e58792010-05-21 14:43:12 +020010419 child_val = perf_event_count(child_event);
Ingo Molnarcdd6c482009-09-21 12:02:48 +020010420
10421 /*
10422 * Add back the child's count to the parent's count:
10423 */
Peter Zijlstraa6e6dea2010-05-21 14:27:58 +020010424 atomic64_add(child_val, &parent_event->child_count);
Ingo Molnarcdd6c482009-09-21 12:02:48 +020010425 atomic64_add(child_event->total_time_enabled,
10426 &parent_event->child_total_time_enabled);
10427 atomic64_add(child_event->total_time_running,
10428 &parent_event->child_total_time_running);
Ingo Molnarcdd6c482009-09-21 12:02:48 +020010429}
10430
10431static void
Peter Zijlstra8ba289b2016-01-26 13:06:56 +010010432perf_event_exit_event(struct perf_event *child_event,
10433 struct perf_event_context *child_ctx,
10434 struct task_struct *child)
Ingo Molnarcdd6c482009-09-21 12:02:48 +020010435{
Peter Zijlstra8ba289b2016-01-26 13:06:56 +010010436 struct perf_event *parent_event = child_event->parent;
10437
Peter Zijlstra1903d502014-07-15 17:27:27 +020010438 /*
10439 * Do not destroy the 'original' grouping; because of the context
10440 * switch optimization the original events could've ended up in a
10441 * random child task.
10442 *
10443 * If we were to destroy the original group, all group related
10444 * operations would cease to function properly after this random
10445 * child dies.
10446 *
10447 * Do destroy all inherited groups, we don't care about those
10448 * and being thorough is better.
10449 */
Peter Zijlstra32132a32016-01-11 15:40:59 +010010450 raw_spin_lock_irq(&child_ctx->lock);
10451 WARN_ON_ONCE(child_ctx->is_active);
10452
Peter Zijlstra8ba289b2016-01-26 13:06:56 +010010453 if (parent_event)
Peter Zijlstra32132a32016-01-11 15:40:59 +010010454 perf_group_detach(child_event);
10455 list_del_event(child_event, child_ctx);
Peter Zijlstra0d3d73a2017-09-05 14:16:28 +020010456 perf_event_set_state(child_event, PERF_EVENT_STATE_EXIT); /* is_event_hup() */
Peter Zijlstra32132a32016-01-11 15:40:59 +010010457 raw_spin_unlock_irq(&child_ctx->lock);
Ingo Molnarcdd6c482009-09-21 12:02:48 +020010458
Ingo Molnarcdd6c482009-09-21 12:02:48 +020010459 /*
Peter Zijlstra8ba289b2016-01-26 13:06:56 +010010460 * Parent events are governed by their filedesc, retain them.
Ingo Molnarcdd6c482009-09-21 12:02:48 +020010461 */
Peter Zijlstra8ba289b2016-01-26 13:06:56 +010010462 if (!parent_event) {
Jiri Olsa179033b2014-08-07 11:48:26 -040010463 perf_event_wakeup(child_event);
Peter Zijlstra8ba289b2016-01-26 13:06:56 +010010464 return;
Ingo Molnarcdd6c482009-09-21 12:02:48 +020010465 }
Peter Zijlstra8ba289b2016-01-26 13:06:56 +010010466 /*
10467 * Child events can be cleaned up.
10468 */
10469
10470 sync_child_event(child_event, child);
10471
10472 /*
10473 * Remove this event from the parent's list
10474 */
10475 WARN_ON_ONCE(parent_event->ctx->parent_ctx);
10476 mutex_lock(&parent_event->child_mutex);
10477 list_del_init(&child_event->child_list);
10478 mutex_unlock(&parent_event->child_mutex);
10479
10480 /*
10481 * Kick perf_poll() for is_event_hup().
10482 */
10483 perf_event_wakeup(parent_event);
10484 free_event(child_event);
10485 put_event(parent_event);
Ingo Molnarcdd6c482009-09-21 12:02:48 +020010486}
10487
Peter Zijlstra8dc85d5472010-09-02 16:50:03 +020010488static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
Ingo Molnarcdd6c482009-09-21 12:02:48 +020010489{
Peter Zijlstra211de6e2014-09-30 19:23:08 +020010490 struct perf_event_context *child_ctx, *clone_ctx = NULL;
Peter Zijlstra63b6da32016-01-14 16:05:37 +010010491 struct perf_event *child_event, *next;
Ingo Molnarcdd6c482009-09-21 12:02:48 +020010492
Peter Zijlstra63b6da32016-01-14 16:05:37 +010010493 WARN_ON_ONCE(child != current);
10494
Peter Zijlstra6a3351b2016-01-25 14:09:54 +010010495 child_ctx = perf_pin_task_context(child, ctxn);
Peter Zijlstra63b6da32016-01-14 16:05:37 +010010496 if (!child_ctx)
Ingo Molnarcdd6c482009-09-21 12:02:48 +020010497 return;
Ingo Molnarcdd6c482009-09-21 12:02:48 +020010498
Ingo Molnarcdd6c482009-09-21 12:02:48 +020010499 /*
Peter Zijlstra6a3351b2016-01-25 14:09:54 +010010500 * In order to reduce the amount of tricky in ctx tear-down, we hold
10501 * ctx::mutex over the entire thing. This serializes against almost
10502 * everything that wants to access the ctx.
10503 *
10504 * The exception is sys_perf_event_open() /
 10505	 * perf_event_create_kernel_counter() which does find_get_context()
10506 * without ctx::mutex (it cannot because of the move_group double mutex
10507 * lock thing). See the comments in perf_install_in_context().
Ingo Molnarcdd6c482009-09-21 12:02:48 +020010508 */
Peter Zijlstra6a3351b2016-01-25 14:09:54 +010010509 mutex_lock(&child_ctx->mutex);
Ingo Molnarcdd6c482009-09-21 12:02:48 +020010510
10511 /*
Peter Zijlstra6a3351b2016-01-25 14:09:54 +010010512 * In a single ctx::lock section, de-schedule the events and detach the
10513 * context from the task such that we cannot ever get it scheduled back
10514 * in.
Ingo Molnarcdd6c482009-09-21 12:02:48 +020010515 */
Peter Zijlstra6a3351b2016-01-25 14:09:54 +010010516 raw_spin_lock_irq(&child_ctx->lock);
Alexander Shishkin487f05e2017-01-19 18:43:30 +020010517 task_ctx_sched_out(__get_cpu_context(child_ctx), child_ctx, EVENT_ALL);
Peter Zijlstra4a1c0f22014-06-23 16:12:42 +020010518
10519 /*
Peter Zijlstra63b6da32016-01-14 16:05:37 +010010520 * Now that the context is inactive, destroy the task <-> ctx relation
10521 * and mark the context dead.
Ingo Molnarcdd6c482009-09-21 12:02:48 +020010522 */
Peter Zijlstra63b6da32016-01-14 16:05:37 +010010523 RCU_INIT_POINTER(child->perf_event_ctxp[ctxn], NULL);
10524 put_ctx(child_ctx); /* cannot be last */
10525 WRITE_ONCE(child_ctx->task, TASK_TOMBSTONE);
10526 put_task_struct(current); /* cannot be last */
Ingo Molnarcdd6c482009-09-21 12:02:48 +020010527
Peter Zijlstra211de6e2014-09-30 19:23:08 +020010528 clone_ctx = unclone_ctx(child_ctx);
Peter Zijlstra6a3351b2016-01-25 14:09:54 +010010529 raw_spin_unlock_irq(&child_ctx->lock);
Ingo Molnarcdd6c482009-09-21 12:02:48 +020010530
Peter Zijlstra211de6e2014-09-30 19:23:08 +020010531 if (clone_ctx)
10532 put_ctx(clone_ctx);
Peter Zijlstra4a1c0f22014-06-23 16:12:42 +020010533
10534 /*
Ingo Molnarcdd6c482009-09-21 12:02:48 +020010535 * Report the task dead after unscheduling the events so that we
10536 * won't get any samples after PERF_RECORD_EXIT. We can however still
10537 * get a few PERF_RECORD_READ events.
10538 */
10539 perf_event_task(child, child_ctx, 0);
10540
Peter Zijlstraebf905f2014-05-29 19:00:24 +020010541 list_for_each_entry_safe(child_event, next, &child_ctx->event_list, event_entry)
Peter Zijlstra8ba289b2016-01-26 13:06:56 +010010542 perf_event_exit_event(child_event, child_ctx, child);
Frederic Weisbecker889ff012010-01-09 20:04:47 +010010543
Ingo Molnarcdd6c482009-09-21 12:02:48 +020010544 mutex_unlock(&child_ctx->mutex);
10545
10546 put_ctx(child_ctx);
10547}
10548
Peter Zijlstra8dc85d5472010-09-02 16:50:03 +020010549/*
10550 * When a child task exits, feed back event values to parent events.
Peter Zijlstra79c9ce52016-04-26 11:36:53 +020010551 *
10552 * Can be called with cred_guard_mutex held when called from
10553 * install_exec_creds().
Peter Zijlstra8dc85d5472010-09-02 16:50:03 +020010554 */
10555void perf_event_exit_task(struct task_struct *child)
10556{
Peter Zijlstra88821352010-11-09 19:01:43 +010010557 struct perf_event *event, *tmp;
Peter Zijlstra8dc85d5472010-09-02 16:50:03 +020010558 int ctxn;
10559
Peter Zijlstra88821352010-11-09 19:01:43 +010010560 mutex_lock(&child->perf_event_mutex);
10561 list_for_each_entry_safe(event, tmp, &child->perf_event_list,
10562 owner_entry) {
10563 list_del_init(&event->owner_entry);
10564
10565 /*
10566 * Ensure the list deletion is visible before we clear
10567 * the owner, closes a race against perf_release() where
10568 * we need to serialize on the owner->perf_event_mutex.
10569 */
Peter Zijlstraf47c02c2016-01-26 12:30:14 +010010570 smp_store_release(&event->owner, NULL);
Peter Zijlstra88821352010-11-09 19:01:43 +010010571 }
10572 mutex_unlock(&child->perf_event_mutex);
10573
Peter Zijlstra8dc85d5472010-09-02 16:50:03 +020010574 for_each_task_context_nr(ctxn)
10575 perf_event_exit_task_context(child, ctxn);
Jiri Olsa4e93ad62015-11-04 16:00:05 +010010576
10577 /*
10578 * The perf_event_exit_task_context calls perf_event_task
10579 * with child's task_ctx, which generates EXIT events for
10580 * child contexts and sets child->perf_event_ctxp[] to NULL.
10581 * At this point we need to send EXIT events to cpu contexts.
10582 */
10583 perf_event_task(child, NULL, 0);
Peter Zijlstra8dc85d5472010-09-02 16:50:03 +020010584}
10585
Frederic Weisbecker889ff012010-01-09 20:04:47 +010010586static void perf_free_event(struct perf_event *event,
10587 struct perf_event_context *ctx)
10588{
10589 struct perf_event *parent = event->parent;
10590
10591 if (WARN_ON_ONCE(!parent))
10592 return;
10593
10594 mutex_lock(&parent->child_mutex);
10595 list_del_init(&event->child_list);
10596 mutex_unlock(&parent->child_mutex);
10597
Al Viroa6fa9412012-08-20 14:59:25 +010010598 put_event(parent);
Frederic Weisbecker889ff012010-01-09 20:04:47 +010010599
Peter Zijlstra652884f2015-01-23 11:20:10 +010010600 raw_spin_lock_irq(&ctx->lock);
Peter Zijlstra8a495422010-05-27 15:47:49 +020010601 perf_group_detach(event);
Frederic Weisbecker889ff012010-01-09 20:04:47 +010010602 list_del_event(event, ctx);
Peter Zijlstra652884f2015-01-23 11:20:10 +010010603 raw_spin_unlock_irq(&ctx->lock);
Frederic Weisbecker889ff012010-01-09 20:04:47 +010010604 free_event(event);
10605}
10606
Ingo Molnarcdd6c482009-09-21 12:02:48 +020010607/*
Peter Zijlstra652884f2015-01-23 11:20:10 +010010608 * Free an unexposed, unused context as created by inheritance by
Peter Zijlstra8dc85d5472010-09-02 16:50:03 +020010609 * perf_event_init_task below, used by fork() in case of failure.
Peter Zijlstra652884f2015-01-23 11:20:10 +010010610 *
10611 * Not all locks are strictly required, but take them anyway to be nice and
10612 * help out with the lockdep assertions.
Ingo Molnarcdd6c482009-09-21 12:02:48 +020010613 */
10614void perf_event_free_task(struct task_struct *task)
10615{
Peter Zijlstra8dc85d5472010-09-02 16:50:03 +020010616 struct perf_event_context *ctx;
Ingo Molnarcdd6c482009-09-21 12:02:48 +020010617 struct perf_event *event, *tmp;
Peter Zijlstra8dc85d5472010-09-02 16:50:03 +020010618 int ctxn;
Ingo Molnarcdd6c482009-09-21 12:02:48 +020010619
Peter Zijlstra8dc85d5472010-09-02 16:50:03 +020010620 for_each_task_context_nr(ctxn) {
10621 ctx = task->perf_event_ctxp[ctxn];
10622 if (!ctx)
10623 continue;
Ingo Molnarcdd6c482009-09-21 12:02:48 +020010624
Peter Zijlstra8dc85d5472010-09-02 16:50:03 +020010625 mutex_lock(&ctx->mutex);
Peter Zijlstrae552a832017-03-16 13:47:48 +010010626 raw_spin_lock_irq(&ctx->lock);
10627 /*
10628 * Destroy the task <-> ctx relation and mark the context dead.
10629 *
10630 * This is important because even though the task hasn't been
10631 * exposed yet the context has been (through child_list).
10632 */
10633 RCU_INIT_POINTER(task->perf_event_ctxp[ctxn], NULL);
10634 WRITE_ONCE(ctx->task, TASK_TOMBSTONE);
10635 put_task_struct(task); /* cannot be last */
10636 raw_spin_unlock_irq(&ctx->lock);
Ingo Molnarcdd6c482009-09-21 12:02:48 +020010637
Peter Zijlstra15121c72017-03-16 13:47:50 +010010638 list_for_each_entry_safe(event, tmp, &ctx->event_list, event_entry)
Peter Zijlstra8dc85d5472010-09-02 16:50:03 +020010639 perf_free_event(event, ctx);
Ingo Molnarcdd6c482009-09-21 12:02:48 +020010640
Peter Zijlstra8dc85d5472010-09-02 16:50:03 +020010641 mutex_unlock(&ctx->mutex);
Peter Zijlstra8dc85d5472010-09-02 16:50:03 +020010642 put_ctx(ctx);
10643 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +020010644}
10645
Peter Zijlstra4e231c72010-09-09 21:01:59 +020010646void perf_event_delayed_put(struct task_struct *task)
10647{
10648 int ctxn;
10649
10650 for_each_task_context_nr(ctxn)
10651 WARN_ON_ONCE(task->perf_event_ctxp[ctxn]);
10652}
10653
Alexei Starovoitove03e7ee2016-01-25 20:59:49 -080010654struct file *perf_event_get(unsigned int fd)
Kaixu Xiaffe86902015-08-06 07:02:32 +000010655{
Alexei Starovoitove03e7ee2016-01-25 20:59:49 -080010656 struct file *file;
Kaixu Xiaffe86902015-08-06 07:02:32 +000010657
Alexei Starovoitove03e7ee2016-01-25 20:59:49 -080010658 file = fget_raw(fd);
10659 if (!file)
10660 return ERR_PTR(-EBADF);
Kaixu Xiaffe86902015-08-06 07:02:32 +000010661
Alexei Starovoitove03e7ee2016-01-25 20:59:49 -080010662 if (file->f_op != &perf_fops) {
10663 fput(file);
10664 return ERR_PTR(-EBADF);
10665 }
Kaixu Xiaffe86902015-08-06 07:02:32 +000010666
Alexei Starovoitove03e7ee2016-01-25 20:59:49 -080010667 return file;
Kaixu Xiaffe86902015-08-06 07:02:32 +000010668}
10669
10670const struct perf_event_attr *perf_event_attrs(struct perf_event *event)
10671{
10672 if (!event)
10673 return ERR_PTR(-EINVAL);
10674
10675 return &event->attr;
10676}
10677
Peter Zijlstra97dee4f2010-09-07 15:35:33 +020010678/*
Peter Zijlstrad8a8cfc2017-03-16 13:47:51 +010010679 * Inherit an event from parent task to child task.
10680 *
10681 * Returns:
10682 * - valid pointer on success
10683 * - NULL for orphaned events
10684 * - IS_ERR() on error
Peter Zijlstra97dee4f2010-09-07 15:35:33 +020010685 */
10686static struct perf_event *
10687inherit_event(struct perf_event *parent_event,
10688 struct task_struct *parent,
10689 struct perf_event_context *parent_ctx,
10690 struct task_struct *child,
10691 struct perf_event *group_leader,
10692 struct perf_event_context *child_ctx)
10693{
Peter Zijlstra8ca2bd42017-09-05 14:12:35 +020010694 enum perf_event_state parent_state = parent_event->state;
Peter Zijlstra97dee4f2010-09-07 15:35:33 +020010695 struct perf_event *child_event;
Peter Zijlstracee010e2010-09-10 12:51:54 +020010696 unsigned long flags;
Peter Zijlstra97dee4f2010-09-07 15:35:33 +020010697
10698 /*
10699 * Instead of creating recursive hierarchies of events,
10700 * we link inherited events back to the original parent,
10701 * which has a filp for sure, which we use as the reference
10702 * count:
10703 */
10704 if (parent_event->parent)
10705 parent_event = parent_event->parent;
10706
10707 child_event = perf_event_alloc(&parent_event->attr,
10708 parent_event->cpu,
Peter Zijlstrad580ff82010-10-14 17:43:23 +020010709 child,
Peter Zijlstra97dee4f2010-09-07 15:35:33 +020010710 group_leader, parent_event,
Matt Fleming79dff512015-01-23 18:45:42 +000010711 NULL, NULL, -1);
Peter Zijlstra97dee4f2010-09-07 15:35:33 +020010712 if (IS_ERR(child_event))
10713 return child_event;
Al Viroa6fa9412012-08-20 14:59:25 +010010714
Peter Zijlstrac6e5b732016-01-15 16:07:41 +020010715 /*
10716 * is_orphaned_event() and list_add_tail(&parent_event->child_list)
10717 * must be under the same lock in order to serialize against
10718 * perf_event_release_kernel(), such that either we must observe
10719 * is_orphaned_event() or they will observe us on the child_list.
10720 */
10721 mutex_lock(&parent_event->child_mutex);
Jiri Olsafadfe7b2014-08-01 14:33:02 +020010722 if (is_orphaned_event(parent_event) ||
10723 !atomic_long_inc_not_zero(&parent_event->refcount)) {
Peter Zijlstrac6e5b732016-01-15 16:07:41 +020010724 mutex_unlock(&parent_event->child_mutex);
Al Viroa6fa9412012-08-20 14:59:25 +010010725 free_event(child_event);
10726 return NULL;
10727 }
10728
Peter Zijlstra97dee4f2010-09-07 15:35:33 +020010729 get_ctx(child_ctx);
10730
10731 /*
10732 * Make the child state follow the state of the parent event,
10733 * not its attr.disabled bit. We hold the parent's mutex,
10734 * so we won't race with perf_event_{en, dis}able_family.
10735 */
Jiri Olsa1929def2014-09-12 13:18:27 +020010736 if (parent_state >= PERF_EVENT_STATE_INACTIVE)
Peter Zijlstra97dee4f2010-09-07 15:35:33 +020010737 child_event->state = PERF_EVENT_STATE_INACTIVE;
10738 else
10739 child_event->state = PERF_EVENT_STATE_OFF;
10740
10741 if (parent_event->attr.freq) {
10742 u64 sample_period = parent_event->hw.sample_period;
10743 struct hw_perf_event *hwc = &child_event->hw;
10744
10745 hwc->sample_period = sample_period;
10746 hwc->last_period = sample_period;
10747
10748 local64_set(&hwc->period_left, sample_period);
10749 }
10750
10751 child_event->ctx = child_ctx;
10752 child_event->overflow_handler = parent_event->overflow_handler;
Avi Kivity4dc0da82011-06-29 18:42:35 +030010753 child_event->overflow_handler_context
10754 = parent_event->overflow_handler_context;
Peter Zijlstra97dee4f2010-09-07 15:35:33 +020010755
10756 /*
Thomas Gleixner614b6782010-12-03 16:24:32 -020010757 * Precalculate sample_data sizes
10758 */
10759 perf_event__header_size(child_event);
Arnaldo Carvalho de Melo6844c092010-12-03 16:36:35 -020010760 perf_event__id_header_size(child_event);
Thomas Gleixner614b6782010-12-03 16:24:32 -020010761
10762 /*
Peter Zijlstra97dee4f2010-09-07 15:35:33 +020010763 * Link it up in the child's context:
10764 */
Peter Zijlstracee010e2010-09-10 12:51:54 +020010765 raw_spin_lock_irqsave(&child_ctx->lock, flags);
Peter Zijlstra97dee4f2010-09-07 15:35:33 +020010766 add_event_to_ctx(child_event, child_ctx);
Peter Zijlstracee010e2010-09-10 12:51:54 +020010767 raw_spin_unlock_irqrestore(&child_ctx->lock, flags);
Peter Zijlstra97dee4f2010-09-07 15:35:33 +020010768
10769 /*
Peter Zijlstra97dee4f2010-09-07 15:35:33 +020010770 * Link this into the parent event's child list
10771 */
Peter Zijlstra97dee4f2010-09-07 15:35:33 +020010772 list_add_tail(&child_event->child_list, &parent_event->child_list);
10773 mutex_unlock(&parent_event->child_mutex);
10774
10775 return child_event;
10776}
10777
Peter Zijlstrad8a8cfc2017-03-16 13:47:51 +010010778/*
10779 * Inherits an event group.
10780 *
10781 * This will quietly suppress orphaned events; !inherit_event() is not an error.
10782 * This matches with perf_event_release_kernel() removing all child events.
10783 *
10784 * Returns:
10785 * - 0 on success
10786 * - <0 on error
10787 */
Peter Zijlstra97dee4f2010-09-07 15:35:33 +020010788static int inherit_group(struct perf_event *parent_event,
10789 struct task_struct *parent,
10790 struct perf_event_context *parent_ctx,
10791 struct task_struct *child,
10792 struct perf_event_context *child_ctx)
10793{
10794 struct perf_event *leader;
10795 struct perf_event *sub;
10796 struct perf_event *child_ctr;
10797
10798 leader = inherit_event(parent_event, parent, parent_ctx,
10799 child, NULL, child_ctx);
10800 if (IS_ERR(leader))
10801 return PTR_ERR(leader);
Peter Zijlstrad8a8cfc2017-03-16 13:47:51 +010010802 /*
10803 * @leader can be NULL here because of is_orphaned_event(). In this
10804 * case inherit_event() will create individual events, similar to what
10805 * perf_group_detach() would do anyway.
10806 */
Peter Zijlstra97dee4f2010-09-07 15:35:33 +020010807 list_for_each_entry(sub, &parent_event->sibling_list, group_entry) {
10808 child_ctr = inherit_event(sub, parent, parent_ctx,
10809 child, leader, child_ctx);
10810 if (IS_ERR(child_ctr))
10811 return PTR_ERR(child_ctr);
10812 }
10813 return 0;
Ingo Molnarcdd6c482009-09-21 12:02:48 +020010814}
10815
Peter Zijlstrad8a8cfc2017-03-16 13:47:51 +010010816/*
10817 * Creates the child task context and tries to inherit the event-group.
10818 *
 10819 * Clears @inherited_all on !attr.inherit or error. Note that we'll leave
10820 * inherited_all set when we 'fail' to inherit an orphaned event; this is
10821 * consistent with perf_event_release_kernel() removing all child events.
10822 *
10823 * Returns:
10824 * - 0 on success
10825 * - <0 on error
10826 */
static int
inherit_task_group(struct perf_event *event, struct task_struct *parent,
		   struct perf_event_context *parent_ctx,
		   struct task_struct *child, int ctxn,
		   int *inherited_all)
{
	int ret;
	struct perf_event_context *child_ctx;

	if (!event->attr.inherit) {
		*inherited_all = 0;
		return 0;
	}

	child_ctx = child->perf_event_ctxp[ctxn];
	if (!child_ctx) {
		/*
		 * This is executed from the parent task context, so
		 * inherit events that have been marked for cloning.
		 * First allocate and initialize a context for the
		 * child.
		 */
		child_ctx = alloc_perf_context(parent_ctx->pmu, child);
		if (!child_ctx)
			return -ENOMEM;

		child->perf_event_ctxp[ctxn] = child_ctx;
	}

	ret = inherit_group(event, parent, parent_ctx,
			    child, child_ctx);

	if (ret)
		*inherited_all = 0;

	return ret;
}

/*
 * Initialize the perf_event context in task_struct for the given
 * context number (ctxn).
 */
static int perf_event_init_context(struct task_struct *child, int ctxn)
{
	struct perf_event_context *child_ctx, *parent_ctx;
	struct perf_event_context *cloned_ctx;
	struct perf_event *event;
	struct task_struct *parent = current;
	int inherited_all = 1;
	unsigned long flags;
	int ret = 0;

	if (likely(!parent->perf_event_ctxp[ctxn]))
		return 0;

	/*
	 * If the parent's context is a clone, pin it so it won't get
	 * swapped under us.
	 */
	parent_ctx = perf_pin_task_context(parent, ctxn);
	if (!parent_ctx)
		return 0;

	/*
	 * No need to check if parent_ctx != NULL here; since we saw
	 * it non-NULL earlier, the only reason for it to become NULL
	 * is if we exit, and since we're currently in the middle of
	 * a fork we can't be exiting at the same time.
	 */

	/*
	 * Lock the parent list. No need to lock the child - not PID
	 * hashed yet and not running, so nobody can access it.
	 */
	mutex_lock(&parent_ctx->mutex);

	/*
	 * We don't have to disable NMIs - we are only looking at
	 * the list, not manipulating it:
	 */
	list_for_each_entry(event, &parent_ctx->pinned_groups, group_entry) {
		ret = inherit_task_group(event, parent, parent_ctx,
					 child, ctxn, &inherited_all);
		if (ret)
			goto out_unlock;
	}

	/*
	 * We can't hold ctx->lock when iterating the ->flexible_groups list
	 * due to allocations, but we need to prevent rotation because
	 * rotate_ctx() will change the list from interrupt context.
	 */
	raw_spin_lock_irqsave(&parent_ctx->lock, flags);
	parent_ctx->rotate_disable = 1;
	raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);

	list_for_each_entry(event, &parent_ctx->flexible_groups, group_entry) {
		ret = inherit_task_group(event, parent, parent_ctx,
					 child, ctxn, &inherited_all);
		if (ret)
			goto out_unlock;
	}

	raw_spin_lock_irqsave(&parent_ctx->lock, flags);
	parent_ctx->rotate_disable = 0;

	child_ctx = child->perf_event_ctxp[ctxn];

	if (child_ctx && inherited_all) {
		/*
		 * Mark the child context as a clone of the parent
		 * context, or of whatever the parent is a clone of.
		 *
		 * Note that if the parent is a clone, the holding of
		 * parent_ctx->lock prevents it from being uncloned.
		 */
		cloned_ctx = parent_ctx->parent_ctx;
		if (cloned_ctx) {
			child_ctx->parent_ctx = cloned_ctx;
			child_ctx->parent_gen = parent_ctx->parent_gen;
		} else {
			child_ctx->parent_ctx = parent_ctx;
			child_ctx->parent_gen = parent_ctx->generation;
		}
		get_ctx(child_ctx->parent_ctx);
	}
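
	/*
	 * This clone marking (parent_ctx + parent_gen) is what later lets
	 * context_equiv() recognize parent and child contexts as
	 * interchangeable, so a context switch between the two tasks can
	 * be done by swapping context pointers instead of unscheduling
	 * and rescheduling every event.
	 */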
	raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
out_unlock:
	mutex_unlock(&parent_ctx->mutex);

	perf_unpin_context(parent_ctx);
	put_ctx(parent_ctx);

	return ret;
}

/*
 * Initialize the perf_event contexts in task_struct: one per context
 * number (ctxn).
 */
int perf_event_init_task(struct task_struct *child)
{
	int ctxn, ret;

	memset(child->perf_event_ctxp, 0, sizeof(child->perf_event_ctxp));
	mutex_init(&child->perf_event_mutex);
	INIT_LIST_HEAD(&child->perf_event_list);

	for_each_task_context_nr(ctxn) {
		ret = perf_event_init_context(child, ctxn);
		if (ret) {
			perf_event_free_task(child);
			return ret;
		}
	}

	return 0;
}
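
/*
 * Note: perf_event_init_task() is called from copy_process() during
 * fork(); on failure it unwinds any contexts it already created via
 * perf_event_free_task() before propagating the error.
 */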

static void __init perf_event_init_all_cpus(void)
{
	struct swevent_htable *swhash;
	int cpu;

	zalloc_cpumask_var(&perf_online_mask, GFP_KERNEL);

	for_each_possible_cpu(cpu) {
		swhash = &per_cpu(swevent_htable, cpu);
		mutex_init(&swhash->hlist_mutex);
		INIT_LIST_HEAD(&per_cpu(active_ctx_list, cpu));

		INIT_LIST_HEAD(&per_cpu(pmu_sb_events.list, cpu));
		raw_spin_lock_init(&per_cpu(pmu_sb_events.lock, cpu));

#ifdef CONFIG_CGROUP_PERF
		INIT_LIST_HEAD(&per_cpu(cgrp_cpuctx_list, cpu));
#endif
		INIT_LIST_HEAD(&per_cpu(sched_cb_list, cpu));
	}
}
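
/*
 * Note: the loop above covers every possible CPU, not just the online
 * ones, so per-CPU state already exists by the time a CPU is hotplugged
 * in later; perf_online_mask tracks which CPUs are actually usable.
 */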

void perf_swevent_init_cpu(unsigned int cpu)
{
	struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);

	mutex_lock(&swhash->hlist_mutex);
	if (swhash->hlist_refcount > 0 && !swevent_hlist_deref(swhash)) {
		struct swevent_hlist *hlist;

		hlist = kzalloc_node(sizeof(*hlist), GFP_KERNEL, cpu_to_node(cpu));
		WARN_ON(!hlist);
		rcu_assign_pointer(swhash->swevent_hlist, hlist);
	}
	mutex_unlock(&swhash->hlist_mutex);
}
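
/*
 * Note: the hash list is only allocated here when software events
 * already hold a reference (hlist_refcount > 0) but the list itself is
 * missing on this CPU, typically when a CPU comes online while software
 * events already exist.
 */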

#if defined CONFIG_HOTPLUG_CPU || defined CONFIG_KEXEC_CORE
static void __perf_event_exit_context(void *__info)
{
	struct perf_event_context *ctx = __info;
	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
	struct perf_event *event;

	raw_spin_lock(&ctx->lock);
	ctx_sched_out(ctx, cpuctx, EVENT_TIME);
	list_for_each_entry(event, &ctx->event_list, event_entry)
		__perf_remove_from_context(event, cpuctx, ctx, (void *)DETACH_GROUP);
	raw_spin_unlock(&ctx->lock);
}
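
/*
 * __perf_event_exit_context() is run on the dying CPU itself via
 * smp_call_function_single() below, hence with interrupts disabled,
 * which is why the plain raw_spin_lock() above is sufficient.
 */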

static void perf_event_exit_cpu_context(int cpu)
{
	struct perf_cpu_context *cpuctx;
	struct perf_event_context *ctx;
	struct pmu *pmu;

	mutex_lock(&pmus_lock);
	list_for_each_entry(pmu, &pmus, entry) {
		cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
		ctx = &cpuctx->ctx;

		mutex_lock(&ctx->mutex);
		smp_call_function_single(cpu, __perf_event_exit_context, ctx, 1);
		cpuctx->online = 0;
		mutex_unlock(&ctx->mutex);
	}
	cpumask_clear_cpu(cpu, perf_online_mask);
	mutex_unlock(&pmus_lock);
}
#else

static void perf_event_exit_cpu_context(int cpu) { }

#endif

int perf_event_init_cpu(unsigned int cpu)
{
	struct perf_cpu_context *cpuctx;
	struct perf_event_context *ctx;
	struct pmu *pmu;

	perf_swevent_init_cpu(cpu);

	mutex_lock(&pmus_lock);
	cpumask_set_cpu(cpu, perf_online_mask);
	list_for_each_entry(pmu, &pmus, entry) {
		cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
		ctx = &cpuctx->ctx;

		mutex_lock(&ctx->mutex);
		cpuctx->online = 1;
		mutex_unlock(&ctx->mutex);
	}
	mutex_unlock(&pmus_lock);

	return 0;
}
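
/*
 * perf_event_init_cpu() and perf_event_exit_cpu() are the perf callbacks
 * of the CPU hotplug state machine (wired up via CPUHP_PERF_PREPARE in
 * kernel/cpu.c), so they run whenever a CPU comes up or goes down.
 */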

int perf_event_exit_cpu(unsigned int cpu)
{
	perf_event_exit_cpu_context(cpu);
	return 0;
}

static int
perf_reboot(struct notifier_block *notifier, unsigned long val, void *v)
{
	int cpu;

	for_each_online_cpu(cpu)
		perf_event_exit_cpu(cpu);

	return NOTIFY_OK;
}

/*
 * Run the perf reboot notifier at the very last possible moment so that
 * the generic watchdog code runs as long as possible.
 */
static struct notifier_block perf_reboot_notifier = {
	.notifier_call = perf_reboot,
	.priority = INT_MIN,
};

void __init perf_event_init(void)
{
	int ret;

	idr_init(&pmu_idr);

	perf_event_init_all_cpus();
	init_srcu_struct(&pmus_srcu);
	perf_pmu_register(&perf_swevent, "software", PERF_TYPE_SOFTWARE);
	perf_pmu_register(&perf_cpu_clock, NULL, -1);
	perf_pmu_register(&perf_task_clock, NULL, -1);
	perf_tp_register();
	perf_event_init_cpu(smp_processor_id());
	register_reboot_notifier(&perf_reboot_notifier);

	ret = init_hw_breakpoint();
	WARN(ret, "hw_breakpoint initialization failed with: %d", ret);

	/*
	 * Build time assertion that we keep the data_head at the intended
	 * location. IOW, validation we got the __reserved[] size right.
	 */
	BUILD_BUG_ON((offsetof(struct perf_event_mmap_page, data_head))
		     != 1024);
}

ssize_t perf_event_sysfs_show(struct device *dev, struct device_attribute *attr,
			      char *page)
{
	struct perf_pmu_events_attr *pmu_attr =
		container_of(attr, struct perf_pmu_events_attr, attr);

	if (pmu_attr->event_str)
		return sprintf(page, "%s\n", pmu_attr->event_str);

	return 0;
}
EXPORT_SYMBOL_GPL(perf_event_sysfs_show);
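
/*
 * A PMU driver typically hooks this up via PMU_EVENT_ATTR_STRING(), for
 * example (illustrative event name and string):
 *
 *	PMU_EVENT_ATTR_STRING(my_event, my_event_attr, "event=0x2a");
 *
 * after which reading the "my_event" file in the PMU's events/ sysfs
 * directory returns "event=0x2a".
 */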

static int __init perf_event_sysfs_init(void)
{
	struct pmu *pmu;
	int ret;

	mutex_lock(&pmus_lock);

	ret = bus_register(&pmu_bus);
	if (ret)
		goto unlock;

	list_for_each_entry(pmu, &pmus, entry) {
		if (!pmu->name || pmu->type < 0)
			continue;

		ret = pmu_dev_alloc(pmu);
		WARN(ret, "Failed to register pmu: %s, reason %d\n", pmu->name, ret);
	}
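
	/*
	 * Once pmu_bus_running is set, perf_pmu_register() creates the
	 * sysfs device for each new PMU itself; the loop above only has
	 * to cover PMUs registered before this initcall ran.
	 */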
	pmu_bus_running = 1;
	ret = 0;

unlock:
	mutex_unlock(&pmus_lock);

	return ret;
}
device_initcall(perf_event_sysfs_init);

#ifdef CONFIG_CGROUP_PERF
static struct cgroup_subsys_state *
perf_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
{
	struct perf_cgroup *jc;

	jc = kzalloc(sizeof(*jc), GFP_KERNEL);
	if (!jc)
		return ERR_PTR(-ENOMEM);

	jc->info = alloc_percpu(struct perf_cgroup_info);
	if (!jc->info) {
		kfree(jc);
		return ERR_PTR(-ENOMEM);
	}

	return &jc->css;
}

static void perf_cgroup_css_free(struct cgroup_subsys_state *css)
{
	struct perf_cgroup *jc = container_of(css, struct perf_cgroup, css);

	free_percpu(jc->info);
	kfree(jc);
}

static int __perf_cgroup_move(void *info)
{
	struct task_struct *task = info;

	rcu_read_lock();
	perf_cgroup_switch(task, PERF_CGROUP_SWOUT | PERF_CGROUP_SWIN);
	rcu_read_unlock();

	return 0;
}
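
/*
 * __perf_cgroup_move() runs through task_function_call(), i.e. as an
 * IPI on the CPU the task is currently running on, so the cgroup
 * switch cannot race with the task scheduling in or out.
 */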

static void perf_cgroup_attach(struct cgroup_taskset *tset)
{
	struct task_struct *task;
	struct cgroup_subsys_state *css;

	cgroup_taskset_for_each(task, css, tset)
		task_function_call(task, __perf_cgroup_move, task);
}

struct cgroup_subsys perf_event_cgrp_subsys = {
	.css_alloc	= perf_cgroup_css_alloc,
	.css_free	= perf_cgroup_css_free,
	.attach		= perf_cgroup_attach,
	/*
	 * Implicitly enable on dfl hierarchy so that perf events can
	 * always be filtered by cgroup2 path as long as perf_event
	 * controller is not mounted on a legacy hierarchy.
	 */
	.implicit_on_dfl = true,
	.threaded	= true,
};
#endif /* CONFIG_CGROUP_PERF */