// SPDX-License-Identifier: GPL-2.0
/*
 * Performance events core code:
 *
 * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 * Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
 * Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra
 * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 */

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/idr.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/tick.h>
#include <linux/sysfs.h>
#include <linux/dcache.h>
#include <linux/percpu.h>
#include <linux/ptrace.h>
#include <linux/reboot.h>
#include <linux/vmstat.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/vmalloc.h>
#include <linux/hardirq.h>
#include <linux/rculist.h>
#include <linux/uaccess.h>
#include <linux/syscalls.h>
#include <linux/anon_inodes.h>
#include <linux/kernel_stat.h>
#include <linux/cgroup.h>
#include <linux/perf_event.h>
#include <linux/trace_events.h>
#include <linux/hw_breakpoint.h>
#include <linux/mm_types.h>
#include <linux/module.h>
#include <linux/mman.h>
#include <linux/compat.h>
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/namei.h>
#include <linux/parser.h>
#include <linux/sched/clock.h>
#include <linux/sched/mm.h>
#include <linux/proc_ns.h>
#include <linux/mount.h>
#include <linux/min_heap.h>

#include "internal.h"

#include <asm/irq_regs.h>

typedef int (*remote_function_f)(void *);

struct remote_function_call {
	struct task_struct	*p;
	remote_function_f	func;
	void			*info;
	int			ret;
};

static void remote_function(void *data)
{
	struct remote_function_call *tfc = data;
	struct task_struct *p = tfc->p;

	if (p) {
		/* -EAGAIN */
		if (task_cpu(p) != smp_processor_id())
			return;

		/*
		 * Now that we're on right CPU with IRQs disabled, we can test
		 * if we hit the right task without races.
		 */

		tfc->ret = -ESRCH; /* No such (running) process */
		if (p != current)
			return;
	}

	tfc->ret = tfc->func(tfc->info);
}

/**
 * task_function_call - call a function on the cpu on which a task runs
 * @p:		the task to evaluate
 * @func:	the function to be called
 * @info:	the function call argument
 *
 * Calls the function @func when the task is currently running. This might
 * be on the current CPU, which just calls the function directly.
 *
 * returns: @func return value, or
 *	    -ESRCH  - when the process isn't running
 *	    -EAGAIN - when the process moved away
 */
static int
task_function_call(struct task_struct *p, remote_function_f func, void *info)
{
	struct remote_function_call data = {
		.p	= p,
		.func	= func,
		.info	= info,
		.ret	= -EAGAIN,
	};
	int ret;

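	/*
	 * The IPI can race with @p migrating away; in that case
	 * remote_function() bails out early and data.ret is left at the
	 * -EAGAIN it was initialised to, so retry against the task's new CPU.
	 */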
	do {
		ret = smp_call_function_single(task_cpu(p), remote_function, &data, 1);
		if (!ret)
			ret = data.ret;
	} while (ret == -EAGAIN);

	return ret;
}

/**
 * cpu_function_call - call a function on the cpu
 * @cpu:	the cpu on which to run the function
 * @func:	the function to be called
 * @info:	the function call argument
 *
 * Calls the function @func on the remote cpu.
 *
 * returns: @func return value or -ENXIO when the cpu is offline
 */
static int cpu_function_call(int cpu, remote_function_f func, void *info)
{
	struct remote_function_call data = {
		.p	= NULL,
		.func	= func,
		.info	= info,
		.ret	= -ENXIO, /* No such CPU */
	};

	smp_call_function_single(cpu, remote_function, &data, 1);

	return data.ret;
}

static inline struct perf_cpu_context *
__get_cpu_context(struct perf_event_context *ctx)
{
	return this_cpu_ptr(ctx->pmu->pmu_cpu_context);
}

static void perf_ctx_lock(struct perf_cpu_context *cpuctx,
			  struct perf_event_context *ctx)
{
	raw_spin_lock(&cpuctx->ctx.lock);
	if (ctx)
		raw_spin_lock(&ctx->lock);
}

static void perf_ctx_unlock(struct perf_cpu_context *cpuctx,
			    struct perf_event_context *ctx)
{
	if (ctx)
		raw_spin_unlock(&ctx->lock);
	raw_spin_unlock(&cpuctx->ctx.lock);
}

#define TASK_TOMBSTONE ((void *)-1L)

static bool is_kernel_event(struct perf_event *event)
{
	return READ_ONCE(event->owner) == TASK_TOMBSTONE;
}

/*
 * On task ctx scheduling...
 *
 * When !ctx->nr_events a task context will not be scheduled. This means
 * we can disable the scheduler hooks (for performance) without leaving
 * pending task ctx state.
 *
 * This however results in two special cases:
 *
 *  - removing the last event from a task ctx; this is relatively straight
 *    forward and is done in __perf_remove_from_context.
 *
 *  - adding the first event to a task ctx; this is tricky because we cannot
 *    rely on ctx->is_active and therefore cannot use event_function_call().
 *    See perf_install_in_context().
 *
 * If ctx->nr_events, then ctx->is_active and cpuctx->task_ctx are set.
 */

typedef void (*event_f)(struct perf_event *, struct perf_cpu_context *,
			struct perf_event_context *, void *);

struct event_function_struct {
	struct perf_event *event;
	event_f func;
	void *data;
};

static int event_function(void *info)
{
	struct event_function_struct *efs = info;
	struct perf_event *event = efs->event;
	struct perf_event_context *ctx = event->ctx;
	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
	struct perf_event_context *task_ctx = cpuctx->task_ctx;
	int ret = 0;

	lockdep_assert_irqs_disabled();

	perf_ctx_lock(cpuctx, task_ctx);
	/*
	 * Since we do the IPI call without holding ctx->lock things can have
	 * changed, double check we hit the task we set out to hit.
	 */
	if (ctx->task) {
		if (ctx->task != current) {
			ret = -ESRCH;
			goto unlock;
		}

		/*
		 * We only use event_function_call() on established contexts,
		 * and event_function() is only ever called when active (or
		 * rather, we'll have bailed in task_function_call() or the
		 * above ctx->task != current test), therefore we must have
		 * ctx->is_active here.
		 */
		WARN_ON_ONCE(!ctx->is_active);
		/*
		 * And since we have ctx->is_active, cpuctx->task_ctx must
		 * match.
		 */
		WARN_ON_ONCE(task_ctx != ctx);
	} else {
		WARN_ON_ONCE(&cpuctx->ctx != ctx);
	}

	efs->func(event, cpuctx, ctx, efs->data);
unlock:
	perf_ctx_unlock(cpuctx, task_ctx);

	return ret;
}

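/*
 * event_function_call() modifies an event from outside its context. For a
 * CPU context the target CPU is simply IPI'd. For a task context the IPI is
 * re-sent to whichever CPU the task runs on until either event_function()
 * catches the task on-CPU, or the context is seen inactive under ctx->lock,
 * in which case @func can be applied directly without a running PMU context.
 */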
static void event_function_call(struct perf_event *event, event_f func, void *data)
{
	struct perf_event_context *ctx = event->ctx;
	struct task_struct *task = READ_ONCE(ctx->task); /* verified in event_function */
	struct event_function_struct efs = {
		.event = event,
		.func = func,
		.data = data,
	};

	if (!event->parent) {
		/*
		 * If this is a !child event, we must hold ctx::mutex to
		 * stabilize the event->ctx relation. See
		 * perf_event_ctx_lock().
		 */
		lockdep_assert_held(&ctx->mutex);
	}

	if (!task) {
		cpu_function_call(event->cpu, event_function, &efs);
		return;
	}

	if (task == TASK_TOMBSTONE)
		return;

again:
	if (!task_function_call(task, event_function, &efs))
		return;

	raw_spin_lock_irq(&ctx->lock);
	/*
	 * Reload the task pointer, it might have been changed by
	 * a concurrent perf_event_context_sched_out().
	 */
	task = ctx->task;
	if (task == TASK_TOMBSTONE) {
		raw_spin_unlock_irq(&ctx->lock);
		return;
	}
	if (ctx->is_active) {
		raw_spin_unlock_irq(&ctx->lock);
		goto again;
	}
	func(event, NULL, ctx, data);
	raw_spin_unlock_irq(&ctx->lock);
}

/*
 * Similar to event_function_call() + event_function(), but hard assumes IRQs
 * are already disabled and we're on the right CPU.
 */
static void event_function_local(struct perf_event *event, event_f func, void *data)
{
	struct perf_event_context *ctx = event->ctx;
	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
	struct task_struct *task = READ_ONCE(ctx->task);
	struct perf_event_context *task_ctx = NULL;

	lockdep_assert_irqs_disabled();

	if (task) {
		if (task == TASK_TOMBSTONE)
			return;

		task_ctx = ctx;
	}

	perf_ctx_lock(cpuctx, task_ctx);

	task = ctx->task;
	if (task == TASK_TOMBSTONE)
		goto unlock;

	if (task) {
		/*
		 * We must be either inactive or active and the right task,
		 * otherwise we're screwed, since we cannot IPI to somewhere
		 * else.
		 */
		if (ctx->is_active) {
			if (WARN_ON_ONCE(task != current))
				goto unlock;

			if (WARN_ON_ONCE(cpuctx->task_ctx != ctx))
				goto unlock;
		}
	} else {
		WARN_ON_ONCE(&cpuctx->ctx != ctx);
	}

	func(event, cpuctx, ctx, data);
unlock:
	perf_ctx_unlock(cpuctx, task_ctx);
}

#define PERF_FLAG_ALL (PERF_FLAG_FD_NO_GROUP |\
		       PERF_FLAG_FD_OUTPUT  |\
		       PERF_FLAG_PID_CGROUP |\
		       PERF_FLAG_FD_CLOEXEC)

/*
 * branch priv levels that need permission checks
 */
#define PERF_SAMPLE_BRANCH_PERM_PLM \
	(PERF_SAMPLE_BRANCH_KERNEL |\
	 PERF_SAMPLE_BRANCH_HV)

enum event_type_t {
	EVENT_FLEXIBLE = 0x1,
	EVENT_PINNED = 0x2,
	EVENT_TIME = 0x4,
	/* see ctx_resched() for details */
	EVENT_CPU = 0x8,
	EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED,
};

/*
 * perf_sched_events : >0 events exist
 * perf_cgroup_events: >0 per-cpu cgroup events exist on this cpu
 */

static void perf_sched_delayed(struct work_struct *work);
DEFINE_STATIC_KEY_FALSE(perf_sched_events);
static DECLARE_DELAYED_WORK(perf_sched_work, perf_sched_delayed);
static DEFINE_MUTEX(perf_sched_mutex);
static atomic_t perf_sched_count;

static DEFINE_PER_CPU(atomic_t, perf_cgroup_events);
static DEFINE_PER_CPU(int, perf_sched_cb_usages);
static DEFINE_PER_CPU(struct pmu_event_list, pmu_sb_events);

static atomic_t nr_mmap_events __read_mostly;
static atomic_t nr_comm_events __read_mostly;
static atomic_t nr_namespaces_events __read_mostly;
static atomic_t nr_task_events __read_mostly;
static atomic_t nr_freq_events __read_mostly;
static atomic_t nr_switch_events __read_mostly;
static atomic_t nr_ksymbol_events __read_mostly;
static atomic_t nr_bpf_events __read_mostly;

static LIST_HEAD(pmus);
static DEFINE_MUTEX(pmus_lock);
static struct srcu_struct pmus_srcu;
static cpumask_var_t perf_online_mask;

/*
 * perf event paranoia level:
 *  -1 - not paranoid at all
 *   0 - disallow raw tracepoint access for unpriv
 *   1 - disallow cpu events for unpriv
 *   2 - disallow kernel profiling for unpriv
 */
int sysctl_perf_event_paranoid __read_mostly = 2;
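/*
 * Exposed at runtime as /proc/sys/kernel/perf_event_paranoid; e.g.
 * "sysctl kernel.perf_event_paranoid=1" drops it from the default of 2,
 * permitting unprivileged kernel-level profiling of the user's own tasks
 * while still refusing CPU-wide events.
 */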

/* Minimum for 512 kiB + 1 user control page */
int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024); /* 'free' kiB per user */

/*
 * max perf event sample rate
 */
#define DEFAULT_MAX_SAMPLE_RATE		100000
#define DEFAULT_SAMPLE_PERIOD_NS	(NSEC_PER_SEC / DEFAULT_MAX_SAMPLE_RATE)
#define DEFAULT_CPU_TIME_MAX_PERCENT	25

int sysctl_perf_event_sample_rate __read_mostly = DEFAULT_MAX_SAMPLE_RATE;

static int max_samples_per_tick __read_mostly = DIV_ROUND_UP(DEFAULT_MAX_SAMPLE_RATE, HZ);
static int perf_sample_period_ns __read_mostly = DEFAULT_SAMPLE_PERIOD_NS;

static int perf_sample_allowed_ns __read_mostly =
	DEFAULT_SAMPLE_PERIOD_NS * DEFAULT_CPU_TIME_MAX_PERCENT / 100;
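/*
 * I.e. with the defaults above: 100000 samples/sec gives a 10,000 ns sample
 * period, and the 25% CPU budget allows each sample to burn up to 2,500 ns
 * of handler time before the throttling below kicks in.
 */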

static void update_perf_cpu_limits(void)
{
	u64 tmp = perf_sample_period_ns;

	tmp *= sysctl_perf_cpu_time_max_percent;
	tmp = div_u64(tmp, 100);
	if (!tmp)
		tmp = 1;

	WRITE_ONCE(perf_sample_allowed_ns, tmp);
}

static bool perf_rotate_context(struct perf_cpu_context *cpuctx);

int perf_proc_update_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos)
{
	int ret;
	int perf_cpu = sysctl_perf_cpu_time_max_percent;
	/*
	 * If throttling is disabled don't allow the write:
	 */
	if (write && (perf_cpu == 100 || perf_cpu == 0))
		return -EINVAL;

	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	if (ret || !write)
		return ret;

	max_samples_per_tick = DIV_ROUND_UP(sysctl_perf_event_sample_rate, HZ);
	perf_sample_period_ns = NSEC_PER_SEC / sysctl_perf_event_sample_rate;
	update_perf_cpu_limits();

	return 0;
}

int sysctl_perf_cpu_time_max_percent __read_mostly = DEFAULT_CPU_TIME_MAX_PERCENT;

int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
				void __user *buffer, size_t *lenp,
				loff_t *ppos)
{
	int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);

	if (ret || !write)
		return ret;

	if (sysctl_perf_cpu_time_max_percent == 100 ||
	    sysctl_perf_cpu_time_max_percent == 0) {
		printk(KERN_WARNING
		       "perf: Dynamic interrupt throttling disabled, can hang your system!\n");
		WRITE_ONCE(perf_sample_allowed_ns, 0);
	} else {
		update_perf_cpu_limits();
	}

	return 0;
}

/*
 * perf samples are done in some very critical code paths (NMIs).
 * If they take too much CPU time, the system can lock up and not
 * get any real work done.  This will drop the sample rate when
 * we detect that events are taking too long.
 */
#define NR_ACCUMULATED_SAMPLES 128
static DEFINE_PER_CPU(u64, running_sample_length);

static u64 __report_avg;
static u64 __report_allowed;

static void perf_duration_warn(struct irq_work *w)
{
	printk_ratelimited(KERN_INFO
		"perf: interrupt took too long (%lld > %lld), lowering "
		"kernel.perf_event_max_sample_rate to %d\n",
		__report_avg, __report_allowed,
		sysctl_perf_event_sample_rate);
}

static DEFINE_IRQ_WORK(perf_duration_work, perf_duration_warn);

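/*
 * Worked example (assuming HZ=1000, i.e. a 1,000,000 ns tick, and the 25%
 * default budget): if the decayed average sample cost settles at 10,000 ns
 * against a 2,500 ns budget, the code below pads it to 12,500 ns, allows
 * (1,000,000 / 100) * 25 / 12,500 = 20 samples per tick, and thus lowers
 * kernel.perf_event_max_sample_rate to 20 * HZ = 20,000.
 */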
void perf_sample_event_took(u64 sample_len_ns)
{
	u64 max_len = READ_ONCE(perf_sample_allowed_ns);
	u64 running_len;
	u64 avg_len;
	u32 max;

	if (max_len == 0)
		return;

	/* Decay the counter by 1 average sample. */
	running_len = __this_cpu_read(running_sample_length);
	running_len -= running_len/NR_ACCUMULATED_SAMPLES;
	running_len += sample_len_ns;
	__this_cpu_write(running_sample_length, running_len);

	/*
	 * Note: this will be biased artificially low until we have
	 * seen NR_ACCUMULATED_SAMPLES. Doing it this way keeps us
	 * from having to maintain a count.
	 */
	avg_len = running_len/NR_ACCUMULATED_SAMPLES;
	if (avg_len <= max_len)
		return;

	__report_avg = avg_len;
	__report_allowed = max_len;

	/*
	 * Compute a throttle threshold 25% below the current duration.
	 */
	avg_len += avg_len / 4;
	max = (TICK_NSEC / 100) * sysctl_perf_cpu_time_max_percent;
	if (avg_len < max)
		max /= (u32)avg_len;
	else
		max = 1;

	WRITE_ONCE(perf_sample_allowed_ns, avg_len);
	WRITE_ONCE(max_samples_per_tick, max);

	sysctl_perf_event_sample_rate = max * HZ;
	perf_sample_period_ns = NSEC_PER_SEC / sysctl_perf_event_sample_rate;

	if (!irq_work_queue(&perf_duration_work)) {
		early_printk("perf: interrupt took too long (%lld > %lld), lowering "
			     "kernel.perf_event_max_sample_rate to %d\n",
			     __report_avg, __report_allowed,
			     sysctl_perf_event_sample_rate);
	}
}

static atomic64_t perf_event_id;

static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
			      enum event_type_t event_type);

static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
			     enum event_type_t event_type,
			     struct task_struct *task);

static void update_context_time(struct perf_event_context *ctx);
static u64 perf_event_time(struct perf_event *event);

void __weak perf_event_print_debug(void)	{ }

extern __weak const char *perf_pmu_name(void)
{
	return "pmu";
}

static inline u64 perf_clock(void)
{
	return local_clock();
}

static inline u64 perf_event_clock(struct perf_event *event)
{
	return event->clock();
}

/*
 * State based event timekeeping...
 *
 * The basic idea is to use event->state to determine which (if any) time
 * fields to increment with the current delta. This means we only need to
 * update timestamps when we change state or when they are explicitly requested
 * (read).
 *
 * Event groups make things a little more complicated, but not terribly so. The
 * rules for a group are that if the group leader is OFF the entire group is
 * OFF, irrespective of what the group member states are. This results in
 * __perf_effective_state().
 *
 * A further ramification is that when a group leader flips between OFF and
 * !OFF, we need to update all group member times.
 *
 *
 * NOTE: perf_event_time() is based on the (cgroup) context time, and thus we
 * need to make sure the relevant context time is updated before we try and
 * update our timestamps.
 */

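/*
 * For example: an event that sat INACTIVE for 3ms after being enabled and has
 * since been ACTIVE for 2ms reports total_time_enabled = 5ms but
 * total_time_running = 2ms, while an OFF group leader freezes both counts for
 * every member of its group.
 */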
static __always_inline enum perf_event_state
__perf_effective_state(struct perf_event *event)
{
	struct perf_event *leader = event->group_leader;

	if (leader->state <= PERF_EVENT_STATE_OFF)
		return leader->state;

	return event->state;
}

static __always_inline void
__perf_update_times(struct perf_event *event, u64 now, u64 *enabled, u64 *running)
{
	enum perf_event_state state = __perf_effective_state(event);
	u64 delta = now - event->tstamp;

	*enabled = event->total_time_enabled;
	if (state >= PERF_EVENT_STATE_INACTIVE)
		*enabled += delta;

	*running = event->total_time_running;
	if (state >= PERF_EVENT_STATE_ACTIVE)
		*running += delta;
}

static void perf_event_update_time(struct perf_event *event)
{
	u64 now = perf_event_time(event);

	__perf_update_times(event, now, &event->total_time_enabled,
					&event->total_time_running);
	event->tstamp = now;
}

static void perf_event_update_sibling_time(struct perf_event *leader)
{
	struct perf_event *sibling;

	for_each_sibling_event(sibling, leader)
		perf_event_update_time(sibling);
}

static void
perf_event_set_state(struct perf_event *event, enum perf_event_state state)
{
	if (event->state == state)
		return;

	perf_event_update_time(event);
	/*
	 * If a group leader gets enabled/disabled all its siblings
	 * are affected too.
	 */
	if ((event->state < 0) ^ (state < 0))
		perf_event_update_sibling_time(event);

	WRITE_ONCE(event->state, state);
}

#ifdef CONFIG_CGROUP_PERF

static inline bool
perf_cgroup_match(struct perf_event *event)
{
	struct perf_event_context *ctx = event->ctx;
	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);

	/* @event doesn't care about cgroup */
	if (!event->cgrp)
		return true;

	/* wants specific cgroup scope but @cpuctx isn't associated with any */
	if (!cpuctx->cgrp)
		return false;

	/*
	 * Cgroup scoping is recursive.  An event enabled for a cgroup is
	 * also enabled for all its descendant cgroups.  If @cpuctx's
	 * cgroup is a descendant of @event's (the test covers identity
	 * case), it's a match.
	 */
	return cgroup_is_descendant(cpuctx->cgrp->css.cgroup,
				    event->cgrp->css.cgroup);
}
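/*
 * E.g. an event opened on cgroup A also matches while the CPU runs a task in
 * A/B or A/B/C, since those are descendants of A; the identity case (a task
 * in A itself) matches as well.
 */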

static inline void perf_detach_cgroup(struct perf_event *event)
{
	css_put(&event->cgrp->css);
	event->cgrp = NULL;
}

static inline int is_cgroup_event(struct perf_event *event)
{
	return event->cgrp != NULL;
}

static inline u64 perf_cgroup_event_time(struct perf_event *event)
{
	struct perf_cgroup_info *t;

	t = per_cpu_ptr(event->cgrp->info, event->cpu);
	return t->time;
}

static inline void __update_cgrp_time(struct perf_cgroup *cgrp)
{
	struct perf_cgroup_info *info;
	u64 now;

	now = perf_clock();

	info = this_cpu_ptr(cgrp->info);

	info->time += now - info->timestamp;
	info->timestamp = now;
}

static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx)
{
	struct perf_cgroup *cgrp = cpuctx->cgrp;
	struct cgroup_subsys_state *css;

	if (cgrp) {
		for (css = &cgrp->css; css; css = css->parent) {
			cgrp = container_of(css, struct perf_cgroup, css);
			__update_cgrp_time(cgrp);
		}
	}
}

static inline void update_cgrp_time_from_event(struct perf_event *event)
{
	struct perf_cgroup *cgrp;

	/*
	 * ensure we access cgroup data only when needed and
	 * when we know the cgroup is pinned (css_get)
	 */
	if (!is_cgroup_event(event))
		return;

	cgrp = perf_cgroup_from_task(current, event->ctx);
	/*
	 * Do not update time when cgroup is not active
	 */
	if (cgroup_is_descendant(cgrp->css.cgroup, event->cgrp->css.cgroup))
		__update_cgrp_time(event->cgrp);
}

static inline void
perf_cgroup_set_timestamp(struct task_struct *task,
			  struct perf_event_context *ctx)
{
	struct perf_cgroup *cgrp;
	struct perf_cgroup_info *info;
	struct cgroup_subsys_state *css;

	/*
	 * ctx->lock held by caller
	 * ensure we do not access cgroup data
	 * unless we have the cgroup pinned (css_get)
	 */
	if (!task || !ctx->nr_cgroups)
		return;

	cgrp = perf_cgroup_from_task(task, ctx);

	for (css = &cgrp->css; css; css = css->parent) {
		cgrp = container_of(css, struct perf_cgroup, css);
		info = this_cpu_ptr(cgrp->info);
		info->timestamp = ctx->timestamp;
	}
}

static DEFINE_PER_CPU(struct list_head, cgrp_cpuctx_list);

#define PERF_CGROUP_SWOUT	0x1 /* cgroup switch out every event */
#define PERF_CGROUP_SWIN	0x2 /* cgroup switch in events based on task */

/*
 * reschedule events based on the cgroup constraint of the task.
 *
 * mode SWOUT : schedule out everything
 * mode SWIN : schedule in based on cgroup for next
 */
static void perf_cgroup_switch(struct task_struct *task, int mode)
{
	struct perf_cpu_context *cpuctx;
	struct list_head *list;
	unsigned long flags;

	/*
	 * Disable interrupts and preemption to keep this CPU's
	 * cgrp_cpuctx_entry from changing under us.
	 */
	local_irq_save(flags);

	list = this_cpu_ptr(&cgrp_cpuctx_list);
	list_for_each_entry(cpuctx, list, cgrp_cpuctx_entry) {
		WARN_ON_ONCE(cpuctx->ctx.nr_cgroups == 0);

		perf_ctx_lock(cpuctx, cpuctx->task_ctx);
		perf_pmu_disable(cpuctx->ctx.pmu);

		if (mode & PERF_CGROUP_SWOUT) {
			cpu_ctx_sched_out(cpuctx, EVENT_ALL);
			/*
			 * must not be done before ctxswout due
			 * to event_filter_match() in event_sched_out()
			 */
			cpuctx->cgrp = NULL;
		}

		if (mode & PERF_CGROUP_SWIN) {
			WARN_ON_ONCE(cpuctx->cgrp);
			/*
			 * set cgrp before ctxsw in to allow
			 * event_filter_match() to not have to pass
			 * task around
			 * we pass the cpuctx->ctx to perf_cgroup_from_task()
			 * because cgroup events are only per-cpu
			 */
			cpuctx->cgrp = perf_cgroup_from_task(task,
							     &cpuctx->ctx);
			cpu_ctx_sched_in(cpuctx, EVENT_ALL, task);
		}
		perf_pmu_enable(cpuctx->ctx.pmu);
		perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
	}

	local_irq_restore(flags);
}

static inline void perf_cgroup_sched_out(struct task_struct *task,
					 struct task_struct *next)
{
	struct perf_cgroup *cgrp1;
	struct perf_cgroup *cgrp2 = NULL;

	rcu_read_lock();
	/*
	 * we come here when we know perf_cgroup_events > 0
	 * we do not need to pass the ctx here because we know
	 * we are holding the rcu lock
	 */
	cgrp1 = perf_cgroup_from_task(task, NULL);
	cgrp2 = perf_cgroup_from_task(next, NULL);

	/*
	 * only schedule out current cgroup events if we know
	 * that we are switching to a different cgroup. Otherwise,
	 * do not touch the cgroup events.
	 */
	if (cgrp1 != cgrp2)
		perf_cgroup_switch(task, PERF_CGROUP_SWOUT);

	rcu_read_unlock();
}

static inline void perf_cgroup_sched_in(struct task_struct *prev,
					struct task_struct *task)
{
	struct perf_cgroup *cgrp1;
	struct perf_cgroup *cgrp2 = NULL;

	rcu_read_lock();
	/*
	 * we come here when we know perf_cgroup_events > 0
	 * we do not need to pass the ctx here because we know
	 * we are holding the rcu lock
	 */
	cgrp1 = perf_cgroup_from_task(task, NULL);
	cgrp2 = perf_cgroup_from_task(prev, NULL);

	/*
	 * only need to schedule in cgroup events if we are changing
	 * cgroup during ctxsw. Cgroup events were not scheduled
	 * out during ctxsw if that was not the case.
	 */
	if (cgrp1 != cgrp2)
		perf_cgroup_switch(task, PERF_CGROUP_SWIN);

	rcu_read_unlock();
}

static inline int perf_cgroup_connect(int fd, struct perf_event *event,
				      struct perf_event_attr *attr,
				      struct perf_event *group_leader)
{
	struct perf_cgroup *cgrp;
	struct cgroup_subsys_state *css;
	struct fd f = fdget(fd);
	int ret = 0;

	if (!f.file)
		return -EBADF;

	css = css_tryget_online_from_dir(f.file->f_path.dentry,
					 &perf_event_cgrp_subsys);
	if (IS_ERR(css)) {
		ret = PTR_ERR(css);
		goto out;
	}

	cgrp = container_of(css, struct perf_cgroup, css);
	event->cgrp = cgrp;

	/*
	 * all events in a group must monitor
	 * the same cgroup because a task belongs
	 * to only one perf cgroup at a time
	 */
	if (group_leader && group_leader->cgrp != cgrp) {
		perf_detach_cgroup(event);
		ret = -EINVAL;
	}
out:
	fdput(f);
	return ret;
}

static inline void
perf_cgroup_set_shadow_time(struct perf_event *event, u64 now)
{
	struct perf_cgroup_info *t;
	t = per_cpu_ptr(event->cgrp->info, event->cpu);
	event->shadow_ctx_time = now - t->timestamp;
}

/*
 * Update cpuctx->cgrp so that it is set when first cgroup event is added and
 * cleared when last cgroup event is removed.
 */
static inline void
list_update_cgroup_event(struct perf_event *event,
			 struct perf_event_context *ctx, bool add)
{
	struct perf_cpu_context *cpuctx;
	struct list_head *cpuctx_entry;

	if (!is_cgroup_event(event))
		return;

	/*
	 * Because cgroup events are always per-cpu events,
	 * @ctx == &cpuctx->ctx.
	 */
	cpuctx = container_of(ctx, struct perf_cpu_context, ctx);

	/*
	 * Since setting cpuctx->cgrp is conditional on the current @cgrp
	 * matching the event's cgroup, we must do this for every new event,
	 * because if the first would mismatch, the second would not try again
	 * and we would leave cpuctx->cgrp unset.
	 */
	if (add && !cpuctx->cgrp) {
		struct perf_cgroup *cgrp = perf_cgroup_from_task(current, ctx);

		if (cgroup_is_descendant(cgrp->css.cgroup, event->cgrp->css.cgroup))
			cpuctx->cgrp = cgrp;
	}

	if (add && ctx->nr_cgroups++)
		return;
	else if (!add && --ctx->nr_cgroups)
		return;

	/* no cgroup running */
	if (!add)
		cpuctx->cgrp = NULL;

	cpuctx_entry = &cpuctx->cgrp_cpuctx_entry;
	if (add)
		list_add(cpuctx_entry,
			 per_cpu_ptr(&cgrp_cpuctx_list, event->cpu));
	else
		list_del(cpuctx_entry);
}

#else /* !CONFIG_CGROUP_PERF */

static inline bool
perf_cgroup_match(struct perf_event *event)
{
	return true;
}

static inline void perf_detach_cgroup(struct perf_event *event)
{}

static inline int is_cgroup_event(struct perf_event *event)
{
	return 0;
}

static inline void update_cgrp_time_from_event(struct perf_event *event)
{
}

static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx)
{
}

static inline void perf_cgroup_sched_out(struct task_struct *task,
					 struct task_struct *next)
{
}

static inline void perf_cgroup_sched_in(struct task_struct *prev,
					struct task_struct *task)
{
}

static inline int perf_cgroup_connect(pid_t pid, struct perf_event *event,
				      struct perf_event_attr *attr,
				      struct perf_event *group_leader)
{
	return -EINVAL;
}

static inline void
perf_cgroup_set_timestamp(struct task_struct *task,
			  struct perf_event_context *ctx)
{
}

static inline void
perf_cgroup_switch(struct task_struct *task, struct task_struct *next)
{
}

static inline void
perf_cgroup_set_shadow_time(struct perf_event *event, u64 now)
{
}

static inline u64 perf_cgroup_event_time(struct perf_event *event)
{
	return 0;
}

static inline void
list_update_cgroup_event(struct perf_event *event,
			 struct perf_event_context *ctx, bool add)
{
}

#endif

Stephane Eranian | 9e63020 | 2013-04-03 14:21:33 +0200 | [diff] [blame] | 1059 | /* |
 | 1060 |  * Set the default multiplexing interval to one timer tick, |
 | 1061 |  * matching the original behaviour. |
| 1062 | */ |
| 1063 | #define PERF_CPU_HRTIMER (1000 / HZ) |
| 1064 | /* |
Masahiro Yamada | 8a1115f | 2017-03-09 16:16:31 -0800 | [diff] [blame] | 1065 | * function must be called with interrupts disabled |
Stephane Eranian | 9e63020 | 2013-04-03 14:21:33 +0200 | [diff] [blame] | 1066 | */ |
Peter Zijlstra | 272325c | 2015-04-15 11:41:58 +0200 | [diff] [blame] | 1067 | static enum hrtimer_restart perf_mux_hrtimer_handler(struct hrtimer *hr) |
Stephane Eranian | 9e63020 | 2013-04-03 14:21:33 +0200 | [diff] [blame] | 1068 | { |
| 1069 | struct perf_cpu_context *cpuctx; |
Peter Zijlstra | 8d5bce0 | 2018-03-09 14:56:27 +0100 | [diff] [blame] | 1070 | bool rotations; |
Stephane Eranian | 9e63020 | 2013-04-03 14:21:33 +0200 | [diff] [blame] | 1071 | |
Frederic Weisbecker | 1644464 | 2017-11-06 16:01:24 +0100 | [diff] [blame] | 1072 | lockdep_assert_irqs_disabled(); |
Stephane Eranian | 9e63020 | 2013-04-03 14:21:33 +0200 | [diff] [blame] | 1073 | |
| 1074 | cpuctx = container_of(hr, struct perf_cpu_context, hrtimer); |
Stephane Eranian | 9e63020 | 2013-04-03 14:21:33 +0200 | [diff] [blame] | 1075 | rotations = perf_rotate_context(cpuctx); |
| 1076 | |
Peter Zijlstra | 4cfafd3 | 2015-05-14 12:23:11 +0200 | [diff] [blame] | 1077 | raw_spin_lock(&cpuctx->hrtimer_lock); |
| 1078 | if (rotations) |
Stephane Eranian | 9e63020 | 2013-04-03 14:21:33 +0200 | [diff] [blame] | 1079 | hrtimer_forward_now(hr, cpuctx->hrtimer_interval); |
Peter Zijlstra | 4cfafd3 | 2015-05-14 12:23:11 +0200 | [diff] [blame] | 1080 | else |
| 1081 | cpuctx->hrtimer_active = 0; |
| 1082 | raw_spin_unlock(&cpuctx->hrtimer_lock); |
Stephane Eranian | 9e63020 | 2013-04-03 14:21:33 +0200 | [diff] [blame] | 1083 | |
Peter Zijlstra | 4cfafd3 | 2015-05-14 12:23:11 +0200 | [diff] [blame] | 1084 | return rotations ? HRTIMER_RESTART : HRTIMER_NORESTART; |
Stephane Eranian | 9e63020 | 2013-04-03 14:21:33 +0200 | [diff] [blame] | 1085 | } |
| 1086 | |
Peter Zijlstra | 272325c | 2015-04-15 11:41:58 +0200 | [diff] [blame] | 1087 | static void __perf_mux_hrtimer_init(struct perf_cpu_context *cpuctx, int cpu) |
Stephane Eranian | 9e63020 | 2013-04-03 14:21:33 +0200 | [diff] [blame] | 1088 | { |
Peter Zijlstra | 272325c | 2015-04-15 11:41:58 +0200 | [diff] [blame] | 1089 | struct hrtimer *timer = &cpuctx->hrtimer; |
Stephane Eranian | 9e63020 | 2013-04-03 14:21:33 +0200 | [diff] [blame] | 1090 | struct pmu *pmu = cpuctx->ctx.pmu; |
Peter Zijlstra | 272325c | 2015-04-15 11:41:58 +0200 | [diff] [blame] | 1091 | u64 interval; |
Stephane Eranian | 9e63020 | 2013-04-03 14:21:33 +0200 | [diff] [blame] | 1092 | |
| 1093 | /* no multiplexing needed for SW PMU */ |
| 1094 | if (pmu->task_ctx_nr == perf_sw_context) |
| 1095 | return; |
| 1096 | |
Stephane Eranian | 62b8563 | 2013-04-03 14:21:34 +0200 | [diff] [blame] | 1097 | /* |
 | 1098 |  * Check that the default is sane; if not set, force it to the |
 | 1099 |  * default interval (1/tick). |
| 1100 | */ |
Peter Zijlstra | 272325c | 2015-04-15 11:41:58 +0200 | [diff] [blame] | 1101 | interval = pmu->hrtimer_interval_ms; |
| 1102 | if (interval < 1) |
| 1103 | interval = pmu->hrtimer_interval_ms = PERF_CPU_HRTIMER; |
Stephane Eranian | 62b8563 | 2013-04-03 14:21:34 +0200 | [diff] [blame] | 1104 | |
Peter Zijlstra | 272325c | 2015-04-15 11:41:58 +0200 | [diff] [blame] | 1105 | cpuctx->hrtimer_interval = ns_to_ktime(NSEC_PER_MSEC * interval); |
Stephane Eranian | 9e63020 | 2013-04-03 14:21:33 +0200 | [diff] [blame] | 1106 | |
Peter Zijlstra | 4cfafd3 | 2015-05-14 12:23:11 +0200 | [diff] [blame] | 1107 | raw_spin_lock_init(&cpuctx->hrtimer_lock); |
Sebastian Andrzej Siewior | 30f9028 | 2019-07-26 20:30:53 +0200 | [diff] [blame] | 1108 | hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED_HARD); |
Peter Zijlstra | 272325c | 2015-04-15 11:41:58 +0200 | [diff] [blame] | 1109 | timer->function = perf_mux_hrtimer_handler; |
Stephane Eranian | 9e63020 | 2013-04-03 14:21:33 +0200 | [diff] [blame] | 1110 | } |
| 1111 | |
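When a PMU leaves hrtimer_interval_ms at zero, the init path above falls back to PERF_CPU_HRTIMER, i.e. 1000/HZ milliseconds (one scheduler tick), before converting to a ktime in nanoseconds. A small userspace sketch of that arithmetic; the HZ value here is an assumption for illustration only:

#include <stdio.h>

#define NSEC_PER_MSEC	1000000ULL
#define HZ		250			/* assumed CONFIG_HZ for this example */
#define PERF_CPU_HRTIMER (1000 / HZ)		/* default mux interval, in ms */

int main(void)
{
	unsigned long long interval_ms = 0;	/* pmu->hrtimer_interval_ms unset */

	if (interval_ms < 1)
		interval_ms = PERF_CPU_HRTIMER;

	/* With HZ=250 this prints 4ms, i.e. 4000000ns. */
	printf("mux interval: %llums (%lluns)\n",
	       interval_ms, NSEC_PER_MSEC * interval_ms);
	return 0;
}
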
Peter Zijlstra | 272325c | 2015-04-15 11:41:58 +0200 | [diff] [blame] | 1112 | static int perf_mux_hrtimer_restart(struct perf_cpu_context *cpuctx) |
Stephane Eranian | 9e63020 | 2013-04-03 14:21:33 +0200 | [diff] [blame] | 1113 | { |
Peter Zijlstra | 272325c | 2015-04-15 11:41:58 +0200 | [diff] [blame] | 1114 | struct hrtimer *timer = &cpuctx->hrtimer; |
Stephane Eranian | 9e63020 | 2013-04-03 14:21:33 +0200 | [diff] [blame] | 1115 | struct pmu *pmu = cpuctx->ctx.pmu; |
Peter Zijlstra | 4cfafd3 | 2015-05-14 12:23:11 +0200 | [diff] [blame] | 1116 | unsigned long flags; |
Stephane Eranian | 9e63020 | 2013-04-03 14:21:33 +0200 | [diff] [blame] | 1117 | |
| 1118 | /* not for SW PMU */ |
| 1119 | if (pmu->task_ctx_nr == perf_sw_context) |
Peter Zijlstra | 272325c | 2015-04-15 11:41:58 +0200 | [diff] [blame] | 1120 | return 0; |
Stephane Eranian | 9e63020 | 2013-04-03 14:21:33 +0200 | [diff] [blame] | 1121 | |
Peter Zijlstra | 4cfafd3 | 2015-05-14 12:23:11 +0200 | [diff] [blame] | 1122 | raw_spin_lock_irqsave(&cpuctx->hrtimer_lock, flags); |
| 1123 | if (!cpuctx->hrtimer_active) { |
| 1124 | cpuctx->hrtimer_active = 1; |
| 1125 | hrtimer_forward_now(timer, cpuctx->hrtimer_interval); |
Sebastian Andrzej Siewior | 30f9028 | 2019-07-26 20:30:53 +0200 | [diff] [blame] | 1126 | hrtimer_start_expires(timer, HRTIMER_MODE_ABS_PINNED_HARD); |
Peter Zijlstra | 4cfafd3 | 2015-05-14 12:23:11 +0200 | [diff] [blame] | 1127 | } |
| 1128 | raw_spin_unlock_irqrestore(&cpuctx->hrtimer_lock, flags); |
Stephane Eranian | 9e63020 | 2013-04-03 14:21:33 +0200 | [diff] [blame] | 1129 | |
Peter Zijlstra | 272325c | 2015-04-15 11:41:58 +0200 | [diff] [blame] | 1130 | return 0; |
Stephane Eranian | 9e63020 | 2013-04-03 14:21:33 +0200 | [diff] [blame] | 1131 | } |
| 1132 | |
Peter Zijlstra | 33696fc | 2010-06-14 08:49:00 +0200 | [diff] [blame] | 1133 | void perf_pmu_disable(struct pmu *pmu) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1134 | { |
Peter Zijlstra | 33696fc | 2010-06-14 08:49:00 +0200 | [diff] [blame] | 1135 | int *count = this_cpu_ptr(pmu->pmu_disable_count); |
| 1136 | if (!(*count)++) |
| 1137 | pmu->pmu_disable(pmu); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1138 | } |
| 1139 | |
Peter Zijlstra | 33696fc | 2010-06-14 08:49:00 +0200 | [diff] [blame] | 1140 | void perf_pmu_enable(struct pmu *pmu) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1141 | { |
Peter Zijlstra | 33696fc | 2010-06-14 08:49:00 +0200 | [diff] [blame] | 1142 | int *count = this_cpu_ptr(pmu->pmu_disable_count); |
| 1143 | if (!--(*count)) |
| 1144 | pmu->pmu_enable(pmu); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1145 | } |
| 1146 | |
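perf_pmu_disable()/perf_pmu_enable() nest: only the outermost disable and the matching final enable reach the PMU's callbacks. A stand-alone sketch of that reference-counted gating, with the per-CPU counter replaced by a plain static counter for illustration:

#include <stdio.h>

static int pmu_disable_count;

static void pmu_disable(void) { printf("hw: disable\n"); }
static void pmu_enable(void)  { printf("hw: enable\n");  }

static void perf_pmu_disable_sketch(void)
{
	if (!pmu_disable_count++)
		pmu_disable();		/* only on the 0 -> 1 transition */
}

static void perf_pmu_enable_sketch(void)
{
	if (!--pmu_disable_count)
		pmu_enable();		/* only on the 1 -> 0 transition */
}

int main(void)
{
	perf_pmu_disable_sketch();	/* hw: disable */
	perf_pmu_disable_sketch();	/* nested, no output */
	perf_pmu_enable_sketch();	/* nested, no output */
	perf_pmu_enable_sketch();	/* hw: enable */
	return 0;
}
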
Mark Rutland | 2fde4f9 | 2015-01-07 15:01:54 +0000 | [diff] [blame] | 1147 | static DEFINE_PER_CPU(struct list_head, active_ctx_list); |
Peter Zijlstra | e9d2b06 | 2010-09-17 11:28:50 +0200 | [diff] [blame] | 1148 | |
| 1149 | /* |
Mark Rutland | 2fde4f9 | 2015-01-07 15:01:54 +0000 | [diff] [blame] | 1150 | * perf_event_ctx_activate(), perf_event_ctx_deactivate(), and |
| 1151 | * perf_event_task_tick() are fully serialized because they're strictly cpu |
| 1152 | * affine and perf_event_ctx{activate,deactivate} are called with IRQs |
| 1153 | * disabled, while perf_event_task_tick is called from IRQ context. |
Peter Zijlstra | e9d2b06 | 2010-09-17 11:28:50 +0200 | [diff] [blame] | 1154 | */ |
Mark Rutland | 2fde4f9 | 2015-01-07 15:01:54 +0000 | [diff] [blame] | 1155 | static void perf_event_ctx_activate(struct perf_event_context *ctx) |
Peter Zijlstra | b5ab4cd | 2010-09-06 16:32:21 +0200 | [diff] [blame] | 1156 | { |
Mark Rutland | 2fde4f9 | 2015-01-07 15:01:54 +0000 | [diff] [blame] | 1157 | struct list_head *head = this_cpu_ptr(&active_ctx_list); |
Peter Zijlstra | b5ab4cd | 2010-09-06 16:32:21 +0200 | [diff] [blame] | 1158 | |
Frederic Weisbecker | 1644464 | 2017-11-06 16:01:24 +0100 | [diff] [blame] | 1159 | lockdep_assert_irqs_disabled(); |
Peter Zijlstra | b5ab4cd | 2010-09-06 16:32:21 +0200 | [diff] [blame] | 1160 | |
Mark Rutland | 2fde4f9 | 2015-01-07 15:01:54 +0000 | [diff] [blame] | 1161 | WARN_ON(!list_empty(&ctx->active_ctx_list)); |
| 1162 | |
| 1163 | list_add(&ctx->active_ctx_list, head); |
| 1164 | } |
| 1165 | |
| 1166 | static void perf_event_ctx_deactivate(struct perf_event_context *ctx) |
| 1167 | { |
Frederic Weisbecker | 1644464 | 2017-11-06 16:01:24 +0100 | [diff] [blame] | 1168 | lockdep_assert_irqs_disabled(); |
Mark Rutland | 2fde4f9 | 2015-01-07 15:01:54 +0000 | [diff] [blame] | 1169 | |
| 1170 | WARN_ON(list_empty(&ctx->active_ctx_list)); |
| 1171 | |
| 1172 | list_del_init(&ctx->active_ctx_list); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1173 | } |
| 1174 | |
| 1175 | static void get_ctx(struct perf_event_context *ctx) |
| 1176 | { |
Elena Reshetova | 8c94abb | 2019-01-28 14:27:26 +0200 | [diff] [blame] | 1177 | refcount_inc(&ctx->refcount); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1178 | } |
| 1179 | |
Yan, Zheng | 4af57ef | 2014-11-04 21:56:01 -0500 | [diff] [blame] | 1180 | static void free_ctx(struct rcu_head *head) |
| 1181 | { |
| 1182 | struct perf_event_context *ctx; |
| 1183 | |
| 1184 | ctx = container_of(head, struct perf_event_context, rcu_head); |
| 1185 | kfree(ctx->task_ctx_data); |
| 1186 | kfree(ctx); |
| 1187 | } |
| 1188 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1189 | static void put_ctx(struct perf_event_context *ctx) |
| 1190 | { |
Elena Reshetova | 8c94abb | 2019-01-28 14:27:26 +0200 | [diff] [blame] | 1191 | if (refcount_dec_and_test(&ctx->refcount)) { |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1192 | if (ctx->parent_ctx) |
| 1193 | put_ctx(ctx->parent_ctx); |
Peter Zijlstra | 63b6da3 | 2016-01-14 16:05:37 +0100 | [diff] [blame] | 1194 | if (ctx->task && ctx->task != TASK_TOMBSTONE) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1195 | put_task_struct(ctx->task); |
Yan, Zheng | 4af57ef | 2014-11-04 21:56:01 -0500 | [diff] [blame] | 1196 | call_rcu(&ctx->rcu_head, free_ctx); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1197 | } |
| 1198 | } |
| 1199 | |
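put_ctx() frees the context only when the last reference is dropped, and even then defers the actual kfree() through call_rcu() so that readers still inside an RCU critical section stay safe until a grace period passes. A simplified userspace analogue of the refcount half; the RCU deferral is only noted in a comment and the names are illustrative:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct ctx {
	atomic_int refcount;
	/* ... payload ... */
};

static struct ctx *ctx_alloc(void)
{
	struct ctx *c = calloc(1, sizeof(*c));

	if (!c)
		exit(1);
	atomic_init(&c->refcount, 1);
	return c;
}

static void get_ctx(struct ctx *c)
{
	atomic_fetch_add(&c->refcount, 1);
}

static void put_ctx(struct ctx *c)
{
	/* Last reference dropped: the kernel defers the free via call_rcu(). */
	if (atomic_fetch_sub(&c->refcount, 1) == 1) {
		printf("freeing ctx\n");
		free(c);
	}
}

int main(void)
{
	struct ctx *c = ctx_alloc();

	get_ctx(c);	/* refcount 2 */
	put_ctx(c);	/* refcount 1 */
	put_ctx(c);	/* refcount 0 -> freed */
	return 0;
}
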
Peter Zijlstra | 211de6e | 2014-09-30 19:23:08 +0200 | [diff] [blame] | 1200 | /* |
Peter Zijlstra | f63a8da | 2015-01-23 12:24:14 +0100 | [diff] [blame] | 1201 | * Because of perf_event::ctx migration in sys_perf_event_open::move_group and |
| 1202 | * perf_pmu_migrate_context() we need some magic. |
| 1203 | * |
| 1204 | * Those places that change perf_event::ctx will hold both |
| 1205 | * perf_event_ctx::mutex of the 'old' and 'new' ctx value. |
| 1206 | * |
Peter Zijlstra | 8b10c5e | 2015-05-01 16:08:46 +0200 | [diff] [blame] | 1207 | * Lock ordering is by mutex address. There are two other sites where |
| 1208 | * perf_event_context::mutex nests and those are: |
| 1209 | * |
| 1210 | * - perf_event_exit_task_context() [ child , 0 ] |
Peter Zijlstra | 8ba289b | 2016-01-26 13:06:56 +0100 | [diff] [blame] | 1211 | * perf_event_exit_event() |
| 1212 | * put_event() [ parent, 1 ] |
Peter Zijlstra | 8b10c5e | 2015-05-01 16:08:46 +0200 | [diff] [blame] | 1213 | * |
| 1214 | * - perf_event_init_context() [ parent, 0 ] |
| 1215 | * inherit_task_group() |
| 1216 | * inherit_group() |
| 1217 | * inherit_event() |
| 1218 | * perf_event_alloc() |
| 1219 | * perf_init_event() |
| 1220 | * perf_try_init_event() [ child , 1 ] |
| 1221 | * |
| 1222 | * While it appears there is an obvious deadlock here -- the parent and child |
| 1223 | * nesting levels are inverted between the two. This is in fact safe because |
| 1224 | * life-time rules separate them. That is an exiting task cannot fork, and a |
| 1225 | * spawning task cannot (yet) exit. |
| 1226 | * |
 | 1227 |  * But remember that these are parent<->child context relations, and |
| 1228 | * migration does not affect children, therefore these two orderings should not |
| 1229 | * interact. |
Peter Zijlstra | f63a8da | 2015-01-23 12:24:14 +0100 | [diff] [blame] | 1230 | * |
| 1231 | * The change in perf_event::ctx does not affect children (as claimed above) |
| 1232 | * because the sys_perf_event_open() case will install a new event and break |
| 1233 | * the ctx parent<->child relation, and perf_pmu_migrate_context() is only |
| 1234 | * concerned with cpuctx and that doesn't have children. |
| 1235 | * |
| 1236 | * The places that change perf_event::ctx will issue: |
| 1237 | * |
| 1238 | * perf_remove_from_context(); |
| 1239 | * synchronize_rcu(); |
| 1240 | * perf_install_in_context(); |
| 1241 | * |
 | 1242 |  * to effect the change. The remove_from_context() + synchronize_rcu() should |
| 1243 | * quiesce the event, after which we can install it in the new location. This |
| 1244 | * means that only external vectors (perf_fops, prctl) can perturb the event |
| 1245 | * while in transit. Therefore all such accessors should also acquire |
| 1246 | * perf_event_context::mutex to serialize against this. |
| 1247 | * |
| 1248 | * However; because event->ctx can change while we're waiting to acquire |
| 1249 | * ctx->mutex we must be careful and use the below perf_event_ctx_lock() |
| 1250 | * function. |
| 1251 | * |
| 1252 | * Lock order: |
Peter Zijlstra | 79c9ce5 | 2016-04-26 11:36:53 +0200 | [diff] [blame] | 1253 | * cred_guard_mutex |
Peter Zijlstra | f63a8da | 2015-01-23 12:24:14 +0100 | [diff] [blame] | 1254 | * task_struct::perf_event_mutex |
| 1255 | * perf_event_context::mutex |
Peter Zijlstra | f63a8da | 2015-01-23 12:24:14 +0100 | [diff] [blame] | 1256 | * perf_event::child_mutex; |
Peter Zijlstra | 07c4a77 | 2016-01-26 12:15:37 +0100 | [diff] [blame] | 1257 | * perf_event_context::lock |
Peter Zijlstra | f63a8da | 2015-01-23 12:24:14 +0100 | [diff] [blame] | 1258 | * perf_event::mmap_mutex |
| 1259 | * mmap_sem |
Alexander Shishkin | 18736ee | 2019-02-15 13:56:54 +0200 | [diff] [blame] | 1260 | * perf_addr_filters_head::lock |
Peter Zijlstra | 82d9485 | 2018-01-09 13:10:30 +0100 | [diff] [blame] | 1261 | * |
| 1262 | * cpu_hotplug_lock |
| 1263 | * pmus_lock |
| 1264 | * cpuctx->mutex / perf_event_context::mutex |
Peter Zijlstra | f63a8da | 2015-01-23 12:24:14 +0100 | [diff] [blame] | 1265 | */ |
Peter Zijlstra | a83fe28 | 2015-01-29 14:44:34 +0100 | [diff] [blame] | 1266 | static struct perf_event_context * |
| 1267 | perf_event_ctx_lock_nested(struct perf_event *event, int nesting) |
Peter Zijlstra | f63a8da | 2015-01-23 12:24:14 +0100 | [diff] [blame] | 1268 | { |
| 1269 | struct perf_event_context *ctx; |
| 1270 | |
| 1271 | again: |
| 1272 | rcu_read_lock(); |
Mark Rutland | 6aa7de0 | 2017-10-23 14:07:29 -0700 | [diff] [blame] | 1273 | ctx = READ_ONCE(event->ctx); |
Elena Reshetova | 8c94abb | 2019-01-28 14:27:26 +0200 | [diff] [blame] | 1274 | if (!refcount_inc_not_zero(&ctx->refcount)) { |
Peter Zijlstra | f63a8da | 2015-01-23 12:24:14 +0100 | [diff] [blame] | 1275 | rcu_read_unlock(); |
| 1276 | goto again; |
| 1277 | } |
| 1278 | rcu_read_unlock(); |
| 1279 | |
Peter Zijlstra | a83fe28 | 2015-01-29 14:44:34 +0100 | [diff] [blame] | 1280 | mutex_lock_nested(&ctx->mutex, nesting); |
Peter Zijlstra | f63a8da | 2015-01-23 12:24:14 +0100 | [diff] [blame] | 1281 | if (event->ctx != ctx) { |
| 1282 | mutex_unlock(&ctx->mutex); |
| 1283 | put_ctx(ctx); |
| 1284 | goto again; |
| 1285 | } |
| 1286 | |
| 1287 | return ctx; |
| 1288 | } |
| 1289 | |
Peter Zijlstra | a83fe28 | 2015-01-29 14:44:34 +0100 | [diff] [blame] | 1290 | static inline struct perf_event_context * |
| 1291 | perf_event_ctx_lock(struct perf_event *event) |
| 1292 | { |
| 1293 | return perf_event_ctx_lock_nested(event, 0); |
| 1294 | } |
| 1295 | |
Peter Zijlstra | f63a8da | 2015-01-23 12:24:14 +0100 | [diff] [blame] | 1296 | static void perf_event_ctx_unlock(struct perf_event *event, |
| 1297 | struct perf_event_context *ctx) |
| 1298 | { |
| 1299 | mutex_unlock(&ctx->mutex); |
| 1300 | put_ctx(ctx); |
| 1301 | } |
| 1302 | |
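perf_event_ctx_lock_nested() has to take a mutex that lives inside an object the event can be re-pointed away from while we sleep on that mutex; the pattern is: load the pointer, lock it, then re-check the pointer and retry if it moved. A single-threaded sketch of that control flow under assumed types; the refcounting and RCU that keep the old context alive in the kernel are deliberately omitted:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct ctx {
	pthread_mutex_t mutex;
};

struct event {
	_Atomic(struct ctx *) ctx;	/* can be switched by another thread */
};

/* Lock whatever context the event points at right now, and make sure it
 * is still the event's context once we own the mutex. */
static struct ctx *event_ctx_lock(struct event *e)
{
	struct ctx *c;
again:
	c = atomic_load(&e->ctx);
	pthread_mutex_lock(&c->mutex);
	if (atomic_load(&e->ctx) != c) {
		/* ctx changed while we slept on the mutex: retry */
		pthread_mutex_unlock(&c->mutex);
		goto again;
	}
	return c;
}

int main(void)
{
	struct ctx c = { .mutex = PTHREAD_MUTEX_INITIALIZER };
	struct event e = { .ctx = &c };
	struct ctx *locked = event_ctx_lock(&e);

	printf("locked ctx %p\n", (void *)locked);
	pthread_mutex_unlock(&locked->mutex);
	return 0;
}
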
| 1303 | /* |
Peter Zijlstra | 211de6e | 2014-09-30 19:23:08 +0200 | [diff] [blame] | 1304 |  * This must be done under the ctx->lock, so as to serialize against |
| 1305 | * context_equiv(), therefore we cannot call put_ctx() since that might end up |
| 1306 | * calling scheduler related locks and ctx->lock nests inside those. |
| 1307 | */ |
| 1308 | static __must_check struct perf_event_context * |
| 1309 | unclone_ctx(struct perf_event_context *ctx) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1310 | { |
Peter Zijlstra | 211de6e | 2014-09-30 19:23:08 +0200 | [diff] [blame] | 1311 | struct perf_event_context *parent_ctx = ctx->parent_ctx; |
| 1312 | |
| 1313 | lockdep_assert_held(&ctx->lock); |
| 1314 | |
| 1315 | if (parent_ctx) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1316 | ctx->parent_ctx = NULL; |
Peter Zijlstra | 5a3126d | 2013-10-07 17:12:48 +0200 | [diff] [blame] | 1317 | ctx->generation++; |
Peter Zijlstra | 211de6e | 2014-09-30 19:23:08 +0200 | [diff] [blame] | 1318 | |
| 1319 | return parent_ctx; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1320 | } |
| 1321 | |
Oleg Nesterov | 1d95311 | 2017-08-22 17:59:28 +0200 | [diff] [blame] | 1322 | static u32 perf_event_pid_type(struct perf_event *event, struct task_struct *p, |
| 1323 | enum pid_type type) |
Arnaldo Carvalho de Melo | 6844c09 | 2010-12-03 16:36:35 -0200 | [diff] [blame] | 1324 | { |
Oleg Nesterov | 1d95311 | 2017-08-22 17:59:28 +0200 | [diff] [blame] | 1325 | u32 nr; |
Arnaldo Carvalho de Melo | 6844c09 | 2010-12-03 16:36:35 -0200 | [diff] [blame] | 1326 | /* |
| 1327 | * only top level events have the pid namespace they were created in |
| 1328 | */ |
| 1329 | if (event->parent) |
| 1330 | event = event->parent; |
| 1331 | |
Oleg Nesterov | 1d95311 | 2017-08-22 17:59:28 +0200 | [diff] [blame] | 1332 | nr = __task_pid_nr_ns(p, type, event->ns); |
 | 1333 | 	/* avoid -1 if it is the idle thread or runs in another ns */ |
| 1334 | if (!nr && !pid_alive(p)) |
| 1335 | nr = -1; |
| 1336 | return nr; |
| 1337 | } |
| 1338 | |
| 1339 | static u32 perf_event_pid(struct perf_event *event, struct task_struct *p) |
| 1340 | { |
Eric W. Biederman | 6883f81 | 2017-06-04 04:32:13 -0500 | [diff] [blame] | 1341 | return perf_event_pid_type(event, p, PIDTYPE_TGID); |
Arnaldo Carvalho de Melo | 6844c09 | 2010-12-03 16:36:35 -0200 | [diff] [blame] | 1342 | } |
| 1343 | |
| 1344 | static u32 perf_event_tid(struct perf_event *event, struct task_struct *p) |
| 1345 | { |
Oleg Nesterov | 1d95311 | 2017-08-22 17:59:28 +0200 | [diff] [blame] | 1346 | return perf_event_pid_type(event, p, PIDTYPE_PID); |
Arnaldo Carvalho de Melo | 6844c09 | 2010-12-03 16:36:35 -0200 | [diff] [blame] | 1347 | } |
| 1348 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1349 | /* |
| 1350 | * If we inherit events we want to return the parent event id |
| 1351 | * to userspace. |
| 1352 | */ |
| 1353 | static u64 primary_event_id(struct perf_event *event) |
| 1354 | { |
| 1355 | u64 id = event->id; |
| 1356 | |
| 1357 | if (event->parent) |
| 1358 | id = event->parent->id; |
| 1359 | |
| 1360 | return id; |
| 1361 | } |
| 1362 | |
| 1363 | /* |
| 1364 | * Get the perf_event_context for a task and lock it. |
Peter Zijlstra | 63b6da3 | 2016-01-14 16:05:37 +0100 | [diff] [blame] | 1365 | * |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1366 |  * This has to cope with the fact that until it is locked, |
| 1367 | * the context could get moved to another task. |
| 1368 | */ |
| 1369 | static struct perf_event_context * |
Peter Zijlstra | 8dc85d547 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 1370 | perf_lock_task_context(struct task_struct *task, int ctxn, unsigned long *flags) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1371 | { |
| 1372 | struct perf_event_context *ctx; |
| 1373 | |
Peter Zijlstra | 9ed6060 | 2010-06-11 17:36:35 +0200 | [diff] [blame] | 1374 | retry: |
Peter Zijlstra | 058ebd0 | 2013-07-12 11:08:33 +0200 | [diff] [blame] | 1375 | /* |
| 1376 | * One of the few rules of preemptible RCU is that one cannot do |
| 1377 | * rcu_read_unlock() while holding a scheduler (or nested) lock when |
Paul E. McKenney | 2fd5907 | 2015-11-04 05:48:38 -0800 | [diff] [blame] | 1378 | * part of the read side critical section was irqs-enabled -- see |
Peter Zijlstra | 058ebd0 | 2013-07-12 11:08:33 +0200 | [diff] [blame] | 1379 | * rcu_read_unlock_special(). |
| 1380 | * |
| 1381 | * Since ctx->lock nests under rq->lock we must ensure the entire read |
Paul E. McKenney | 2fd5907 | 2015-11-04 05:48:38 -0800 | [diff] [blame] | 1382 | * side critical section has interrupts disabled. |
Peter Zijlstra | 058ebd0 | 2013-07-12 11:08:33 +0200 | [diff] [blame] | 1383 | */ |
Paul E. McKenney | 2fd5907 | 2015-11-04 05:48:38 -0800 | [diff] [blame] | 1384 | local_irq_save(*flags); |
Peter Zijlstra | 058ebd0 | 2013-07-12 11:08:33 +0200 | [diff] [blame] | 1385 | rcu_read_lock(); |
Peter Zijlstra | 8dc85d547 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 1386 | ctx = rcu_dereference(task->perf_event_ctxp[ctxn]); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1387 | if (ctx) { |
| 1388 | /* |
| 1389 | * If this context is a clone of another, it might |
| 1390 | * get swapped for another underneath us by |
| 1391 | * perf_event_task_sched_out, though the |
| 1392 | * rcu_read_lock() protects us from any context |
| 1393 | * getting freed. Lock the context and check if it |
| 1394 | * got swapped before we could get the lock, and retry |
| 1395 | * if so. If we locked the right context, then it |
| 1396 | * can't get swapped on us any more. |
| 1397 | */ |
Paul E. McKenney | 2fd5907 | 2015-11-04 05:48:38 -0800 | [diff] [blame] | 1398 | raw_spin_lock(&ctx->lock); |
Peter Zijlstra | 8dc85d547 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 1399 | if (ctx != rcu_dereference(task->perf_event_ctxp[ctxn])) { |
Paul E. McKenney | 2fd5907 | 2015-11-04 05:48:38 -0800 | [diff] [blame] | 1400 | raw_spin_unlock(&ctx->lock); |
Peter Zijlstra | 058ebd0 | 2013-07-12 11:08:33 +0200 | [diff] [blame] | 1401 | rcu_read_unlock(); |
Paul E. McKenney | 2fd5907 | 2015-11-04 05:48:38 -0800 | [diff] [blame] | 1402 | local_irq_restore(*flags); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1403 | goto retry; |
| 1404 | } |
| 1405 | |
Peter Zijlstra | 63b6da3 | 2016-01-14 16:05:37 +0100 | [diff] [blame] | 1406 | if (ctx->task == TASK_TOMBSTONE || |
Elena Reshetova | 8c94abb | 2019-01-28 14:27:26 +0200 | [diff] [blame] | 1407 | !refcount_inc_not_zero(&ctx->refcount)) { |
Paul E. McKenney | 2fd5907 | 2015-11-04 05:48:38 -0800 | [diff] [blame] | 1408 | raw_spin_unlock(&ctx->lock); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1409 | ctx = NULL; |
Peter Zijlstra | 828b6f0 | 2016-01-27 21:59:04 +0100 | [diff] [blame] | 1410 | } else { |
| 1411 | WARN_ON_ONCE(ctx->task != task); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1412 | } |
| 1413 | } |
| 1414 | rcu_read_unlock(); |
Paul E. McKenney | 2fd5907 | 2015-11-04 05:48:38 -0800 | [diff] [blame] | 1415 | if (!ctx) |
| 1416 | local_irq_restore(*flags); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1417 | return ctx; |
| 1418 | } |
| 1419 | |
| 1420 | /* |
| 1421 | * Get the context for a task and increment its pin_count so it |
| 1422 | * can't get swapped to another task. This also increments its |
| 1423 | * reference count so that the context can't get freed. |
| 1424 | */ |
Peter Zijlstra | 8dc85d547 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 1425 | static struct perf_event_context * |
| 1426 | perf_pin_task_context(struct task_struct *task, int ctxn) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1427 | { |
| 1428 | struct perf_event_context *ctx; |
| 1429 | unsigned long flags; |
| 1430 | |
Peter Zijlstra | 8dc85d547 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 1431 | ctx = perf_lock_task_context(task, ctxn, &flags); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1432 | if (ctx) { |
| 1433 | ++ctx->pin_count; |
Thomas Gleixner | e625cce1 | 2009-11-17 18:02:06 +0100 | [diff] [blame] | 1434 | raw_spin_unlock_irqrestore(&ctx->lock, flags); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1435 | } |
| 1436 | return ctx; |
| 1437 | } |
| 1438 | |
| 1439 | static void perf_unpin_context(struct perf_event_context *ctx) |
| 1440 | { |
| 1441 | unsigned long flags; |
| 1442 | |
Thomas Gleixner | e625cce1 | 2009-11-17 18:02:06 +0100 | [diff] [blame] | 1443 | raw_spin_lock_irqsave(&ctx->lock, flags); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1444 | --ctx->pin_count; |
Thomas Gleixner | e625cce1 | 2009-11-17 18:02:06 +0100 | [diff] [blame] | 1445 | raw_spin_unlock_irqrestore(&ctx->lock, flags); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1446 | } |
| 1447 | |
Peter Zijlstra | f67218c | 2009-11-23 11:37:27 +0100 | [diff] [blame] | 1448 | /* |
| 1449 | * Update the record of the current time in a context. |
| 1450 | */ |
| 1451 | static void update_context_time(struct perf_event_context *ctx) |
| 1452 | { |
| 1453 | u64 now = perf_clock(); |
| 1454 | |
| 1455 | ctx->time += now - ctx->timestamp; |
| 1456 | ctx->timestamp = now; |
| 1457 | } |
| 1458 | |
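update_context_time() accumulates wall-clock deltas: each call folds (now - timestamp) into ctx->time and resets the timestamp. A tiny stand-alone sketch of that accumulation pattern, using an illustrative clock and names:

#include <stdio.h>
#include <time.h>

static unsigned long long ctx_time, ctx_timestamp;

static unsigned long long now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000000000ULL + ts.tv_nsec;
}

/* Fold the time elapsed since the last update into the running total. */
static void update_context_time_sketch(void)
{
	unsigned long long now = now_ns();

	ctx_time += now - ctx_timestamp;
	ctx_timestamp = now;
}

int main(void)
{
	ctx_timestamp = now_ns();
	update_context_time_sketch();
	update_context_time_sketch();
	printf("accumulated: %lluns\n", ctx_time);
	return 0;
}
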
Stephane Eranian | 4158755 | 2011-01-03 18:20:01 +0200 | [diff] [blame] | 1459 | static u64 perf_event_time(struct perf_event *event) |
| 1460 | { |
| 1461 | struct perf_event_context *ctx = event->ctx; |
Stephane Eranian | e5d1367 | 2011-02-14 11:20:01 +0200 | [diff] [blame] | 1462 | |
| 1463 | if (is_cgroup_event(event)) |
| 1464 | return perf_cgroup_event_time(event); |
| 1465 | |
Stephane Eranian | 4158755 | 2011-01-03 18:20:01 +0200 | [diff] [blame] | 1466 | return ctx ? ctx->time : 0; |
| 1467 | } |
| 1468 | |
Alexander Shishkin | 487f05e | 2017-01-19 18:43:30 +0200 | [diff] [blame] | 1469 | static enum event_type_t get_event_type(struct perf_event *event) |
| 1470 | { |
| 1471 | struct perf_event_context *ctx = event->ctx; |
| 1472 | enum event_type_t event_type; |
| 1473 | |
| 1474 | lockdep_assert_held(&ctx->lock); |
| 1475 | |
Alexander Shishkin | 3bda69c | 2017-07-18 14:08:34 +0300 | [diff] [blame] | 1476 | /* |
| 1477 | * It's 'group type', really, because if our group leader is |
| 1478 | * pinned, so are we. |
| 1479 | */ |
| 1480 | if (event->group_leader != event) |
| 1481 | event = event->group_leader; |
| 1482 | |
Alexander Shishkin | 487f05e | 2017-01-19 18:43:30 +0200 | [diff] [blame] | 1483 | event_type = event->attr.pinned ? EVENT_PINNED : EVENT_FLEXIBLE; |
| 1484 | if (!ctx->task) |
| 1485 | event_type |= EVENT_CPU; |
| 1486 | |
| 1487 | return event_type; |
| 1488 | } |
| 1489 | |
Alexey Budankov | 8e1a203 | 2017-09-08 11:47:03 +0300 | [diff] [blame] | 1490 | /* |
Peter Zijlstra | 161c85f | 2017-11-13 14:28:27 +0100 | [diff] [blame] | 1491 | * Helper function to initialize event group nodes. |
Alexey Budankov | 8e1a203 | 2017-09-08 11:47:03 +0300 | [diff] [blame] | 1492 | */ |
Peter Zijlstra | 161c85f | 2017-11-13 14:28:27 +0100 | [diff] [blame] | 1493 | static void init_event_group(struct perf_event *event) |
Alexey Budankov | 8e1a203 | 2017-09-08 11:47:03 +0300 | [diff] [blame] | 1494 | { |
| 1495 | RB_CLEAR_NODE(&event->group_node); |
| 1496 | event->group_index = 0; |
| 1497 | } |
| 1498 | |
| 1499 | /* |
| 1500 | * Extract pinned or flexible groups from the context |
Peter Zijlstra | 161c85f | 2017-11-13 14:28:27 +0100 | [diff] [blame] | 1501 | * based on event attrs bits. |
Alexey Budankov | 8e1a203 | 2017-09-08 11:47:03 +0300 | [diff] [blame] | 1502 | */ |
| 1503 | static struct perf_event_groups * |
| 1504 | get_event_groups(struct perf_event *event, struct perf_event_context *ctx) |
Frederic Weisbecker | 889ff01 | 2010-01-09 20:04:47 +0100 | [diff] [blame] | 1505 | { |
| 1506 | if (event->attr.pinned) |
| 1507 | return &ctx->pinned_groups; |
| 1508 | else |
| 1509 | return &ctx->flexible_groups; |
| 1510 | } |
| 1511 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1512 | /* |
Peter Zijlstra | 161c85f | 2017-11-13 14:28:27 +0100 | [diff] [blame] | 1513 |  * Helper function to initialize perf_event_groups trees. |
Alexey Budankov | 8e1a203 | 2017-09-08 11:47:03 +0300 | [diff] [blame] | 1514 | */ |
Peter Zijlstra | 161c85f | 2017-11-13 14:28:27 +0100 | [diff] [blame] | 1515 | static void perf_event_groups_init(struct perf_event_groups *groups) |
Alexey Budankov | 8e1a203 | 2017-09-08 11:47:03 +0300 | [diff] [blame] | 1516 | { |
| 1517 | groups->tree = RB_ROOT; |
| 1518 | groups->index = 0; |
| 1519 | } |
| 1520 | |
| 1521 | /* |
| 1522 | * Compare function for event groups; |
Peter Zijlstra | 161c85f | 2017-11-13 14:28:27 +0100 | [diff] [blame] | 1523 | * |
 | 1524 |  * Implements a composite key that sorts first by CPU and then by a virtual |
 | 1525 |  * index, which provides ordering when rotating groups for the same CPU. |
Alexey Budankov | 8e1a203 | 2017-09-08 11:47:03 +0300 | [diff] [blame] | 1526 | */ |
Peter Zijlstra | 161c85f | 2017-11-13 14:28:27 +0100 | [diff] [blame] | 1527 | static bool |
| 1528 | perf_event_groups_less(struct perf_event *left, struct perf_event *right) |
Alexey Budankov | 8e1a203 | 2017-09-08 11:47:03 +0300 | [diff] [blame] | 1529 | { |
Peter Zijlstra | 161c85f | 2017-11-13 14:28:27 +0100 | [diff] [blame] | 1530 | if (left->cpu < right->cpu) |
| 1531 | return true; |
| 1532 | if (left->cpu > right->cpu) |
| 1533 | return false; |
| 1534 | |
| 1535 | if (left->group_index < right->group_index) |
| 1536 | return true; |
| 1537 | if (left->group_index > right->group_index) |
| 1538 | return false; |
| 1539 | |
| 1540 | return false; |
Alexey Budankov | 8e1a203 | 2017-09-08 11:47:03 +0300 | [diff] [blame] | 1541 | } |
| 1542 | |
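The comparator above defines a composite key: CPU first, then the monotonically growing group_index, so events on the same CPU keep their insertion order. A short qsort() demonstration of the same ordering over an illustrative struct, not the kernel's:

#include <stdio.h>
#include <stdlib.h>

struct ev {
	int cpu;
	unsigned long group_index;	/* assigned in insertion order */
};

static int ev_cmp(const void *a, const void *b)
{
	const struct ev *l = a, *r = b;

	if (l->cpu != r->cpu)
		return l->cpu < r->cpu ? -1 : 1;
	if (l->group_index != r->group_index)
		return l->group_index < r->group_index ? -1 : 1;
	return 0;
}

int main(void)
{
	struct ev evs[] = {
		{ 1, 3 }, { 0, 2 }, { 1, 1 }, { 0, 4 },
	};
	int i;

	qsort(evs, 4, sizeof(evs[0]), ev_cmp);
	for (i = 0; i < 4; i++)		/* (0,2) (0,4) (1,1) (1,3) */
		printf("cpu=%d index=%lu\n", evs[i].cpu, evs[i].group_index);
	return 0;
}
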
| 1543 | /* |
Peter Zijlstra | 161c85f | 2017-11-13 14:28:27 +0100 | [diff] [blame] | 1544 |  * Insert @event into @groups' tree, using {@event->cpu, ++@groups->index} as |
 | 1545 |  * the key (see perf_event_groups_less). This places it last inside the CPU |
| 1546 | * subtree. |
Alexey Budankov | 8e1a203 | 2017-09-08 11:47:03 +0300 | [diff] [blame] | 1547 | */ |
| 1548 | static void |
| 1549 | perf_event_groups_insert(struct perf_event_groups *groups, |
Peter Zijlstra | 161c85f | 2017-11-13 14:28:27 +0100 | [diff] [blame] | 1550 | struct perf_event *event) |
Alexey Budankov | 8e1a203 | 2017-09-08 11:47:03 +0300 | [diff] [blame] | 1551 | { |
| 1552 | struct perf_event *node_event; |
| 1553 | struct rb_node *parent; |
| 1554 | struct rb_node **node; |
| 1555 | |
| 1556 | event->group_index = ++groups->index; |
| 1557 | |
| 1558 | node = &groups->tree.rb_node; |
| 1559 | parent = *node; |
| 1560 | |
| 1561 | while (*node) { |
| 1562 | parent = *node; |
Peter Zijlstra | 161c85f | 2017-11-13 14:28:27 +0100 | [diff] [blame] | 1563 | node_event = container_of(*node, struct perf_event, group_node); |
Alexey Budankov | 8e1a203 | 2017-09-08 11:47:03 +0300 | [diff] [blame] | 1564 | |
| 1565 | if (perf_event_groups_less(event, node_event)) |
| 1566 | node = &parent->rb_left; |
| 1567 | else |
| 1568 | node = &parent->rb_right; |
| 1569 | } |
| 1570 | |
| 1571 | rb_link_node(&event->group_node, parent, node); |
| 1572 | rb_insert_color(&event->group_node, &groups->tree); |
| 1573 | } |
| 1574 | |
| 1575 | /* |
Peter Zijlstra | 161c85f | 2017-11-13 14:28:27 +0100 | [diff] [blame] | 1576 | * Helper function to insert event into the pinned or flexible groups. |
Alexey Budankov | 8e1a203 | 2017-09-08 11:47:03 +0300 | [diff] [blame] | 1577 | */ |
| 1578 | static void |
| 1579 | add_event_to_groups(struct perf_event *event, struct perf_event_context *ctx) |
| 1580 | { |
| 1581 | struct perf_event_groups *groups; |
| 1582 | |
| 1583 | groups = get_event_groups(event, ctx); |
| 1584 | perf_event_groups_insert(groups, event); |
| 1585 | } |
| 1586 | |
| 1587 | /* |
Peter Zijlstra | 161c85f | 2017-11-13 14:28:27 +0100 | [diff] [blame] | 1588 | * Delete a group from a tree. |
Alexey Budankov | 8e1a203 | 2017-09-08 11:47:03 +0300 | [diff] [blame] | 1589 | */ |
| 1590 | static void |
| 1591 | perf_event_groups_delete(struct perf_event_groups *groups, |
Peter Zijlstra | 161c85f | 2017-11-13 14:28:27 +0100 | [diff] [blame] | 1592 | struct perf_event *event) |
Alexey Budankov | 8e1a203 | 2017-09-08 11:47:03 +0300 | [diff] [blame] | 1593 | { |
Peter Zijlstra | 161c85f | 2017-11-13 14:28:27 +0100 | [diff] [blame] | 1594 | WARN_ON_ONCE(RB_EMPTY_NODE(&event->group_node) || |
| 1595 | RB_EMPTY_ROOT(&groups->tree)); |
Alexey Budankov | 8e1a203 | 2017-09-08 11:47:03 +0300 | [diff] [blame] | 1596 | |
Peter Zijlstra | 161c85f | 2017-11-13 14:28:27 +0100 | [diff] [blame] | 1597 | rb_erase(&event->group_node, &groups->tree); |
Alexey Budankov | 8e1a203 | 2017-09-08 11:47:03 +0300 | [diff] [blame] | 1598 | init_event_group(event); |
| 1599 | } |
| 1600 | |
| 1601 | /* |
Peter Zijlstra | 161c85f | 2017-11-13 14:28:27 +0100 | [diff] [blame] | 1602 | * Helper function to delete event from its groups. |
Alexey Budankov | 8e1a203 | 2017-09-08 11:47:03 +0300 | [diff] [blame] | 1603 | */ |
| 1604 | static void |
| 1605 | del_event_from_groups(struct perf_event *event, struct perf_event_context *ctx) |
| 1606 | { |
| 1607 | struct perf_event_groups *groups; |
| 1608 | |
| 1609 | groups = get_event_groups(event, ctx); |
| 1610 | perf_event_groups_delete(groups, event); |
| 1611 | } |
| 1612 | |
| 1613 | /* |
Peter Zijlstra | 161c85f | 2017-11-13 14:28:27 +0100 | [diff] [blame] | 1614 | * Get the leftmost event in the @cpu subtree. |
Alexey Budankov | 8e1a203 | 2017-09-08 11:47:03 +0300 | [diff] [blame] | 1615 | */ |
| 1616 | static struct perf_event * |
| 1617 | perf_event_groups_first(struct perf_event_groups *groups, int cpu) |
| 1618 | { |
| 1619 | struct perf_event *node_event = NULL, *match = NULL; |
| 1620 | struct rb_node *node = groups->tree.rb_node; |
| 1621 | |
| 1622 | while (node) { |
Peter Zijlstra | 161c85f | 2017-11-13 14:28:27 +0100 | [diff] [blame] | 1623 | node_event = container_of(node, struct perf_event, group_node); |
Alexey Budankov | 8e1a203 | 2017-09-08 11:47:03 +0300 | [diff] [blame] | 1624 | |
| 1625 | if (cpu < node_event->cpu) { |
| 1626 | node = node->rb_left; |
| 1627 | } else if (cpu > node_event->cpu) { |
| 1628 | node = node->rb_right; |
| 1629 | } else { |
| 1630 | match = node_event; |
| 1631 | node = node->rb_left; |
| 1632 | } |
| 1633 | } |
| 1634 | |
| 1635 | return match; |
| 1636 | } |
| 1637 | |
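perf_event_groups_first() is a leftmost-match search: on an equal key it records the candidate and keeps descending left, so it ends up on the first event of the @cpu subtree. The same idea expressed as a lower-bound scan over a sorted array, purely for illustration:

#include <stdio.h>

/* Return the index of the first element equal to key, or -1. The loop
 * mirrors the rb-tree walk: on a match, remember it and keep going left. */
static int leftmost_match(const int *a, int n, int key)
{
	int lo = 0, hi = n - 1, match = -1;

	while (lo <= hi) {
		int mid = lo + (hi - lo) / 2;

		if (key < a[mid]) {
			hi = mid - 1;
		} else if (key > a[mid]) {
			lo = mid + 1;
		} else {
			match = mid;	/* remember, then go left */
			hi = mid - 1;
		}
	}
	return match;
}

int main(void)
{
	int cpus[] = { 0, 0, 1, 1, 1, 2, 3 };

	printf("%d\n", leftmost_match(cpus, 7, 1));	/* 2: first cpu==1 */
	printf("%d\n", leftmost_match(cpus, 7, 4));	/* -1: no such cpu */
	return 0;
}
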
| 1638 | /* |
Peter Zijlstra | 1cac7b1 | 2017-11-13 14:28:30 +0100 | [diff] [blame] | 1639 |  * Like rb_next(), but constrained to the @cpu subtree. |
| 1640 | */ |
| 1641 | static struct perf_event * |
| 1642 | perf_event_groups_next(struct perf_event *event) |
| 1643 | { |
| 1644 | struct perf_event *next; |
| 1645 | |
| 1646 | next = rb_entry_safe(rb_next(&event->group_node), typeof(*event), group_node); |
| 1647 | if (next && next->cpu == event->cpu) |
| 1648 | return next; |
| 1649 | |
| 1650 | return NULL; |
| 1651 | } |
| 1652 | |
| 1653 | /* |
Peter Zijlstra | 161c85f | 2017-11-13 14:28:27 +0100 | [diff] [blame] | 1654 | * Iterate through the whole groups tree. |
Alexey Budankov | 8e1a203 | 2017-09-08 11:47:03 +0300 | [diff] [blame] | 1655 | */ |
Peter Zijlstra | 6e6804d | 2017-11-13 14:28:41 +0100 | [diff] [blame] | 1656 | #define perf_event_groups_for_each(event, groups) \ |
| 1657 | for (event = rb_entry_safe(rb_first(&((groups)->tree)), \ |
| 1658 | typeof(*event), group_node); event; \ |
| 1659 | event = rb_entry_safe(rb_next(&event->group_node), \ |
| 1660 | typeof(*event), group_node)) |
Alexey Budankov | 8e1a203 | 2017-09-08 11:47:03 +0300 | [diff] [blame] | 1661 | |
| 1662 | /* |
Tobias Tefke | 788faab | 2018-07-09 12:57:15 +0200 | [diff] [blame] | 1663 |  * Add an event to the lists for its context. |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1664 | * Must be called with ctx->mutex and ctx->lock held. |
| 1665 | */ |
| 1666 | static void |
| 1667 | list_add_event(struct perf_event *event, struct perf_event_context *ctx) |
| 1668 | { |
Peter Zijlstra | c994d61 | 2016-01-08 09:20:23 +0100 | [diff] [blame] | 1669 | lockdep_assert_held(&ctx->lock); |
| 1670 | |
Peter Zijlstra | 8a49542 | 2010-05-27 15:47:49 +0200 | [diff] [blame] | 1671 | WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT); |
| 1672 | event->attach_state |= PERF_ATTACH_CONTEXT; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1673 | |
Peter Zijlstra | 0d3d73a | 2017-09-05 14:16:28 +0200 | [diff] [blame] | 1674 | event->tstamp = perf_event_time(event); |
| 1675 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1676 | /* |
Peter Zijlstra | 8a49542 | 2010-05-27 15:47:49 +0200 | [diff] [blame] | 1677 | 	 * If we're a stand-alone event or group leader, we go to the context |
 | 1678 | 	 * list; group events are kept attached to the group so that |
 | 1679 | 	 * perf_group_detach() can, at all times, locate all siblings. |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1680 | */ |
Peter Zijlstra | 8a49542 | 2010-05-27 15:47:49 +0200 | [diff] [blame] | 1681 | if (event->group_leader == event) { |
David Carrillo-Cisneros | 4ff6a8d | 2016-08-17 13:55:05 -0700 | [diff] [blame] | 1682 | event->group_caps = event->event_caps; |
Alexey Budankov | 8e1a203 | 2017-09-08 11:47:03 +0300 | [diff] [blame] | 1683 | add_event_to_groups(event, ctx); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1684 | } |
| 1685 | |
David Carrillo-Cisneros | db4a835 | 2016-08-02 00:48:12 -0700 | [diff] [blame] | 1686 | list_update_cgroup_event(event, ctx, true); |
Stephane Eranian | e5d1367 | 2011-02-14 11:20:01 +0200 | [diff] [blame] | 1687 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1688 | list_add_rcu(&event->event_entry, &ctx->event_list); |
| 1689 | ctx->nr_events++; |
| 1690 | if (event->attr.inherit_stat) |
| 1691 | ctx->nr_stat++; |
Peter Zijlstra | 5a3126d | 2013-10-07 17:12:48 +0200 | [diff] [blame] | 1692 | |
| 1693 | ctx->generation++; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1694 | } |
| 1695 | |
Arnaldo Carvalho de Melo | c320c7b | 2010-10-20 12:50:11 -0200 | [diff] [blame] | 1696 | /* |
Jiri Olsa | 0231bb5 | 2013-02-01 11:23:45 +0100 | [diff] [blame] | 1697 | * Initialize event state based on the perf_event_attr::disabled. |
| 1698 | */ |
| 1699 | static inline void perf_event__state_init(struct perf_event *event) |
| 1700 | { |
| 1701 | event->state = event->attr.disabled ? PERF_EVENT_STATE_OFF : |
| 1702 | PERF_EVENT_STATE_INACTIVE; |
| 1703 | } |
| 1704 | |
Peter Zijlstra | a723968 | 2015-09-09 19:06:33 +0200 | [diff] [blame] | 1705 | static void __perf_event_read_size(struct perf_event *event, int nr_siblings) |
Arnaldo Carvalho de Melo | c320c7b | 2010-10-20 12:50:11 -0200 | [diff] [blame] | 1706 | { |
| 1707 | int entry = sizeof(u64); /* value */ |
| 1708 | int size = 0; |
| 1709 | int nr = 1; |
| 1710 | |
| 1711 | if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) |
| 1712 | size += sizeof(u64); |
| 1713 | |
| 1714 | if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) |
| 1715 | size += sizeof(u64); |
| 1716 | |
| 1717 | if (event->attr.read_format & PERF_FORMAT_ID) |
| 1718 | entry += sizeof(u64); |
| 1719 | |
| 1720 | if (event->attr.read_format & PERF_FORMAT_GROUP) { |
Peter Zijlstra | a723968 | 2015-09-09 19:06:33 +0200 | [diff] [blame] | 1721 | nr += nr_siblings; |
Arnaldo Carvalho de Melo | c320c7b | 2010-10-20 12:50:11 -0200 | [diff] [blame] | 1722 | size += sizeof(u64); |
| 1723 | } |
| 1724 | |
| 1725 | size += entry * nr; |
| 1726 | event->read_size = size; |
| 1727 | } |
| 1728 | |
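The read_size computed above is exactly the number of bytes a read() on the event fd returns for the chosen read_format. A stand-alone userspace re-computation for one concrete format combination, using the PERF_FORMAT_* constants from <linux/perf_event.h> (a sketch, not the kernel function itself):

#include <stdio.h>
#include <linux/perf_event.h>

/* Mirror of the size computation: one u64 value per counter, plus the
 * optional u64 fields selected by read_format. */
static int read_size(unsigned long long read_format, int nr_siblings)
{
	int entry = sizeof(unsigned long long);		/* value */
	int size = 0;
	int nr = 1;

	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		size += sizeof(unsigned long long);
	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		size += sizeof(unsigned long long);
	if (read_format & PERF_FORMAT_ID)
		entry += sizeof(unsigned long long);
	if (read_format & PERF_FORMAT_GROUP) {
		nr += nr_siblings;
		size += sizeof(unsigned long long);	/* the 'nr' field */
	}
	return size + entry * nr;
}

int main(void)
{
	/* Group of a leader + 2 siblings, with ids and both time fields:
	 * nr + time_enabled + time_running + 3 * (value + id) = 72 bytes. */
	printf("%d\n", read_size(PERF_FORMAT_GROUP | PERF_FORMAT_ID |
				 PERF_FORMAT_TOTAL_TIME_ENABLED |
				 PERF_FORMAT_TOTAL_TIME_RUNNING, 2));
	return 0;
}
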
Peter Zijlstra | a723968 | 2015-09-09 19:06:33 +0200 | [diff] [blame] | 1729 | static void __perf_event_header_size(struct perf_event *event, u64 sample_type) |
Arnaldo Carvalho de Melo | c320c7b | 2010-10-20 12:50:11 -0200 | [diff] [blame] | 1730 | { |
| 1731 | struct perf_sample_data *data; |
Arnaldo Carvalho de Melo | c320c7b | 2010-10-20 12:50:11 -0200 | [diff] [blame] | 1732 | u16 size = 0; |
| 1733 | |
Arnaldo Carvalho de Melo | c320c7b | 2010-10-20 12:50:11 -0200 | [diff] [blame] | 1734 | if (sample_type & PERF_SAMPLE_IP) |
| 1735 | size += sizeof(data->ip); |
| 1736 | |
Arnaldo Carvalho de Melo | 6844c09 | 2010-12-03 16:36:35 -0200 | [diff] [blame] | 1737 | if (sample_type & PERF_SAMPLE_ADDR) |
| 1738 | size += sizeof(data->addr); |
| 1739 | |
| 1740 | if (sample_type & PERF_SAMPLE_PERIOD) |
| 1741 | size += sizeof(data->period); |
| 1742 | |
Andi Kleen | c3feedf | 2013-01-24 16:10:28 +0100 | [diff] [blame] | 1743 | if (sample_type & PERF_SAMPLE_WEIGHT) |
| 1744 | size += sizeof(data->weight); |
| 1745 | |
Arnaldo Carvalho de Melo | 6844c09 | 2010-12-03 16:36:35 -0200 | [diff] [blame] | 1746 | if (sample_type & PERF_SAMPLE_READ) |
| 1747 | size += event->read_size; |
| 1748 | |
Stephane Eranian | d6be9ad | 2013-01-24 16:10:31 +0100 | [diff] [blame] | 1749 | if (sample_type & PERF_SAMPLE_DATA_SRC) |
| 1750 | size += sizeof(data->data_src.val); |
| 1751 | |
Andi Kleen | fdfbbd0 | 2013-09-20 07:40:39 -0700 | [diff] [blame] | 1752 | if (sample_type & PERF_SAMPLE_TRANSACTION) |
| 1753 | size += sizeof(data->txn); |
| 1754 | |
Kan Liang | fc7ce9c | 2017-08-28 20:52:49 -0400 | [diff] [blame] | 1755 | if (sample_type & PERF_SAMPLE_PHYS_ADDR) |
| 1756 | size += sizeof(data->phys_addr); |
| 1757 | |
Arnaldo Carvalho de Melo | 6844c09 | 2010-12-03 16:36:35 -0200 | [diff] [blame] | 1758 | event->header_size = size; |
| 1759 | } |
| 1760 | |
Peter Zijlstra | a723968 | 2015-09-09 19:06:33 +0200 | [diff] [blame] | 1761 | /* |
| 1762 | * Called at perf_event creation and when events are attached/detached from a |
| 1763 | * group. |
| 1764 | */ |
| 1765 | static void perf_event__header_size(struct perf_event *event) |
| 1766 | { |
| 1767 | __perf_event_read_size(event, |
| 1768 | event->group_leader->nr_siblings); |
| 1769 | __perf_event_header_size(event, event->attr.sample_type); |
| 1770 | } |
| 1771 | |
Arnaldo Carvalho de Melo | 6844c09 | 2010-12-03 16:36:35 -0200 | [diff] [blame] | 1772 | static void perf_event__id_header_size(struct perf_event *event) |
| 1773 | { |
| 1774 | struct perf_sample_data *data; |
| 1775 | u64 sample_type = event->attr.sample_type; |
| 1776 | u16 size = 0; |
| 1777 | |
Arnaldo Carvalho de Melo | c320c7b | 2010-10-20 12:50:11 -0200 | [diff] [blame] | 1778 | if (sample_type & PERF_SAMPLE_TID) |
| 1779 | size += sizeof(data->tid_entry); |
| 1780 | |
| 1781 | if (sample_type & PERF_SAMPLE_TIME) |
| 1782 | size += sizeof(data->time); |
| 1783 | |
Adrian Hunter | ff3d527 | 2013-08-27 11:23:07 +0300 | [diff] [blame] | 1784 | if (sample_type & PERF_SAMPLE_IDENTIFIER) |
| 1785 | size += sizeof(data->id); |
| 1786 | |
Arnaldo Carvalho de Melo | c320c7b | 2010-10-20 12:50:11 -0200 | [diff] [blame] | 1787 | if (sample_type & PERF_SAMPLE_ID) |
| 1788 | size += sizeof(data->id); |
| 1789 | |
| 1790 | if (sample_type & PERF_SAMPLE_STREAM_ID) |
| 1791 | size += sizeof(data->stream_id); |
| 1792 | |
| 1793 | if (sample_type & PERF_SAMPLE_CPU) |
| 1794 | size += sizeof(data->cpu_entry); |
| 1795 | |
Arnaldo Carvalho de Melo | 6844c09 | 2010-12-03 16:36:35 -0200 | [diff] [blame] | 1796 | event->id_header_size = size; |
Arnaldo Carvalho de Melo | c320c7b | 2010-10-20 12:50:11 -0200 | [diff] [blame] | 1797 | } |
| 1798 | |
Peter Zijlstra | a723968 | 2015-09-09 19:06:33 +0200 | [diff] [blame] | 1799 | static bool perf_event_validate_size(struct perf_event *event) |
| 1800 | { |
| 1801 | /* |
| 1802 | * The values computed here will be over-written when we actually |
| 1803 | * attach the event. |
| 1804 | */ |
| 1805 | __perf_event_read_size(event, event->group_leader->nr_siblings + 1); |
| 1806 | __perf_event_header_size(event, event->attr.sample_type & ~PERF_SAMPLE_READ); |
| 1807 | perf_event__id_header_size(event); |
| 1808 | |
| 1809 | /* |
| 1810 | * Sum the lot; should not exceed the 64k limit we have on records. |
| 1811 | * Conservative limit to allow for callchains and other variable fields. |
| 1812 | */ |
| 1813 | if (event->read_size + event->header_size + |
| 1814 | event->id_header_size + sizeof(struct perf_event_header) >= 16*1024) |
| 1815 | return false; |
| 1816 | |
| 1817 | return true; |
| 1818 | } |
| 1819 | |
Peter Zijlstra | 8a49542 | 2010-05-27 15:47:49 +0200 | [diff] [blame] | 1820 | static void perf_group_attach(struct perf_event *event) |
| 1821 | { |
Arnaldo Carvalho de Melo | c320c7b | 2010-10-20 12:50:11 -0200 | [diff] [blame] | 1822 | struct perf_event *group_leader = event->group_leader, *pos; |
Peter Zijlstra | 8a49542 | 2010-05-27 15:47:49 +0200 | [diff] [blame] | 1823 | |
Peter Zijlstra | a76a82a | 2017-01-26 16:39:55 +0100 | [diff] [blame] | 1824 | lockdep_assert_held(&event->ctx->lock); |
| 1825 | |
Peter Zijlstra | 74c3337 | 2010-10-15 11:40:29 +0200 | [diff] [blame] | 1826 | /* |
| 1827 | * We can have double attach due to group movement in perf_event_open. |
| 1828 | */ |
| 1829 | if (event->attach_state & PERF_ATTACH_GROUP) |
| 1830 | return; |
| 1831 | |
Peter Zijlstra | 8a49542 | 2010-05-27 15:47:49 +0200 | [diff] [blame] | 1832 | event->attach_state |= PERF_ATTACH_GROUP; |
| 1833 | |
| 1834 | if (group_leader == event) |
| 1835 | return; |
| 1836 | |
Peter Zijlstra | 652884f | 2015-01-23 11:20:10 +0100 | [diff] [blame] | 1837 | WARN_ON_ONCE(group_leader->ctx != event->ctx); |
| 1838 | |
David Carrillo-Cisneros | 4ff6a8d | 2016-08-17 13:55:05 -0700 | [diff] [blame] | 1839 | group_leader->group_caps &= event->event_caps; |
Peter Zijlstra | 8a49542 | 2010-05-27 15:47:49 +0200 | [diff] [blame] | 1840 | |
Peter Zijlstra | 8343aae | 2017-11-13 14:28:33 +0100 | [diff] [blame] | 1841 | list_add_tail(&event->sibling_list, &group_leader->sibling_list); |
Peter Zijlstra | 8a49542 | 2010-05-27 15:47:49 +0200 | [diff] [blame] | 1842 | group_leader->nr_siblings++; |
Arnaldo Carvalho de Melo | c320c7b | 2010-10-20 12:50:11 -0200 | [diff] [blame] | 1843 | |
| 1844 | perf_event__header_size(group_leader); |
| 1845 | |
Peter Zijlstra | edb3959 | 2018-03-15 17:36:56 +0100 | [diff] [blame] | 1846 | for_each_sibling_event(pos, group_leader) |
Arnaldo Carvalho de Melo | c320c7b | 2010-10-20 12:50:11 -0200 | [diff] [blame] | 1847 | perf_event__header_size(pos); |
Peter Zijlstra | 8a49542 | 2010-05-27 15:47:49 +0200 | [diff] [blame] | 1848 | } |
| 1849 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1850 | /* |
Tobias Tefke | 788faab | 2018-07-09 12:57:15 +0200 | [diff] [blame] | 1851 | * Remove an event from the lists for its context. |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1852 | * Must be called with ctx->mutex and ctx->lock held. |
| 1853 | */ |
| 1854 | static void |
| 1855 | list_del_event(struct perf_event *event, struct perf_event_context *ctx) |
| 1856 | { |
Peter Zijlstra | 652884f | 2015-01-23 11:20:10 +0100 | [diff] [blame] | 1857 | WARN_ON_ONCE(event->ctx != ctx); |
| 1858 | lockdep_assert_held(&ctx->lock); |
| 1859 | |
Peter Zijlstra | 8a49542 | 2010-05-27 15:47:49 +0200 | [diff] [blame] | 1860 | /* |
| 1861 | * We can have double detach due to exit/hot-unplug + close. |
| 1862 | */ |
| 1863 | if (!(event->attach_state & PERF_ATTACH_CONTEXT)) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1864 | return; |
Peter Zijlstra | 8a49542 | 2010-05-27 15:47:49 +0200 | [diff] [blame] | 1865 | |
| 1866 | event->attach_state &= ~PERF_ATTACH_CONTEXT; |
| 1867 | |
David Carrillo-Cisneros | db4a835 | 2016-08-02 00:48:12 -0700 | [diff] [blame] | 1868 | list_update_cgroup_event(event, ctx, false); |
Stephane Eranian | e5d1367 | 2011-02-14 11:20:01 +0200 | [diff] [blame] | 1869 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1870 | ctx->nr_events--; |
| 1871 | if (event->attr.inherit_stat) |
| 1872 | ctx->nr_stat--; |
| 1873 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1874 | list_del_rcu(&event->event_entry); |
| 1875 | |
Peter Zijlstra | 8a49542 | 2010-05-27 15:47:49 +0200 | [diff] [blame] | 1876 | if (event->group_leader == event) |
Alexey Budankov | 8e1a203 | 2017-09-08 11:47:03 +0300 | [diff] [blame] | 1877 | del_event_from_groups(event, ctx); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1878 | |
Stephane Eranian | b2e74a2 | 2009-11-26 09:24:30 -0800 | [diff] [blame] | 1879 | /* |
| 1880 | * If event was in error state, then keep it |
| 1881 | * that way, otherwise bogus counts will be |
| 1882 | * returned on read(). The only way to get out |
| 1883 | * of error state is by explicit re-enabling |
| 1884 | * of the event |
| 1885 | */ |
| 1886 | if (event->state > PERF_EVENT_STATE_OFF) |
Peter Zijlstra | 0d3d73a | 2017-09-05 14:16:28 +0200 | [diff] [blame] | 1887 | perf_event_set_state(event, PERF_EVENT_STATE_OFF); |
Peter Zijlstra | 5a3126d | 2013-10-07 17:12:48 +0200 | [diff] [blame] | 1888 | |
| 1889 | ctx->generation++; |
Peter Zijlstra | 050735b | 2010-05-11 11:51:53 +0200 | [diff] [blame] | 1890 | } |
| 1891 | |
Alexander Shishkin | ab43762 | 2019-08-06 11:46:00 +0300 | [diff] [blame] | 1892 | static int |
| 1893 | perf_aux_output_match(struct perf_event *event, struct perf_event *aux_event) |
| 1894 | { |
| 1895 | if (!has_aux(aux_event)) |
| 1896 | return 0; |
| 1897 | |
| 1898 | if (!event->pmu->aux_output_match) |
| 1899 | return 0; |
| 1900 | |
| 1901 | return event->pmu->aux_output_match(aux_event); |
| 1902 | } |
| 1903 | |
| 1904 | static void put_event(struct perf_event *event); |
| 1905 | static void event_sched_out(struct perf_event *event, |
| 1906 | struct perf_cpu_context *cpuctx, |
| 1907 | struct perf_event_context *ctx); |
| 1908 | |
| 1909 | static void perf_put_aux_event(struct perf_event *event) |
| 1910 | { |
| 1911 | struct perf_event_context *ctx = event->ctx; |
| 1912 | struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); |
| 1913 | struct perf_event *iter; |
| 1914 | |
| 1915 | /* |
 | 1916 | 	 * If the event uses an aux_event, tear down the link. |
| 1917 | */ |
| 1918 | if (event->aux_event) { |
| 1919 | iter = event->aux_event; |
| 1920 | event->aux_event = NULL; |
| 1921 | put_event(iter); |
| 1922 | return; |
| 1923 | } |
| 1924 | |
| 1925 | /* |
| 1926 | * If the event is an aux_event, tear down all links to |
| 1927 | * it from other events. |
| 1928 | */ |
| 1929 | for_each_sibling_event(iter, event->group_leader) { |
| 1930 | if (iter->aux_event != event) |
| 1931 | continue; |
| 1932 | |
| 1933 | iter->aux_event = NULL; |
| 1934 | put_event(event); |
| 1935 | |
| 1936 | /* |
| 1937 | * If it's ACTIVE, schedule it out and put it into ERROR |
| 1938 | * state so that we don't try to schedule it again. Note |
| 1939 | * that perf_event_enable() will clear the ERROR status. |
| 1940 | */ |
| 1941 | event_sched_out(iter, cpuctx, ctx); |
| 1942 | perf_event_set_state(event, PERF_EVENT_STATE_ERROR); |
| 1943 | } |
| 1944 | } |
| 1945 | |
Alexander Shishkin | a4faf00 | 2019-10-25 17:08:33 +0300 | [diff] [blame] | 1946 | static bool perf_need_aux_event(struct perf_event *event) |
| 1947 | { |
| 1948 | return !!event->attr.aux_output || !!event->attr.aux_sample_size; |
| 1949 | } |
| 1950 | |
Alexander Shishkin | ab43762 | 2019-08-06 11:46:00 +0300 | [diff] [blame] | 1951 | static int perf_get_aux_event(struct perf_event *event, |
| 1952 | struct perf_event *group_leader) |
| 1953 | { |
| 1954 | /* |
| 1955 | * Our group leader must be an aux event if we want to be |
| 1956 | * an aux_output. This way, the aux event will precede its |
| 1957 | * aux_output events in the group, and therefore will always |
| 1958 | * schedule first. |
| 1959 | */ |
| 1960 | if (!group_leader) |
| 1961 | return 0; |
| 1962 | |
Alexander Shishkin | a4faf00 | 2019-10-25 17:08:33 +0300 | [diff] [blame] | 1963 | /* |
| 1964 | * aux_output and aux_sample_size are mutually exclusive. |
| 1965 | */ |
| 1966 | if (event->attr.aux_output && event->attr.aux_sample_size) |
| 1967 | return 0; |
| 1968 | |
| 1969 | if (event->attr.aux_output && |
| 1970 | !perf_aux_output_match(event, group_leader)) |
| 1971 | return 0; |
| 1972 | |
| 1973 | if (event->attr.aux_sample_size && !group_leader->pmu->snapshot_aux) |
Alexander Shishkin | ab43762 | 2019-08-06 11:46:00 +0300 | [diff] [blame] | 1974 | return 0; |
| 1975 | |
| 1976 | if (!atomic_long_inc_not_zero(&group_leader->refcount)) |
| 1977 | return 0; |
| 1978 | |
| 1979 | /* |
| 1980 | * Link aux_outputs to their aux event; this is undone in |
| 1981 | * perf_group_detach() by perf_put_aux_event(). When the |
 | 1982 | 	 * group is torn down, the aux_output events lose their |
| 1983 | * link to the aux_event and can't schedule any more. |
| 1984 | */ |
| 1985 | event->aux_event = group_leader; |
| 1986 | |
| 1987 | return 1; |
| 1988 | } |
| 1989 | |
Peter Zijlstra | ab6f824 | 2019-08-07 11:17:00 +0200 | [diff] [blame] | 1990 | static inline struct list_head *get_event_list(struct perf_event *event) |
| 1991 | { |
| 1992 | struct perf_event_context *ctx = event->ctx; |
| 1993 | return event->attr.pinned ? &ctx->pinned_active : &ctx->flexible_active; |
| 1994 | } |
| 1995 | |
Peter Zijlstra | 8a49542 | 2010-05-27 15:47:49 +0200 | [diff] [blame] | 1996 | static void perf_group_detach(struct perf_event *event) |
Peter Zijlstra | 050735b | 2010-05-11 11:51:53 +0200 | [diff] [blame] | 1997 | { |
| 1998 | struct perf_event *sibling, *tmp; |
Peter Zijlstra | 6668128 | 2017-11-13 14:28:38 +0100 | [diff] [blame] | 1999 | struct perf_event_context *ctx = event->ctx; |
Peter Zijlstra | 8a49542 | 2010-05-27 15:47:49 +0200 | [diff] [blame] | 2000 | |
Peter Zijlstra | 6668128 | 2017-11-13 14:28:38 +0100 | [diff] [blame] | 2001 | lockdep_assert_held(&ctx->lock); |
Peter Zijlstra | a76a82a | 2017-01-26 16:39:55 +0100 | [diff] [blame] | 2002 | |
Peter Zijlstra | 8a49542 | 2010-05-27 15:47:49 +0200 | [diff] [blame] | 2003 | /* |
| 2004 | * We can have double detach due to exit/hot-unplug + close. |
| 2005 | */ |
| 2006 | if (!(event->attach_state & PERF_ATTACH_GROUP)) |
| 2007 | return; |
| 2008 | |
| 2009 | event->attach_state &= ~PERF_ATTACH_GROUP; |
| 2010 | |
Alexander Shishkin | ab43762 | 2019-08-06 11:46:00 +0300 | [diff] [blame] | 2011 | perf_put_aux_event(event); |
| 2012 | |
Peter Zijlstra | 8a49542 | 2010-05-27 15:47:49 +0200 | [diff] [blame] | 2013 | /* |
| 2014 | * If this is a sibling, remove it from its group. |
| 2015 | */ |
| 2016 | if (event->group_leader != event) { |
Peter Zijlstra | 8343aae | 2017-11-13 14:28:33 +0100 | [diff] [blame] | 2017 | list_del_init(&event->sibling_list); |
Peter Zijlstra | 8a49542 | 2010-05-27 15:47:49 +0200 | [diff] [blame] | 2018 | event->group_leader->nr_siblings--; |
Arnaldo Carvalho de Melo | c320c7b | 2010-10-20 12:50:11 -0200 | [diff] [blame] | 2019 | goto out; |
Peter Zijlstra | 8a49542 | 2010-05-27 15:47:49 +0200 | [diff] [blame] | 2020 | } |
| 2021 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2022 | /* |
| 2023 | * If this was a group event with sibling events then |
| 2024 | * upgrade the siblings to singleton events by adding them |
Peter Zijlstra | 8a49542 | 2010-05-27 15:47:49 +0200 | [diff] [blame] | 2025 | * to whatever list we are on. |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2026 | */ |
Peter Zijlstra | 8343aae | 2017-11-13 14:28:33 +0100 | [diff] [blame] | 2027 | list_for_each_entry_safe(sibling, tmp, &event->sibling_list, sibling_list) { |
Alexey Budankov | 8e1a203 | 2017-09-08 11:47:03 +0300 | [diff] [blame] | 2028 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2029 | sibling->group_leader = sibling; |
Mark Rutland | 2486836 | 2018-03-16 12:51:40 +0000 | [diff] [blame] | 2030 | list_del_init(&sibling->sibling_list); |
Frederic Weisbecker | d6f962b | 2010-01-10 01:25:51 +0100 | [diff] [blame] | 2031 | |
| 2032 | /* Inherit group flags from the previous leader */ |
David Carrillo-Cisneros | 4ff6a8d | 2016-08-17 13:55:05 -0700 | [diff] [blame] | 2033 | sibling->group_caps = event->group_caps; |
Peter Zijlstra | 652884f | 2015-01-23 11:20:10 +0100 | [diff] [blame] | 2034 | |
Alexey Budankov | 8e1a203 | 2017-09-08 11:47:03 +0300 | [diff] [blame] | 2035 | if (!RB_EMPTY_NODE(&event->group_node)) { |
Alexey Budankov | 8e1a203 | 2017-09-08 11:47:03 +0300 | [diff] [blame] | 2036 | add_event_to_groups(sibling, event->ctx); |
Peter Zijlstra | 6668128 | 2017-11-13 14:28:38 +0100 | [diff] [blame] | 2037 | |
Peter Zijlstra | ab6f824 | 2019-08-07 11:17:00 +0200 | [diff] [blame] | 2038 | if (sibling->state == PERF_EVENT_STATE_ACTIVE) |
| 2039 | list_add_tail(&sibling->active_list, get_event_list(sibling)); |
Alexey Budankov | 8e1a203 | 2017-09-08 11:47:03 +0300 | [diff] [blame] | 2040 | } |
| 2041 | |
Peter Zijlstra | 652884f | 2015-01-23 11:20:10 +0100 | [diff] [blame] | 2042 | WARN_ON_ONCE(sibling->ctx != event->ctx); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2043 | } |
Arnaldo Carvalho de Melo | c320c7b | 2010-10-20 12:50:11 -0200 | [diff] [blame] | 2044 | |
| 2045 | out: |
| 2046 | perf_event__header_size(event->group_leader); |
| 2047 | |
Peter Zijlstra | edb3959 | 2018-03-15 17:36:56 +0100 | [diff] [blame] | 2048 | for_each_sibling_event(tmp, event->group_leader) |
Arnaldo Carvalho de Melo | c320c7b | 2010-10-20 12:50:11 -0200 | [diff] [blame] | 2049 | perf_event__header_size(tmp); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2050 | } |
| 2051 | |
Jiri Olsa | fadfe7b | 2014-08-01 14:33:02 +0200 | [diff] [blame] | 2052 | static bool is_orphaned_event(struct perf_event *event) |
| 2053 | { |
Peter Zijlstra | a69b0ca | 2016-02-24 18:45:44 +0100 | [diff] [blame] | 2054 | return event->state == PERF_EVENT_STATE_DEAD; |
Jiri Olsa | fadfe7b | 2014-08-01 14:33:02 +0200 | [diff] [blame] | 2055 | } |
| 2056 | |
Mark Rutland | 2c81a64 | 2016-06-14 16:10:41 +0100 | [diff] [blame] | 2057 | static inline int __pmu_filter_match(struct perf_event *event) |
Mark Rutland | 66eb579 | 2015-05-13 17:12:23 +0100 | [diff] [blame] | 2058 | { |
| 2059 | struct pmu *pmu = event->pmu; |
| 2060 | return pmu->filter_match ? pmu->filter_match(event) : 1; |
| 2061 | } |
| 2062 | |
Mark Rutland | 2c81a64 | 2016-06-14 16:10:41 +0100 | [diff] [blame] | 2063 | /* |
| 2064 | * Check whether we should attempt to schedule an event group based on |
| 2065 | * PMU-specific filtering. An event group can consist of HW and SW events, |
| 2066 |  * potentially with a SW leader, so we must check all the filters to |
| 2067 | * determine whether a group is schedulable: |
| 2068 | */ |
| 2069 | static inline int pmu_filter_match(struct perf_event *event) |
| 2070 | { |
Peter Zijlstra | edb3959 | 2018-03-15 17:36:56 +0100 | [diff] [blame] | 2071 | struct perf_event *sibling; |
Mark Rutland | 2c81a64 | 2016-06-14 16:10:41 +0100 | [diff] [blame] | 2072 | |
| 2073 | if (!__pmu_filter_match(event)) |
| 2074 | return 0; |
| 2075 | |
Peter Zijlstra | edb3959 | 2018-03-15 17:36:56 +0100 | [diff] [blame] | 2076 | for_each_sibling_event(sibling, event) { |
| 2077 | if (!__pmu_filter_match(sibling)) |
Mark Rutland | 2c81a64 | 2016-06-14 16:10:41 +0100 | [diff] [blame] | 2078 | return 0; |
| 2079 | } |
| 2080 | |
| 2081 | return 1; |
| 2082 | } |
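/*
 * Illustrative sketch (not part of this file): a PMU driver for a
 * heterogeneous system might implement pmu::filter_match() roughly as
 * below, so that events belonging to one micro-architecture's PMU are
 * filtered out when the group would be scheduled on another CPU type.
 * The "example_pmu" structure and its "supported_cpus" mask are
 * hypothetical names used only for this sketch.
 *
 *	static int example_pmu_filter_match(struct perf_event *event)
 *	{
 *		struct example_pmu *epmu =
 *			container_of(event->pmu, struct example_pmu, pmu);
 *
 *		return cpumask_test_cpu(smp_processor_id(),
 *					&epmu->supported_cpus);
 *	}
 */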
| 2083 | |
Stephane Eranian | fa66f07 | 2010-08-26 16:40:01 +0200 | [diff] [blame] | 2084 | static inline int |
| 2085 | event_filter_match(struct perf_event *event) |
| 2086 | { |
Peter Zijlstra | 0b8f1e2 | 2016-08-04 14:37:24 +0200 | [diff] [blame] | 2087 | return (event->cpu == -1 || event->cpu == smp_processor_id()) && |
| 2088 | perf_cgroup_match(event) && pmu_filter_match(event); |
Stephane Eranian | fa66f07 | 2010-08-26 16:40:01 +0200 | [diff] [blame] | 2089 | } |
| 2090 | |
Stephane Eranian | 9ffcfa6 | 2010-10-20 15:25:01 +0200 | [diff] [blame] | 2091 | static void |
| 2092 | event_sched_out(struct perf_event *event, |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2093 | struct perf_cpu_context *cpuctx, |
| 2094 | struct perf_event_context *ctx) |
| 2095 | { |
Peter Zijlstra | 0d3d73a | 2017-09-05 14:16:28 +0200 | [diff] [blame] | 2096 | enum perf_event_state state = PERF_EVENT_STATE_INACTIVE; |
Peter Zijlstra | 652884f | 2015-01-23 11:20:10 +0100 | [diff] [blame] | 2097 | |
| 2098 | WARN_ON_ONCE(event->ctx != ctx); |
| 2099 | lockdep_assert_held(&ctx->lock); |
| 2100 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2101 | if (event->state != PERF_EVENT_STATE_ACTIVE) |
Stephane Eranian | 9ffcfa6 | 2010-10-20 15:25:01 +0200 | [diff] [blame] | 2102 | return; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2103 | |
Peter Zijlstra | 6668128 | 2017-11-13 14:28:38 +0100 | [diff] [blame] | 2104 | /* |
| 2105 | * Asymmetry; we only schedule events _IN_ through ctx_sched_in(), but |
| 2106 | * we can schedule events _OUT_ individually through things like |
| 2107 | * __perf_remove_from_context(). |
| 2108 | */ |
| 2109 | list_del_init(&event->active_list); |
| 2110 | |
Alexander Shishkin | 4437727 | 2013-12-16 14:17:36 +0200 | [diff] [blame] | 2111 | perf_pmu_disable(event->pmu); |
| 2112 | |
Peter Zijlstra | 28a967c | 2016-02-24 18:45:46 +0100 | [diff] [blame] | 2113 | event->pmu->del(event, 0); |
| 2114 | event->oncpu = -1; |
Peter Zijlstra | 0d3d73a | 2017-09-05 14:16:28 +0200 | [diff] [blame] | 2115 | |
Peter Zijlstra | 1d54ad9 | 2019-04-04 15:03:00 +0200 | [diff] [blame] | 2116 | if (READ_ONCE(event->pending_disable) >= 0) { |
| 2117 | WRITE_ONCE(event->pending_disable, -1); |
Peter Zijlstra | 0d3d73a | 2017-09-05 14:16:28 +0200 | [diff] [blame] | 2118 | state = PERF_EVENT_STATE_OFF; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2119 | } |
Peter Zijlstra | 0d3d73a | 2017-09-05 14:16:28 +0200 | [diff] [blame] | 2120 | perf_event_set_state(event, state); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2121 | |
| 2122 | if (!is_software_event(event)) |
| 2123 | cpuctx->active_oncpu--; |
Mark Rutland | 2fde4f9 | 2015-01-07 15:01:54 +0000 | [diff] [blame] | 2124 | if (!--ctx->nr_active) |
| 2125 | perf_event_ctx_deactivate(ctx); |
Peter Zijlstra | 0f5a260 | 2011-11-16 14:38:16 +0100 | [diff] [blame] | 2126 | if (event->attr.freq && event->attr.sample_freq) |
| 2127 | ctx->nr_freq--; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2128 | if (event->attr.exclusive || !cpuctx->active_oncpu) |
| 2129 | cpuctx->exclusive = 0; |
Alexander Shishkin | 4437727 | 2013-12-16 14:17:36 +0200 | [diff] [blame] | 2130 | |
| 2131 | perf_pmu_enable(event->pmu); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2132 | } |
| 2133 | |
| 2134 | static void |
| 2135 | group_sched_out(struct perf_event *group_event, |
| 2136 | struct perf_cpu_context *cpuctx, |
| 2137 | struct perf_event_context *ctx) |
| 2138 | { |
| 2139 | struct perf_event *event; |
Peter Zijlstra | 0d3d73a | 2017-09-05 14:16:28 +0200 | [diff] [blame] | 2140 | |
| 2141 | if (group_event->state != PERF_EVENT_STATE_ACTIVE) |
| 2142 | return; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2143 | |
Mark Rutland | 3f005e7 | 2016-07-26 18:12:21 +0100 | [diff] [blame] | 2144 | perf_pmu_disable(ctx->pmu); |
| 2145 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2146 | event_sched_out(group_event, cpuctx, ctx); |
| 2147 | |
| 2148 | /* |
| 2149 | * Schedule out siblings (if any): |
| 2150 | */ |
Peter Zijlstra | edb3959 | 2018-03-15 17:36:56 +0100 | [diff] [blame] | 2151 | for_each_sibling_event(event, group_event) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2152 | event_sched_out(event, cpuctx, ctx); |
| 2153 | |
Mark Rutland | 3f005e7 | 2016-07-26 18:12:21 +0100 | [diff] [blame] | 2154 | perf_pmu_enable(ctx->pmu); |
| 2155 | |
Peter Zijlstra | 0d3d73a | 2017-09-05 14:16:28 +0200 | [diff] [blame] | 2156 | if (group_event->attr.exclusive) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2157 | cpuctx->exclusive = 0; |
| 2158 | } |
| 2159 | |
Peter Zijlstra | 45a0e07 | 2016-01-26 13:09:48 +0100 | [diff] [blame] | 2160 | #define DETACH_GROUP 0x01UL |
Peter Zijlstra | 0017960 | 2015-11-30 16:26:35 +0100 | [diff] [blame] | 2161 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2162 | /* |
| 2163 | * Cross CPU call to remove a performance event |
| 2164 | * |
| 2165 | * We disable the event on the hardware level first. After that we |
| 2166 | * remove it from the context list. |
| 2167 | */ |
Peter Zijlstra | fae3fde | 2016-01-11 15:00:50 +0100 | [diff] [blame] | 2168 | static void |
| 2169 | __perf_remove_from_context(struct perf_event *event, |
| 2170 | struct perf_cpu_context *cpuctx, |
| 2171 | struct perf_event_context *ctx, |
| 2172 | void *info) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2173 | { |
Peter Zijlstra | 45a0e07 | 2016-01-26 13:09:48 +0100 | [diff] [blame] | 2174 | unsigned long flags = (unsigned long)info; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2175 | |
Peter Zijlstra | 3c5c871 | 2017-09-05 13:44:51 +0200 | [diff] [blame] | 2176 | if (ctx->is_active & EVENT_TIME) { |
| 2177 | update_context_time(ctx); |
| 2178 | update_cgrp_time_from_cpuctx(cpuctx); |
| 2179 | } |
| 2180 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2181 | event_sched_out(event, cpuctx, ctx); |
Peter Zijlstra | 45a0e07 | 2016-01-26 13:09:48 +0100 | [diff] [blame] | 2182 | if (flags & DETACH_GROUP) |
Peter Zijlstra | 46ce0fe | 2014-05-02 16:56:01 +0200 | [diff] [blame] | 2183 | perf_group_detach(event); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2184 | list_del_event(event, ctx); |
Peter Zijlstra | fe4b04f | 2011-02-02 13:19:09 +0100 | [diff] [blame] | 2185 | |
Peter Zijlstra | 39a4364 | 2016-01-11 12:46:35 +0100 | [diff] [blame] | 2186 | if (!ctx->nr_events && ctx->is_active) { |
Peter Zijlstra | fe4b04f | 2011-02-02 13:19:09 +0100 | [diff] [blame] | 2187 | ctx->is_active = 0; |
Peter Zijlstra | 39a4364 | 2016-01-11 12:46:35 +0100 | [diff] [blame] | 2188 | if (ctx->task) { |
| 2189 | WARN_ON_ONCE(cpuctx->task_ctx != ctx); |
| 2190 | cpuctx->task_ctx = NULL; |
| 2191 | } |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2192 | } |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2193 | } |
| 2194 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2195 | /* |
| 2196 | * Remove the event from a task's (or a CPU's) list of events. |
| 2197 | * |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2198 | * If event->ctx is a cloned context, callers must make sure that |
| 2199 | * every task struct that event->ctx->task could possibly point to |
| 2200 | * remains valid. This is OK when called from perf_release since |
| 2201 | * that only calls us on the top-level context, which can't be a clone. |
| 2202 | * When called from perf_event_exit_task, it's OK because the |
| 2203 | * context has been detached from its task. |
| 2204 | */ |
Peter Zijlstra | 45a0e07 | 2016-01-26 13:09:48 +0100 | [diff] [blame] | 2205 | static void perf_remove_from_context(struct perf_event *event, unsigned long flags) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2206 | { |
Peter Zijlstra | a76a82a | 2017-01-26 16:39:55 +0100 | [diff] [blame] | 2207 | struct perf_event_context *ctx = event->ctx; |
| 2208 | |
| 2209 | lockdep_assert_held(&ctx->mutex); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2210 | |
Peter Zijlstra | 45a0e07 | 2016-01-26 13:09:48 +0100 | [diff] [blame] | 2211 | event_function_call(event, __perf_remove_from_context, (void *)flags); |
Peter Zijlstra | a76a82a | 2017-01-26 16:39:55 +0100 | [diff] [blame] | 2212 | |
| 2213 | /* |
| 2214 | * The above event_function_call() can NO-OP when it hits |
| 2215 | * TASK_TOMBSTONE. In that case we must already have been detached |
| 2216 | * from the context (by perf_event_exit_event()) but the grouping |
| 2217 | 	 * might still be intact. |
| 2218 | */ |
| 2219 | WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT); |
| 2220 | if ((flags & DETACH_GROUP) && |
| 2221 | (event->attach_state & PERF_ATTACH_GROUP)) { |
| 2222 | /* |
| 2223 | * Since in that case we cannot possibly be scheduled, simply |
| 2224 | * detach now. |
| 2225 | */ |
| 2226 | raw_spin_lock_irq(&ctx->lock); |
| 2227 | perf_group_detach(event); |
| 2228 | raw_spin_unlock_irq(&ctx->lock); |
| 2229 | } |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2230 | } |
| 2231 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2232 | /* |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2233 | * Cross CPU call to disable a performance event |
| 2234 | */ |
Peter Zijlstra | fae3fde | 2016-01-11 15:00:50 +0100 | [diff] [blame] | 2235 | static void __perf_event_disable(struct perf_event *event, |
| 2236 | struct perf_cpu_context *cpuctx, |
| 2237 | struct perf_event_context *ctx, |
| 2238 | void *info) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2239 | { |
Peter Zijlstra | fae3fde | 2016-01-11 15:00:50 +0100 | [diff] [blame] | 2240 | if (event->state < PERF_EVENT_STATE_INACTIVE) |
| 2241 | return; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2242 | |
Peter Zijlstra | 3c5c871 | 2017-09-05 13:44:51 +0200 | [diff] [blame] | 2243 | if (ctx->is_active & EVENT_TIME) { |
| 2244 | update_context_time(ctx); |
| 2245 | update_cgrp_time_from_event(event); |
| 2246 | } |
| 2247 | |
Peter Zijlstra | fae3fde | 2016-01-11 15:00:50 +0100 | [diff] [blame] | 2248 | if (event == event->group_leader) |
| 2249 | group_sched_out(event, cpuctx, ctx); |
| 2250 | else |
| 2251 | event_sched_out(event, cpuctx, ctx); |
Peter Zijlstra | 0d3d73a | 2017-09-05 14:16:28 +0200 | [diff] [blame] | 2252 | |
| 2253 | perf_event_set_state(event, PERF_EVENT_STATE_OFF); |
Peter Zijlstra | 7b64801 | 2015-12-03 18:35:21 +0100 | [diff] [blame] | 2254 | } |
| 2255 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2256 | /* |
Tobias Tefke | 788faab | 2018-07-09 12:57:15 +0200 | [diff] [blame] | 2257 | * Disable an event. |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2258 | * |
| 2259 | * If event->ctx is a cloned context, callers must make sure that |
| 2260 | * every task struct that event->ctx->task could possibly point to |
Roy Ben Shlomo | 9f014e3 | 2019-09-20 20:12:53 +0300 | [diff] [blame] | 2261 | * remains valid. This condition is satisfied when called through |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2262 | * perf_event_for_each_child or perf_event_for_each because they |
| 2263 | * hold the top-level event's child_mutex, so any descendant that |
Peter Zijlstra | 8ba289b | 2016-01-26 13:06:56 +0100 | [diff] [blame] | 2264 | * goes to exit will block in perf_event_exit_event(). |
| 2265 | * |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2266 | * When called from perf_pending_event it's OK because event->ctx |
| 2267 | * is the current context on this CPU and preemption is disabled, |
| 2268 | * hence we can't get into perf_event_task_sched_out for this context. |
| 2269 | */ |
Peter Zijlstra | f63a8da | 2015-01-23 12:24:14 +0100 | [diff] [blame] | 2270 | static void _perf_event_disable(struct perf_event *event) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2271 | { |
| 2272 | struct perf_event_context *ctx = event->ctx; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2273 | |
Thomas Gleixner | e625cce1 | 2009-11-17 18:02:06 +0100 | [diff] [blame] | 2274 | raw_spin_lock_irq(&ctx->lock); |
Peter Zijlstra | 7b64801 | 2015-12-03 18:35:21 +0100 | [diff] [blame] | 2275 | if (event->state <= PERF_EVENT_STATE_OFF) { |
Thomas Gleixner | e625cce1 | 2009-11-17 18:02:06 +0100 | [diff] [blame] | 2276 | raw_spin_unlock_irq(&ctx->lock); |
Peter Zijlstra | 7b64801 | 2015-12-03 18:35:21 +0100 | [diff] [blame] | 2277 | return; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2278 | } |
Thomas Gleixner | e625cce1 | 2009-11-17 18:02:06 +0100 | [diff] [blame] | 2279 | raw_spin_unlock_irq(&ctx->lock); |
Peter Zijlstra | 7b64801 | 2015-12-03 18:35:21 +0100 | [diff] [blame] | 2280 | |
Peter Zijlstra | fae3fde | 2016-01-11 15:00:50 +0100 | [diff] [blame] | 2281 | event_function_call(event, __perf_event_disable, NULL); |
| 2282 | } |
| 2283 | |
| 2284 | void perf_event_disable_local(struct perf_event *event) |
| 2285 | { |
| 2286 | event_function_local(event, __perf_event_disable, NULL); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2287 | } |
Peter Zijlstra | f63a8da | 2015-01-23 12:24:14 +0100 | [diff] [blame] | 2288 | |
| 2289 | /* |
| 2290 | * Strictly speaking kernel users cannot create groups and therefore this |
| 2291 | * interface does not need the perf_event_ctx_lock() magic. |
| 2292 | */ |
| 2293 | void perf_event_disable(struct perf_event *event) |
| 2294 | { |
| 2295 | struct perf_event_context *ctx; |
| 2296 | |
| 2297 | ctx = perf_event_ctx_lock(event); |
| 2298 | _perf_event_disable(event); |
| 2299 | perf_event_ctx_unlock(event, ctx); |
| 2300 | } |
Robert Richter | dcfce4a | 2011-10-11 17:11:08 +0200 | [diff] [blame] | 2301 | EXPORT_SYMBOL_GPL(perf_event_disable); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2302 | |
Jiri Olsa | 5aab90c | 2016-10-26 11:48:24 +0200 | [diff] [blame] | 2303 | void perf_event_disable_inatomic(struct perf_event *event) |
| 2304 | { |
Peter Zijlstra | 1d54ad9 | 2019-04-04 15:03:00 +0200 | [diff] [blame] | 2305 | WRITE_ONCE(event->pending_disable, smp_processor_id()); |
| 2306 | /* can fail, see perf_pending_event_disable() */ |
Jiri Olsa | 5aab90c | 2016-10-26 11:48:24 +0200 | [diff] [blame] | 2307 | irq_work_queue(&event->pending); |
| 2308 | } |
| 2309 | |
Stephane Eranian | e5d1367 | 2011-02-14 11:20:01 +0200 | [diff] [blame] | 2310 | static void perf_set_shadow_time(struct perf_event *event, |
Peter Zijlstra | 0d3d73a | 2017-09-05 14:16:28 +0200 | [diff] [blame] | 2311 | struct perf_event_context *ctx) |
Stephane Eranian | e5d1367 | 2011-02-14 11:20:01 +0200 | [diff] [blame] | 2312 | { |
| 2313 | /* |
| 2314 | * use the correct time source for the time snapshot |
| 2315 | * |
| 2316 | * We could get by without this by leveraging the |
| 2317 | * fact that to get to this function, the caller |
| 2318 | * has most likely already called update_context_time() |
| 2319 | 	 * and update_cgrp_time_xx() and thus both timestamps |
| 2320 | 	 * are identical (or very close). Given that tstamp is |
| 2321 | * already adjusted for cgroup, we could say that: |
| 2322 | * tstamp - ctx->timestamp |
| 2323 | * is equivalent to |
| 2324 | * tstamp - cgrp->timestamp. |
| 2325 | * |
| 2326 | * Then, in perf_output_read(), the calculation would |
| 2327 | * work with no changes because: |
| 2328 | * - event is guaranteed scheduled in |
| 2329 | * - no scheduled out in between |
| 2330 | * - thus the timestamp would be the same |
| 2331 | * |
| 2332 | * But this is a bit hairy. |
| 2333 | * |
| 2334 | * So instead, we have an explicit cgroup call to remain |
| 2335 | 	 * within the time source all along. We believe it |
| 2336 | * is cleaner and simpler to understand. |
| 2337 | */ |
| 2338 | if (is_cgroup_event(event)) |
Peter Zijlstra | 0d3d73a | 2017-09-05 14:16:28 +0200 | [diff] [blame] | 2339 | perf_cgroup_set_shadow_time(event, event->tstamp); |
Stephane Eranian | e5d1367 | 2011-02-14 11:20:01 +0200 | [diff] [blame] | 2340 | else |
Peter Zijlstra | 0d3d73a | 2017-09-05 14:16:28 +0200 | [diff] [blame] | 2341 | event->shadow_ctx_time = event->tstamp - ctx->timestamp; |
Stephane Eranian | e5d1367 | 2011-02-14 11:20:01 +0200 | [diff] [blame] | 2342 | } |
| 2343 | |
Peter Zijlstra | 4fe757d | 2011-02-15 22:26:07 +0100 | [diff] [blame] | 2344 | #define MAX_INTERRUPTS (~0ULL) |
| 2345 | |
| 2346 | static void perf_log_throttle(struct perf_event *event, int enable); |
Alexander Shishkin | ec0d772 | 2015-01-14 14:18:23 +0200 | [diff] [blame] | 2347 | static void perf_log_itrace_start(struct perf_event *event); |
Peter Zijlstra | 4fe757d | 2011-02-15 22:26:07 +0100 | [diff] [blame] | 2348 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2349 | static int |
Stephane Eranian | 9ffcfa6 | 2010-10-20 15:25:01 +0200 | [diff] [blame] | 2350 | event_sched_in(struct perf_event *event, |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2351 | struct perf_cpu_context *cpuctx, |
Peter Zijlstra | 6e37738 | 2010-02-11 13:21:58 +0100 | [diff] [blame] | 2352 | struct perf_event_context *ctx) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2353 | { |
Alexander Shishkin | 4437727 | 2013-12-16 14:17:36 +0200 | [diff] [blame] | 2354 | int ret = 0; |
Stephane Eranian | 4158755 | 2011-01-03 18:20:01 +0200 | [diff] [blame] | 2355 | |
Peter Zijlstra | ab6f824 | 2019-08-07 11:17:00 +0200 | [diff] [blame] | 2356 | WARN_ON_ONCE(event->ctx != ctx); |
| 2357 | |
Peter Zijlstra | 6334241 | 2014-05-05 11:49:16 +0200 | [diff] [blame] | 2358 | lockdep_assert_held(&ctx->lock); |
| 2359 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2360 | if (event->state <= PERF_EVENT_STATE_OFF) |
| 2361 | return 0; |
| 2362 | |
Alexander Shishkin | 95ff4ca | 2015-12-02 18:41:11 +0200 | [diff] [blame] | 2363 | WRITE_ONCE(event->oncpu, smp_processor_id()); |
| 2364 | /* |
Peter Zijlstra | 0c1cbc1 | 2017-09-05 16:26:44 +0200 | [diff] [blame] | 2365 | * Order event::oncpu write to happen before the ACTIVE state is |
| 2366 | * visible. This allows perf_event_{stop,read}() to observe the correct |
| 2367 | * ->oncpu if it sees ACTIVE. |
Alexander Shishkin | 95ff4ca | 2015-12-02 18:41:11 +0200 | [diff] [blame] | 2368 | */ |
| 2369 | smp_wmb(); |
Peter Zijlstra | 0d3d73a | 2017-09-05 14:16:28 +0200 | [diff] [blame] | 2370 | perf_event_set_state(event, PERF_EVENT_STATE_ACTIVE); |
Peter Zijlstra | 4fe757d | 2011-02-15 22:26:07 +0100 | [diff] [blame] | 2371 | |
| 2372 | /* |
| 2373 | * Unthrottle events, since we scheduled we might have missed several |
| 2374 | 	 * ticks already; also, for a heavily scheduling task there is little |
| 2375 | * guarantee it'll get a tick in a timely manner. |
| 2376 | */ |
| 2377 | if (unlikely(event->hw.interrupts == MAX_INTERRUPTS)) { |
| 2378 | perf_log_throttle(event, 1); |
| 2379 | event->hw.interrupts = 0; |
| 2380 | } |
| 2381 | |
Alexander Shishkin | 4437727 | 2013-12-16 14:17:36 +0200 | [diff] [blame] | 2382 | perf_pmu_disable(event->pmu); |
| 2383 | |
Peter Zijlstra | 0d3d73a | 2017-09-05 14:16:28 +0200 | [diff] [blame] | 2384 | perf_set_shadow_time(event, ctx); |
Shaohua Li | 72f669c | 2015-02-05 15:55:31 -0800 | [diff] [blame] | 2385 | |
Alexander Shishkin | ec0d772 | 2015-01-14 14:18:23 +0200 | [diff] [blame] | 2386 | perf_log_itrace_start(event); |
| 2387 | |
Peter Zijlstra | a4eaf7f | 2010-06-16 14:37:10 +0200 | [diff] [blame] | 2388 | if (event->pmu->add(event, PERF_EF_START)) { |
Peter Zijlstra | 0d3d73a | 2017-09-05 14:16:28 +0200 | [diff] [blame] | 2389 | perf_event_set_state(event, PERF_EVENT_STATE_INACTIVE); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2390 | event->oncpu = -1; |
Alexander Shishkin | 4437727 | 2013-12-16 14:17:36 +0200 | [diff] [blame] | 2391 | ret = -EAGAIN; |
| 2392 | goto out; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2393 | } |
| 2394 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2395 | if (!is_software_event(event)) |
| 2396 | cpuctx->active_oncpu++; |
Mark Rutland | 2fde4f9 | 2015-01-07 15:01:54 +0000 | [diff] [blame] | 2397 | if (!ctx->nr_active++) |
| 2398 | perf_event_ctx_activate(ctx); |
Peter Zijlstra | 0f5a260 | 2011-11-16 14:38:16 +0100 | [diff] [blame] | 2399 | if (event->attr.freq && event->attr.sample_freq) |
| 2400 | ctx->nr_freq++; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2401 | |
| 2402 | if (event->attr.exclusive) |
| 2403 | cpuctx->exclusive = 1; |
| 2404 | |
Alexander Shishkin | 4437727 | 2013-12-16 14:17:36 +0200 | [diff] [blame] | 2405 | out: |
| 2406 | perf_pmu_enable(event->pmu); |
| 2407 | |
| 2408 | return ret; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2409 | } |
| 2410 | |
| 2411 | static int |
| 2412 | group_sched_in(struct perf_event *group_event, |
| 2413 | struct perf_cpu_context *cpuctx, |
Peter Zijlstra | 6e37738 | 2010-02-11 13:21:58 +0100 | [diff] [blame] | 2414 | struct perf_event_context *ctx) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2415 | { |
Lin Ming | 6bde9b6 | 2010-04-23 13:56:00 +0800 | [diff] [blame] | 2416 | struct perf_event *event, *partial_group = NULL; |
Peter Zijlstra | 4a23459 | 2014-02-24 12:43:31 +0100 | [diff] [blame] | 2417 | struct pmu *pmu = ctx->pmu; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2418 | |
| 2419 | if (group_event->state == PERF_EVENT_STATE_OFF) |
| 2420 | return 0; |
| 2421 | |
Sukadev Bhattiprolu | fbbe070 | 2015-09-03 20:07:45 -0700 | [diff] [blame] | 2422 | pmu->start_txn(pmu, PERF_PMU_TXN_ADD); |
Lin Ming | 6bde9b6 | 2010-04-23 13:56:00 +0800 | [diff] [blame] | 2423 | |
Stephane Eranian | 9ffcfa6 | 2010-10-20 15:25:01 +0200 | [diff] [blame] | 2424 | if (event_sched_in(group_event, cpuctx, ctx)) { |
Peter Zijlstra | ad5133b | 2010-06-15 12:22:39 +0200 | [diff] [blame] | 2425 | pmu->cancel_txn(pmu); |
Peter Zijlstra | 272325c | 2015-04-15 11:41:58 +0200 | [diff] [blame] | 2426 | perf_mux_hrtimer_restart(cpuctx); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2427 | return -EAGAIN; |
Stephane Eranian | 90151c35 | 2010-05-25 16:23:10 +0200 | [diff] [blame] | 2428 | } |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2429 | |
| 2430 | /* |
| 2431 | * Schedule in siblings as one group (if any): |
| 2432 | */ |
Peter Zijlstra | edb3959 | 2018-03-15 17:36:56 +0100 | [diff] [blame] | 2433 | for_each_sibling_event(event, group_event) { |
Stephane Eranian | 9ffcfa6 | 2010-10-20 15:25:01 +0200 | [diff] [blame] | 2434 | if (event_sched_in(event, cpuctx, ctx)) { |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2435 | partial_group = event; |
| 2436 | goto group_error; |
| 2437 | } |
| 2438 | } |
| 2439 | |
Stephane Eranian | 9ffcfa6 | 2010-10-20 15:25:01 +0200 | [diff] [blame] | 2440 | if (!pmu->commit_txn(pmu)) |
Paul Mackerras | 6e85158 | 2010-05-08 20:58:00 +1000 | [diff] [blame] | 2441 | return 0; |
Stephane Eranian | 9ffcfa6 | 2010-10-20 15:25:01 +0200 | [diff] [blame] | 2442 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2443 | group_error: |
| 2444 | /* |
| 2445 | * Groups can be scheduled in as one unit only, so undo any |
| 2446 | * partial group before returning: |
Peter Zijlstra | 0d3d73a | 2017-09-05 14:16:28 +0200 | [diff] [blame] | 2447 | * The events up to the failed event are scheduled out normally. |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2448 | */ |
Peter Zijlstra | edb3959 | 2018-03-15 17:36:56 +0100 | [diff] [blame] | 2449 | for_each_sibling_event(event, group_event) { |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2450 | if (event == partial_group) |
Peter Zijlstra | 0d3d73a | 2017-09-05 14:16:28 +0200 | [diff] [blame] | 2451 | break; |
Stephane Eranian | d7842da | 2010-10-20 15:25:01 +0200 | [diff] [blame] | 2452 | |
Peter Zijlstra | 0d3d73a | 2017-09-05 14:16:28 +0200 | [diff] [blame] | 2453 | event_sched_out(event, cpuctx, ctx); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2454 | } |
Stephane Eranian | 9ffcfa6 | 2010-10-20 15:25:01 +0200 | [diff] [blame] | 2455 | event_sched_out(group_event, cpuctx, ctx); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2456 | |
Peter Zijlstra | ad5133b | 2010-06-15 12:22:39 +0200 | [diff] [blame] | 2457 | pmu->cancel_txn(pmu); |
Stephane Eranian | 90151c35 | 2010-05-25 16:23:10 +0200 | [diff] [blame] | 2458 | |
Peter Zijlstra | 272325c | 2015-04-15 11:41:58 +0200 | [diff] [blame] | 2459 | perf_mux_hrtimer_restart(cpuctx); |
Stephane Eranian | 9e63020 | 2013-04-03 14:21:33 +0200 | [diff] [blame] | 2460 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2461 | return -EAGAIN; |
| 2462 | } |
| 2463 | |
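/*
 * Illustrative sketch (not part of this file): the start_txn()/commit_txn()/
 * cancel_txn() dance above relies on the PMU driver batching the ->add()
 * calls and only validating the whole group at commit time, roughly along
 * these lines (the "example_*" helpers are hypothetical):
 *
 *	static void example_pmu_start_txn(struct pmu *pmu, unsigned int flags)
 *	{
 *		if (flags & PERF_PMU_TXN_ADD)
 *			example_defer_scheduling();
 *	}
 *
 *	static int example_pmu_commit_txn(struct pmu *pmu)
 *	{
 *		return example_schedule_queued_events();
 *	}
 *
 * PMUs that cannot batch may leave these callbacks unimplemented, in which
 * case the core falls back to scheduling each event individually.
 */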
| 2464 | /* |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2465 | * Work out whether we can put this event group on the CPU now. |
| 2466 | */ |
| 2467 | static int group_can_go_on(struct perf_event *event, |
| 2468 | struct perf_cpu_context *cpuctx, |
| 2469 | int can_add_hw) |
| 2470 | { |
| 2471 | /* |
| 2472 | * Groups consisting entirely of software events can always go on. |
| 2473 | */ |
David Carrillo-Cisneros | 4ff6a8d | 2016-08-17 13:55:05 -0700 | [diff] [blame] | 2474 | if (event->group_caps & PERF_EV_CAP_SOFTWARE) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2475 | return 1; |
| 2476 | /* |
| 2477 | * If an exclusive group is already on, no other hardware |
| 2478 | * events can go on. |
| 2479 | */ |
| 2480 | if (cpuctx->exclusive) |
| 2481 | return 0; |
| 2482 | /* |
| 2483 | * If this group is exclusive and there are already |
| 2484 | * events on the CPU, it can't go on. |
| 2485 | */ |
| 2486 | if (event->attr.exclusive && cpuctx->active_oncpu) |
| 2487 | return 0; |
| 2488 | /* |
| 2489 | * Otherwise, try to add it if all previous groups were able |
| 2490 | * to go on. |
| 2491 | */ |
| 2492 | return can_add_hw; |
| 2493 | } |
| 2494 | |
| 2495 | static void add_event_to_ctx(struct perf_event *event, |
| 2496 | struct perf_event_context *ctx) |
| 2497 | { |
| 2498 | list_add_event(event, ctx); |
Peter Zijlstra | 8a49542 | 2010-05-27 15:47:49 +0200 | [diff] [blame] | 2499 | perf_group_attach(event); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2500 | } |
| 2501 | |
Peter Zijlstra | bd2afa4 | 2016-02-24 18:45:49 +0100 | [diff] [blame] | 2502 | static void ctx_sched_out(struct perf_event_context *ctx, |
| 2503 | struct perf_cpu_context *cpuctx, |
| 2504 | enum event_type_t event_type); |
Peter Zijlstra | 2c29ef0 | 2011-04-09 21:17:44 +0200 | [diff] [blame] | 2505 | static void |
| 2506 | ctx_sched_in(struct perf_event_context *ctx, |
| 2507 | struct perf_cpu_context *cpuctx, |
| 2508 | enum event_type_t event_type, |
| 2509 | struct task_struct *task); |
Peter Zijlstra | fe4b04f | 2011-02-02 13:19:09 +0100 | [diff] [blame] | 2510 | |
Peter Zijlstra | bd2afa4 | 2016-02-24 18:45:49 +0100 | [diff] [blame] | 2511 | static void task_ctx_sched_out(struct perf_cpu_context *cpuctx, |
Alexander Shishkin | 487f05e | 2017-01-19 18:43:30 +0200 | [diff] [blame] | 2512 | struct perf_event_context *ctx, |
| 2513 | enum event_type_t event_type) |
Peter Zijlstra | bd2afa4 | 2016-02-24 18:45:49 +0100 | [diff] [blame] | 2514 | { |
| 2515 | if (!cpuctx->task_ctx) |
| 2516 | return; |
| 2517 | |
| 2518 | if (WARN_ON_ONCE(ctx != cpuctx->task_ctx)) |
| 2519 | return; |
| 2520 | |
Alexander Shishkin | 487f05e | 2017-01-19 18:43:30 +0200 | [diff] [blame] | 2521 | ctx_sched_out(ctx, cpuctx, event_type); |
Peter Zijlstra | bd2afa4 | 2016-02-24 18:45:49 +0100 | [diff] [blame] | 2522 | } |
| 2523 | |
Peter Zijlstra | dce5855 | 2011-04-09 21:17:46 +0200 | [diff] [blame] | 2524 | static void perf_event_sched_in(struct perf_cpu_context *cpuctx, |
| 2525 | struct perf_event_context *ctx, |
| 2526 | struct task_struct *task) |
| 2527 | { |
| 2528 | cpu_ctx_sched_in(cpuctx, EVENT_PINNED, task); |
| 2529 | if (ctx) |
| 2530 | ctx_sched_in(ctx, cpuctx, EVENT_PINNED, task); |
| 2531 | cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE, task); |
| 2532 | if (ctx) |
| 2533 | ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE, task); |
| 2534 | } |
| 2535 | |
Alexander Shishkin | 487f05e | 2017-01-19 18:43:30 +0200 | [diff] [blame] | 2536 | /* |
| 2537 | * We want to maintain the following priority of scheduling: |
| 2538 | * - CPU pinned (EVENT_CPU | EVENT_PINNED) |
| 2539 | * - task pinned (EVENT_PINNED) |
| 2540 | * - CPU flexible (EVENT_CPU | EVENT_FLEXIBLE) |
| 2541 | * - task flexible (EVENT_FLEXIBLE). |
| 2542 | * |
| 2543 | * In order to avoid unscheduling and scheduling back in everything every |
| 2544 | * time an event is added, only do it for the groups of equal priority and |
| 2545 | * below. |
| 2546 | * |
| 2547 | * This can be called after a batch operation on task events, in which case |
| 2548 | * event_type is a bit mask of the types of events involved. For CPU events, |
| 2549 | * event_type is only either EVENT_PINNED or EVENT_FLEXIBLE. |
| 2550 | */ |
Peter Zijlstra | 3e34950 | 2016-01-08 10:01:18 +0100 | [diff] [blame] | 2551 | static void ctx_resched(struct perf_cpu_context *cpuctx, |
Alexander Shishkin | 487f05e | 2017-01-19 18:43:30 +0200 | [diff] [blame] | 2552 | struct perf_event_context *task_ctx, |
| 2553 | enum event_type_t event_type) |
Peter Zijlstra | 0017960 | 2015-11-30 16:26:35 +0100 | [diff] [blame] | 2554 | { |
Song Liu | bd903af | 2018-03-05 21:55:04 -0800 | [diff] [blame] | 2555 | enum event_type_t ctx_event_type; |
Alexander Shishkin | 487f05e | 2017-01-19 18:43:30 +0200 | [diff] [blame] | 2556 | bool cpu_event = !!(event_type & EVENT_CPU); |
| 2557 | |
| 2558 | /* |
| 2559 | * If pinned groups are involved, flexible groups also need to be |
| 2560 | * scheduled out. |
| 2561 | */ |
| 2562 | if (event_type & EVENT_PINNED) |
| 2563 | event_type |= EVENT_FLEXIBLE; |
| 2564 | |
Song Liu | bd903af | 2018-03-05 21:55:04 -0800 | [diff] [blame] | 2565 | ctx_event_type = event_type & EVENT_ALL; |
| 2566 | |
Peter Zijlstra | 3e34950 | 2016-01-08 10:01:18 +0100 | [diff] [blame] | 2567 | perf_pmu_disable(cpuctx->ctx.pmu); |
| 2568 | if (task_ctx) |
Alexander Shishkin | 487f05e | 2017-01-19 18:43:30 +0200 | [diff] [blame] | 2569 | task_ctx_sched_out(cpuctx, task_ctx, event_type); |
| 2570 | |
| 2571 | /* |
| 2572 | * Decide which cpu ctx groups to schedule out based on the types |
| 2573 | * of events that caused rescheduling: |
| 2574 | * - EVENT_CPU: schedule out corresponding groups; |
| 2575 | * - EVENT_PINNED task events: schedule out EVENT_FLEXIBLE groups; |
| 2576 | * - otherwise, do nothing more. |
| 2577 | */ |
| 2578 | if (cpu_event) |
| 2579 | cpu_ctx_sched_out(cpuctx, ctx_event_type); |
| 2580 | else if (ctx_event_type & EVENT_PINNED) |
| 2581 | cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE); |
| 2582 | |
Peter Zijlstra | 3e34950 | 2016-01-08 10:01:18 +0100 | [diff] [blame] | 2583 | perf_event_sched_in(cpuctx, task_ctx, current); |
| 2584 | perf_pmu_enable(cpuctx->ctx.pmu); |
Peter Zijlstra | 0017960 | 2015-11-30 16:26:35 +0100 | [diff] [blame] | 2585 | } |
| 2586 | |
Stephane Eranian | c68d224 | 2019-04-08 10:32:51 -0700 | [diff] [blame] | 2587 | void perf_pmu_resched(struct pmu *pmu) |
| 2588 | { |
| 2589 | struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context); |
| 2590 | struct perf_event_context *task_ctx = cpuctx->task_ctx; |
| 2591 | |
| 2592 | perf_ctx_lock(cpuctx, task_ctx); |
| 2593 | ctx_resched(cpuctx, task_ctx, EVENT_ALL|EVENT_CPU); |
| 2594 | perf_ctx_unlock(cpuctx, task_ctx); |
| 2595 | } |
| 2596 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2597 | /* |
| 2598 | * Cross CPU call to install and enable a performance event |
| 2599 | * |
Peter Zijlstra | a096309 | 2016-02-24 18:45:50 +0100 | [diff] [blame] | 2600 | * Very similar to remote_function() + event_function() but cannot assume that |
| 2601 | * things like ctx->is_active and cpuctx->task_ctx are set. |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2602 | */ |
Peter Zijlstra | fe4b04f | 2011-02-02 13:19:09 +0100 | [diff] [blame] | 2603 | static int __perf_install_in_context(void *info) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2604 | { |
Peter Zijlstra | a096309 | 2016-02-24 18:45:50 +0100 | [diff] [blame] | 2605 | struct perf_event *event = info; |
| 2606 | struct perf_event_context *ctx = event->ctx; |
Peter Zijlstra | 108b02c | 2010-09-06 14:32:03 +0200 | [diff] [blame] | 2607 | struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); |
Peter Zijlstra | 2c29ef0 | 2011-04-09 21:17:44 +0200 | [diff] [blame] | 2608 | struct perf_event_context *task_ctx = cpuctx->task_ctx; |
Peter Zijlstra | 63cae12 | 2016-12-09 14:59:00 +0100 | [diff] [blame] | 2609 | bool reprogram = true; |
Peter Zijlstra | a096309 | 2016-02-24 18:45:50 +0100 | [diff] [blame] | 2610 | int ret = 0; |
Peter Zijlstra | 2c29ef0 | 2011-04-09 21:17:44 +0200 | [diff] [blame] | 2611 | |
Peter Zijlstra | 63b6da3 | 2016-01-14 16:05:37 +0100 | [diff] [blame] | 2612 | raw_spin_lock(&cpuctx->ctx.lock); |
Peter Zijlstra | 39a4364 | 2016-01-11 12:46:35 +0100 | [diff] [blame] | 2613 | if (ctx->task) { |
Peter Zijlstra | b58f6b0 | 2011-06-07 00:23:28 +0200 | [diff] [blame] | 2614 | raw_spin_lock(&ctx->lock); |
| 2615 | task_ctx = ctx; |
Peter Zijlstra | a096309 | 2016-02-24 18:45:50 +0100 | [diff] [blame] | 2616 | |
Peter Zijlstra | 63cae12 | 2016-12-09 14:59:00 +0100 | [diff] [blame] | 2617 | reprogram = (ctx->task == current); |
| 2618 | |
| 2619 | /* |
| 2620 | * If the task is running, it must be running on this CPU, |
| 2621 | * otherwise we cannot reprogram things. |
| 2622 | * |
| 2623 | * If its not running, we don't care, ctx->lock will |
| 2624 | * serialize against it becoming runnable. |
| 2625 | */ |
| 2626 | if (task_curr(ctx->task) && !reprogram) { |
Peter Zijlstra | a096309 | 2016-02-24 18:45:50 +0100 | [diff] [blame] | 2627 | ret = -ESRCH; |
Peter Zijlstra | 63b6da3 | 2016-01-14 16:05:37 +0100 | [diff] [blame] | 2628 | goto unlock; |
Peter Zijlstra | a096309 | 2016-02-24 18:45:50 +0100 | [diff] [blame] | 2629 | } |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2630 | |
Peter Zijlstra | 63cae12 | 2016-12-09 14:59:00 +0100 | [diff] [blame] | 2631 | WARN_ON_ONCE(reprogram && cpuctx->task_ctx && cpuctx->task_ctx != ctx); |
Peter Zijlstra | 63b6da3 | 2016-01-14 16:05:37 +0100 | [diff] [blame] | 2632 | } else if (task_ctx) { |
| 2633 | raw_spin_lock(&task_ctx->lock); |
Peter Zijlstra | b58f6b0 | 2011-06-07 00:23:28 +0200 | [diff] [blame] | 2634 | } |
| 2635 | |
leilei.lin | 33801b9 | 2018-03-06 17:36:37 +0800 | [diff] [blame] | 2636 | #ifdef CONFIG_CGROUP_PERF |
| 2637 | if (is_cgroup_event(event)) { |
| 2638 | /* |
| 2639 | * If the current cgroup doesn't match the event's |
| 2640 | * cgroup, we should not try to schedule it. |
| 2641 | */ |
| 2642 | struct perf_cgroup *cgrp = perf_cgroup_from_task(current, ctx); |
| 2643 | reprogram = cgroup_is_descendant(cgrp->css.cgroup, |
| 2644 | event->cgrp->css.cgroup); |
| 2645 | } |
| 2646 | #endif |
| 2647 | |
Peter Zijlstra | 63cae12 | 2016-12-09 14:59:00 +0100 | [diff] [blame] | 2648 | if (reprogram) { |
Peter Zijlstra | a096309 | 2016-02-24 18:45:50 +0100 | [diff] [blame] | 2649 | ctx_sched_out(ctx, cpuctx, EVENT_TIME); |
| 2650 | add_event_to_ctx(event, ctx); |
Alexander Shishkin | 487f05e | 2017-01-19 18:43:30 +0200 | [diff] [blame] | 2651 | ctx_resched(cpuctx, task_ctx, get_event_type(event)); |
Peter Zijlstra | a096309 | 2016-02-24 18:45:50 +0100 | [diff] [blame] | 2652 | } else { |
| 2653 | add_event_to_ctx(event, ctx); |
| 2654 | } |
| 2655 | |
Peter Zijlstra | 63b6da3 | 2016-01-14 16:05:37 +0100 | [diff] [blame] | 2656 | unlock: |
Peter Zijlstra | 2c29ef0 | 2011-04-09 21:17:44 +0200 | [diff] [blame] | 2657 | perf_ctx_unlock(cpuctx, task_ctx); |
Peter Zijlstra | fe4b04f | 2011-02-02 13:19:09 +0100 | [diff] [blame] | 2658 | |
Peter Zijlstra | a096309 | 2016-02-24 18:45:50 +0100 | [diff] [blame] | 2659 | return ret; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2660 | } |
| 2661 | |
Alexander Shishkin | 8a58dda | 2019-07-01 14:07:55 +0300 | [diff] [blame] | 2662 | static bool exclusive_event_installable(struct perf_event *event, |
| 2663 | struct perf_event_context *ctx); |
| 2664 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2665 | /* |
Peter Zijlstra | a096309 | 2016-02-24 18:45:50 +0100 | [diff] [blame] | 2666 | * Attach a performance event to a context. |
| 2667 | * |
| 2668 | * Very similar to event_function_call, see comment there. |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2669 | */ |
| 2670 | static void |
| 2671 | perf_install_in_context(struct perf_event_context *ctx, |
| 2672 | struct perf_event *event, |
| 2673 | int cpu) |
| 2674 | { |
Peter Zijlstra | a096309 | 2016-02-24 18:45:50 +0100 | [diff] [blame] | 2675 | struct task_struct *task = READ_ONCE(ctx->task); |
Peter Zijlstra | 39a4364 | 2016-01-11 12:46:35 +0100 | [diff] [blame] | 2676 | |
Peter Zijlstra | fe4b04f | 2011-02-02 13:19:09 +0100 | [diff] [blame] | 2677 | lockdep_assert_held(&ctx->mutex); |
| 2678 | |
Alexander Shishkin | 8a58dda | 2019-07-01 14:07:55 +0300 | [diff] [blame] | 2679 | WARN_ON_ONCE(!exclusive_event_installable(event, ctx)); |
| 2680 | |
Yan, Zheng | 0cda4c0 | 2012-06-15 14:31:33 +0800 | [diff] [blame] | 2681 | if (event->cpu != -1) |
| 2682 | event->cpu = cpu; |
Peter Zijlstra | c3f00c7 | 2010-08-18 14:37:15 +0200 | [diff] [blame] | 2683 | |
Peter Zijlstra | 0b8f1e2 | 2016-08-04 14:37:24 +0200 | [diff] [blame] | 2684 | /* |
| 2685 | * Ensures that if we can observe event->ctx, both the event and ctx |
| 2686 | * will be 'complete'. See perf_iterate_sb_cpu(). |
| 2687 | */ |
| 2688 | smp_store_release(&event->ctx, ctx); |
| 2689 | |
Peter Zijlstra | db0503e | 2019-10-21 16:02:39 +0200 | [diff] [blame] | 2690 | /* |
| 2691 | * perf_event_attr::disabled events will not run and can be initialized |
| 2692 | * without IPI. Except when this is the first event for the context, in |
| 2693 | * that case we need the magic of the IPI to set ctx->is_active. |
| 2694 | * |
| 2695 | * The IOC_ENABLE that is sure to follow the creation of a disabled |
| 2696 | * event will issue the IPI and reprogram the hardware. |
| 2697 | */ |
| 2698 | if (__perf_effective_state(event) == PERF_EVENT_STATE_OFF && ctx->nr_events) { |
| 2699 | raw_spin_lock_irq(&ctx->lock); |
| 2700 | if (ctx->task == TASK_TOMBSTONE) { |
| 2701 | raw_spin_unlock_irq(&ctx->lock); |
| 2702 | return; |
| 2703 | } |
| 2704 | add_event_to_ctx(event, ctx); |
| 2705 | raw_spin_unlock_irq(&ctx->lock); |
| 2706 | return; |
| 2707 | } |
| 2708 | |
Peter Zijlstra | a096309 | 2016-02-24 18:45:50 +0100 | [diff] [blame] | 2709 | if (!task) { |
| 2710 | cpu_function_call(cpu, __perf_install_in_context, event); |
Peter Zijlstra | 63b6da3 | 2016-01-14 16:05:37 +0100 | [diff] [blame] | 2711 | return; |
| 2712 | } |
Peter Zijlstra | 6f932e5 | 2016-02-24 18:45:43 +0100 | [diff] [blame] | 2713 | |
Peter Zijlstra | a096309 | 2016-02-24 18:45:50 +0100 | [diff] [blame] | 2714 | /* |
| 2715 | * Should not happen, we validate the ctx is still alive before calling. |
| 2716 | */ |
| 2717 | if (WARN_ON_ONCE(task == TASK_TOMBSTONE)) |
| 2718 | return; |
| 2719 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2720 | /* |
| 2721 | * Installing events is tricky because we cannot rely on ctx->is_active |
| 2722 | * to be set in case this is the nr_events 0 -> 1 transition. |
Peter Zijlstra | 63cae12 | 2016-12-09 14:59:00 +0100 | [diff] [blame] | 2723 | * |
| 2724 | * Instead we use task_curr(), which tells us if the task is running. |
| 2725 | * However, since we use task_curr() outside of rq::lock, we can race |
| 2726 | * against the actual state. This means the result can be wrong. |
| 2727 | * |
| 2728 | * If we get a false positive, we retry, this is harmless. |
| 2729 | * |
| 2730 | * If we get a false negative, things are complicated. If we are after |
| 2731 | * perf_event_context_sched_in() ctx::lock will serialize us, and the |
| 2732 | * value must be correct. If we're before, it doesn't matter since |
| 2733 | * perf_event_context_sched_in() will program the counter. |
| 2734 | * |
| 2735 | * However, this hinges on the remote context switch having observed |
| 2736 | * our task->perf_event_ctxp[] store, such that it will in fact take |
| 2737 | * ctx::lock in perf_event_context_sched_in(). |
| 2738 | * |
| 2739 | 	 * We do this by task_function_call(); if the IPI fails to hit the task |
| 2740 | 	 * we know any future context switch of the task must see the |
| 2741 | 	 * perf_event_ctxp[] store. |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2742 | */ |
Peter Zijlstra | 63cae12 | 2016-12-09 14:59:00 +0100 | [diff] [blame] | 2743 | |
Peter Zijlstra | a096309 | 2016-02-24 18:45:50 +0100 | [diff] [blame] | 2744 | /* |
Peter Zijlstra | 63cae12 | 2016-12-09 14:59:00 +0100 | [diff] [blame] | 2745 | * This smp_mb() orders the task->perf_event_ctxp[] store with the |
| 2746 | * task_cpu() load, such that if the IPI then does not find the task |
| 2747 | * running, a future context switch of that task must observe the |
| 2748 | * store. |
Peter Zijlstra | a096309 | 2016-02-24 18:45:50 +0100 | [diff] [blame] | 2749 | */ |
Peter Zijlstra | 63cae12 | 2016-12-09 14:59:00 +0100 | [diff] [blame] | 2750 | smp_mb(); |
| 2751 | again: |
| 2752 | if (!task_function_call(task, __perf_install_in_context, event)) |
Peter Zijlstra | a096309 | 2016-02-24 18:45:50 +0100 | [diff] [blame] | 2753 | return; |
| 2754 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2755 | raw_spin_lock_irq(&ctx->lock); |
| 2756 | task = ctx->task; |
Peter Zijlstra | a096309 | 2016-02-24 18:45:50 +0100 | [diff] [blame] | 2757 | if (WARN_ON_ONCE(task == TASK_TOMBSTONE)) { |
| 2758 | /* |
| 2759 | * Cannot happen because we already checked above (which also |
| 2760 | * cannot happen), and we hold ctx->mutex, which serializes us |
| 2761 | * against perf_event_exit_task_context(). |
| 2762 | */ |
Peter Zijlstra | 39a4364 | 2016-01-11 12:46:35 +0100 | [diff] [blame] | 2763 | raw_spin_unlock_irq(&ctx->lock); |
| 2764 | return; |
| 2765 | } |
Peter Zijlstra | a096309 | 2016-02-24 18:45:50 +0100 | [diff] [blame] | 2766 | /* |
Peter Zijlstra | 63cae12 | 2016-12-09 14:59:00 +0100 | [diff] [blame] | 2767 | * If the task is not running, ctx->lock will avoid it becoming so, |
| 2768 | * thus we can safely install the event. |
Peter Zijlstra | a096309 | 2016-02-24 18:45:50 +0100 | [diff] [blame] | 2769 | */ |
Peter Zijlstra | 63cae12 | 2016-12-09 14:59:00 +0100 | [diff] [blame] | 2770 | if (task_curr(task)) { |
| 2771 | raw_spin_unlock_irq(&ctx->lock); |
| 2772 | goto again; |
| 2773 | } |
| 2774 | add_event_to_ctx(event, ctx); |
| 2775 | raw_spin_unlock_irq(&ctx->lock); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2776 | } |
| 2777 | |
| 2778 | /* |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2779 | * Cross CPU call to enable a performance event |
| 2780 | */ |
Peter Zijlstra | fae3fde | 2016-01-11 15:00:50 +0100 | [diff] [blame] | 2781 | static void __perf_event_enable(struct perf_event *event, |
| 2782 | struct perf_cpu_context *cpuctx, |
| 2783 | struct perf_event_context *ctx, |
| 2784 | void *info) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2785 | { |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2786 | struct perf_event *leader = event->group_leader; |
Peter Zijlstra | fae3fde | 2016-01-11 15:00:50 +0100 | [diff] [blame] | 2787 | struct perf_event_context *task_ctx; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2788 | |
Peter Zijlstra | 6e801e01 | 2016-01-26 12:17:08 +0100 | [diff] [blame] | 2789 | if (event->state >= PERF_EVENT_STATE_INACTIVE || |
| 2790 | event->state <= PERF_EVENT_STATE_ERROR) |
Peter Zijlstra | fae3fde | 2016-01-11 15:00:50 +0100 | [diff] [blame] | 2791 | return; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2792 | |
Peter Zijlstra | bd2afa4 | 2016-02-24 18:45:49 +0100 | [diff] [blame] | 2793 | if (ctx->is_active) |
| 2794 | ctx_sched_out(ctx, cpuctx, EVENT_TIME); |
| 2795 | |
Peter Zijlstra | 0d3d73a | 2017-09-05 14:16:28 +0200 | [diff] [blame] | 2796 | perf_event_set_state(event, PERF_EVENT_STATE_INACTIVE); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2797 | |
Peter Zijlstra | fae3fde | 2016-01-11 15:00:50 +0100 | [diff] [blame] | 2798 | if (!ctx->is_active) |
| 2799 | return; |
| 2800 | |
Stephane Eranian | e5d1367 | 2011-02-14 11:20:01 +0200 | [diff] [blame] | 2801 | if (!event_filter_match(event)) { |
Peter Zijlstra | bd2afa4 | 2016-02-24 18:45:49 +0100 | [diff] [blame] | 2802 | ctx_sched_in(ctx, cpuctx, EVENT_TIME, current); |
Peter Zijlstra | fae3fde | 2016-01-11 15:00:50 +0100 | [diff] [blame] | 2803 | return; |
Stephane Eranian | e5d1367 | 2011-02-14 11:20:01 +0200 | [diff] [blame] | 2804 | } |
Peter Zijlstra | f4c4176 | 2009-12-16 17:55:54 +0100 | [diff] [blame] | 2805 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2806 | /* |
| 2807 | * If the event is in a group and isn't the group leader, |
| 2808 | * then don't put it on unless the group is on. |
| 2809 | */ |
Peter Zijlstra | bd2afa4 | 2016-02-24 18:45:49 +0100 | [diff] [blame] | 2810 | if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE) { |
| 2811 | ctx_sched_in(ctx, cpuctx, EVENT_TIME, current); |
Peter Zijlstra | fae3fde | 2016-01-11 15:00:50 +0100 | [diff] [blame] | 2812 | return; |
Peter Zijlstra | bd2afa4 | 2016-02-24 18:45:49 +0100 | [diff] [blame] | 2813 | } |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2814 | |
Peter Zijlstra | fae3fde | 2016-01-11 15:00:50 +0100 | [diff] [blame] | 2815 | task_ctx = cpuctx->task_ctx; |
| 2816 | if (ctx->task) |
| 2817 | WARN_ON_ONCE(task_ctx != ctx); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2818 | |
Alexander Shishkin | 487f05e | 2017-01-19 18:43:30 +0200 | [diff] [blame] | 2819 | ctx_resched(cpuctx, task_ctx, get_event_type(event)); |
Peter Zijlstra | 7b64801 | 2015-12-03 18:35:21 +0100 | [diff] [blame] | 2820 | } |
| 2821 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2822 | /* |
Tobias Tefke | 788faab | 2018-07-09 12:57:15 +0200 | [diff] [blame] | 2823 | * Enable an event. |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2824 | * |
| 2825 | * If event->ctx is a cloned context, callers must make sure that |
| 2826 | * every task struct that event->ctx->task could possibly point to |
| 2827 | * remains valid. This condition is satisfied when called through |
| 2828 | * perf_event_for_each_child or perf_event_for_each as described |
| 2829 | * for perf_event_disable. |
| 2830 | */ |
Peter Zijlstra | f63a8da | 2015-01-23 12:24:14 +0100 | [diff] [blame] | 2831 | static void _perf_event_enable(struct perf_event *event) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2832 | { |
| 2833 | struct perf_event_context *ctx = event->ctx; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2834 | |
Thomas Gleixner | e625cce1 | 2009-11-17 18:02:06 +0100 | [diff] [blame] | 2835 | raw_spin_lock_irq(&ctx->lock); |
Peter Zijlstra | 6e801e01 | 2016-01-26 12:17:08 +0100 | [diff] [blame] | 2836 | if (event->state >= PERF_EVENT_STATE_INACTIVE || |
| 2837 | event->state < PERF_EVENT_STATE_ERROR) { |
Peter Zijlstra | 7b64801 | 2015-12-03 18:35:21 +0100 | [diff] [blame] | 2838 | raw_spin_unlock_irq(&ctx->lock); |
| 2839 | return; |
| 2840 | } |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2841 | |
| 2842 | /* |
| 2843 | * If the event is in error state, clear that first. |
Peter Zijlstra | 7b64801 | 2015-12-03 18:35:21 +0100 | [diff] [blame] | 2844 | * |
| 2845 | * That way, if we see the event in error state below, we know that it |
| 2846 | * has gone back into error state, as distinct from the task having |
| 2847 | * been scheduled away before the cross-call arrived. |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2848 | */ |
| 2849 | if (event->state == PERF_EVENT_STATE_ERROR) |
| 2850 | event->state = PERF_EVENT_STATE_OFF; |
Thomas Gleixner | e625cce1 | 2009-11-17 18:02:06 +0100 | [diff] [blame] | 2851 | raw_spin_unlock_irq(&ctx->lock); |
Peter Zijlstra | fe4b04f | 2011-02-02 13:19:09 +0100 | [diff] [blame] | 2852 | |
Peter Zijlstra | fae3fde | 2016-01-11 15:00:50 +0100 | [diff] [blame] | 2853 | event_function_call(event, __perf_event_enable, NULL); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2854 | } |
Peter Zijlstra | f63a8da | 2015-01-23 12:24:14 +0100 | [diff] [blame] | 2855 | |
| 2856 | /* |
| 2857 | * See perf_event_disable(); |
| 2858 | */ |
| 2859 | void perf_event_enable(struct perf_event *event) |
| 2860 | { |
| 2861 | struct perf_event_context *ctx; |
| 2862 | |
| 2863 | ctx = perf_event_ctx_lock(event); |
| 2864 | _perf_event_enable(event); |
| 2865 | perf_event_ctx_unlock(event, ctx); |
| 2866 | } |
Robert Richter | dcfce4a | 2011-10-11 17:11:08 +0200 | [diff] [blame] | 2867 | EXPORT_SYMBOL_GPL(perf_event_enable); |
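/*
 * Illustrative sketch (not part of this file): an in-kernel user that owns
 * a counter created with perf_event_create_kernel_counter() can bracket a
 * region of interest with the exported enable/disable calls, e.g.:
 *
 *	struct perf_event_attr attr = {
 *		.type		= PERF_TYPE_HARDWARE,
 *		.config		= PERF_COUNT_HW_CPU_CYCLES,
 *		.size		= sizeof(attr),
 *		.disabled	= 1,
 *	};
 *	struct perf_event *ev;
 *
 *	ev = perf_event_create_kernel_counter(&attr, cpu, NULL, NULL, NULL);
 *	...
 *	perf_event_enable(ev);
 *	...	measured section	...
 *	perf_event_disable(ev);
 *
 * Error handling and perf_event_release_kernel() are omitted for brevity.
 */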
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2868 | |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 2869 | struct stop_event_data { |
| 2870 | struct perf_event *event; |
| 2871 | unsigned int restart; |
| 2872 | }; |
| 2873 | |
Alexander Shishkin | 95ff4ca | 2015-12-02 18:41:11 +0200 | [diff] [blame] | 2874 | static int __perf_event_stop(void *info) |
| 2875 | { |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 2876 | struct stop_event_data *sd = info; |
| 2877 | struct perf_event *event = sd->event; |
Alexander Shishkin | 95ff4ca | 2015-12-02 18:41:11 +0200 | [diff] [blame] | 2878 | |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 2879 | /* if it's already INACTIVE, do nothing */ |
Alexander Shishkin | 95ff4ca | 2015-12-02 18:41:11 +0200 | [diff] [blame] | 2880 | if (READ_ONCE(event->state) != PERF_EVENT_STATE_ACTIVE) |
| 2881 | return 0; |
| 2882 | |
| 2883 | /* matches smp_wmb() in event_sched_in() */ |
| 2884 | smp_rmb(); |
| 2885 | |
| 2886 | /* |
| 2887 | * There is a window with interrupts enabled before we get here, |
| 2888 | * so we need to check again lest we try to stop another CPU's event. |
| 2889 | */ |
| 2890 | if (READ_ONCE(event->oncpu) != smp_processor_id()) |
| 2891 | return -EAGAIN; |
| 2892 | |
| 2893 | event->pmu->stop(event, PERF_EF_UPDATE); |
| 2894 | |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 2895 | /* |
| 2896 | * May race with the actual stop (through perf_pmu_output_stop()), |
| 2897 | * but it is only used for events with AUX ring buffer, and such |
| 2898 | * events will refuse to restart because of rb::aux_mmap_count==0, |
| 2899 | * see comments in perf_aux_output_begin(). |
| 2900 | * |
Tobias Tefke | 788faab | 2018-07-09 12:57:15 +0200 | [diff] [blame] | 2901 | * Since this is happening on an event-local CPU, no trace is lost |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 2902 | * while restarting. |
| 2903 | */ |
| 2904 | if (sd->restart) |
Will Deacon | c9bbdd4 | 2016-08-15 11:42:45 +0100 | [diff] [blame] | 2905 | event->pmu->start(event, 0); |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 2906 | |
Alexander Shishkin | 95ff4ca | 2015-12-02 18:41:11 +0200 | [diff] [blame] | 2907 | return 0; |
| 2908 | } |
| 2909 | |
Alexander Shishkin | 767ae08 | 2016-09-06 16:23:49 +0300 | [diff] [blame] | 2910 | static int perf_event_stop(struct perf_event *event, int restart) |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 2911 | { |
| 2912 | struct stop_event_data sd = { |
| 2913 | .event = event, |
Alexander Shishkin | 767ae08 | 2016-09-06 16:23:49 +0300 | [diff] [blame] | 2914 | .restart = restart, |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 2915 | }; |
| 2916 | int ret = 0; |
| 2917 | |
| 2918 | do { |
| 2919 | if (READ_ONCE(event->state) != PERF_EVENT_STATE_ACTIVE) |
| 2920 | return 0; |
| 2921 | |
| 2922 | /* matches smp_wmb() in event_sched_in() */ |
| 2923 | smp_rmb(); |
| 2924 | |
| 2925 | /* |
| 2926 | * We only want to restart ACTIVE events, so if the event goes |
| 2927 | * inactive here (event->oncpu==-1), there's nothing more to do; |
| 2928 | * fall through with ret==-ENXIO. |
| 2929 | */ |
| 2930 | ret = cpu_function_call(READ_ONCE(event->oncpu), |
| 2931 | __perf_event_stop, &sd); |
| 2932 | } while (ret == -EAGAIN); |
| 2933 | |
| 2934 | return ret; |
| 2935 | } |
| 2936 | |
| 2937 | /* |
 | 2938 | * In order to contain the amount of raciness and trickiness in the address filter |
 | 2939 | * configuration management, it is a two-part process: |
| 2940 | * |
| 2941 | * (p1) when userspace mappings change as a result of (1) or (2) or (3) below, |
| 2942 | * we update the addresses of corresponding vmas in |
Alexander Shishkin | c60f83b | 2019-02-15 13:56:55 +0200 | [diff] [blame] | 2943 | * event::addr_filter_ranges array and bump the event::addr_filters_gen; |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 2944 | * (p2) when an event is scheduled in (pmu::add), it calls |
| 2945 | * perf_event_addr_filters_sync() which calls pmu::addr_filters_sync() |
| 2946 | * if the generation has changed since the previous call. |
| 2947 | * |
| 2948 | * If (p1) happens while the event is active, we restart it to force (p2). |
| 2949 | * |
| 2950 | * (1) perf_addr_filters_apply(): adjusting filters' offsets based on |
| 2951 | * pre-existing mappings, called once when new filters arrive via SET_FILTER |
| 2952 | * ioctl; |
| 2953 | * (2) perf_addr_filters_adjust(): adjusting filters' offsets based on newly |
| 2954 | * registered mapping, called for every new mmap(), with mm::mmap_sem down |
| 2955 | * for reading; |
| 2956 | * (3) perf_event_addr_filters_exec(): clearing filters' offsets in the process |
| 2957 | * of exec. |
| 2958 | */ |
| 2959 | void perf_event_addr_filters_sync(struct perf_event *event) |
| 2960 | { |
| 2961 | struct perf_addr_filters_head *ifh = perf_event_addr_filters(event); |
| 2962 | |
| 2963 | if (!has_addr_filter(event)) |
| 2964 | return; |
| 2965 | |
| 2966 | raw_spin_lock(&ifh->lock); |
| 2967 | if (event->addr_filters_gen != event->hw.addr_filters_gen) { |
| 2968 | event->pmu->addr_filters_sync(event); |
| 2969 | event->hw.addr_filters_gen = event->addr_filters_gen; |
| 2970 | } |
| 2971 | raw_spin_unlock(&ifh->lock); |
| 2972 | } |
| 2973 | EXPORT_SYMBOL_GPL(perf_event_addr_filters_sync); |
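/*
 * Illustrative sketch, not part of this file: how a hypothetical PMU driver
 * might participate in part (p2) above. The foo_* names are invented for
 * illustration only.
 *
 *	static int foo_pmu_add(struct perf_event *event, int flags)
 *	{
 *		// Pick up filter changes made while the event was scheduled
 *		// out, i.e. any bump of event->addr_filters_gen from (p1).
 *		perf_event_addr_filters_sync(event);
 *		return foo_start_counting(event, flags);
 *	}
 *
 *	// Invoked through event->pmu->addr_filters_sync(), under ifh->lock,
 *	// only when the generation actually changed.
 *	static void foo_addr_filters_sync(struct perf_event *event)
 *	{
 *		foo_program_address_ranges(event);
 *	}
 */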
| 2974 | |
Peter Zijlstra | f63a8da | 2015-01-23 12:24:14 +0100 | [diff] [blame] | 2975 | static int _perf_event_refresh(struct perf_event *event, int refresh) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2976 | { |
| 2977 | /* |
| 2978 | * not supported on inherited events |
| 2979 | */ |
Franck Bui-Huu | 2e939d1 | 2010-11-23 16:21:44 +0100 | [diff] [blame] | 2980 | if (event->attr.inherit || !is_sampling_event(event)) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2981 | return -EINVAL; |
| 2982 | |
| 2983 | atomic_add(refresh, &event->event_limit); |
Peter Zijlstra | f63a8da | 2015-01-23 12:24:14 +0100 | [diff] [blame] | 2984 | _perf_event_enable(event); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2985 | |
| 2986 | return 0; |
| 2987 | } |
Peter Zijlstra | f63a8da | 2015-01-23 12:24:14 +0100 | [diff] [blame] | 2988 | |
| 2989 | /* |
| 2990 | * See perf_event_disable() |
| 2991 | */ |
| 2992 | int perf_event_refresh(struct perf_event *event, int refresh) |
| 2993 | { |
| 2994 | struct perf_event_context *ctx; |
| 2995 | int ret; |
| 2996 | |
| 2997 | ctx = perf_event_ctx_lock(event); |
| 2998 | ret = _perf_event_refresh(event, refresh); |
| 2999 | perf_event_ctx_unlock(event, ctx); |
| 3000 | |
| 3001 | return ret; |
| 3002 | } |
Avi Kivity | 26ca5c1 | 2011-06-29 18:42:37 +0300 | [diff] [blame] | 3003 | EXPORT_SYMBOL_GPL(perf_event_refresh); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3004 | |
Milind Chabbi | 32ff77e | 2018-03-12 14:45:47 +0100 | [diff] [blame] | 3005 | static int perf_event_modify_breakpoint(struct perf_event *bp, |
| 3006 | struct perf_event_attr *attr) |
| 3007 | { |
| 3008 | int err; |
| 3009 | |
| 3010 | _perf_event_disable(bp); |
| 3011 | |
| 3012 | err = modify_user_hw_breakpoint_check(bp, attr, true); |
Milind Chabbi | 32ff77e | 2018-03-12 14:45:47 +0100 | [diff] [blame] | 3013 | |
Jiri Olsa | bf06278 | 2018-08-27 11:12:28 +0200 | [diff] [blame] | 3014 | if (!bp->attr.disabled) |
Milind Chabbi | 32ff77e | 2018-03-12 14:45:47 +0100 | [diff] [blame] | 3015 | _perf_event_enable(bp); |
Jiri Olsa | bf06278 | 2018-08-27 11:12:28 +0200 | [diff] [blame] | 3016 | |
| 3017 | return err; |
Milind Chabbi | 32ff77e | 2018-03-12 14:45:47 +0100 | [diff] [blame] | 3018 | } |
| 3019 | |
| 3020 | static int perf_event_modify_attr(struct perf_event *event, |
| 3021 | struct perf_event_attr *attr) |
| 3022 | { |
| 3023 | if (event->attr.type != attr->type) |
| 3024 | return -EINVAL; |
| 3025 | |
| 3026 | switch (event->attr.type) { |
| 3027 | case PERF_TYPE_BREAKPOINT: |
| 3028 | return perf_event_modify_breakpoint(event, attr); |
| 3029 | default: |
 | 3030 | /* Placeholder for future additions. */ |
| 3031 | return -EOPNOTSUPP; |
| 3032 | } |
| 3033 | } |
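/*
 * Illustrative userspace sketch, not part of this file: the usual path into
 * perf_event_modify_attr() is the PERF_EVENT_IOC_MODIFY_ATTRIBUTES ioctl.
 * 'fd' is assumed to be an existing PERF_TYPE_BREAKPOINT event fd and
 * 'watched' an assumed variable, both invented for illustration.
 *
 *	struct perf_event_attr attr = {
 *		.type		= PERF_TYPE_BREAKPOINT,
 *		.bp_type	= HW_BREAKPOINT_W,
 *		.bp_addr	= (__u64)(unsigned long)&watched,
 *		.bp_len		= HW_BREAKPOINT_LEN_8,
 *	};
 *
 *	if (ioctl(fd, PERF_EVENT_IOC_MODIFY_ATTRIBUTES, &attr))
 *		perror("PERF_EVENT_IOC_MODIFY_ATTRIBUTES");
 */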
| 3034 | |
Frederic Weisbecker | 5b0311e | 2010-01-17 11:59:13 +0100 | [diff] [blame] | 3035 | static void ctx_sched_out(struct perf_event_context *ctx, |
| 3036 | struct perf_cpu_context *cpuctx, |
| 3037 | enum event_type_t event_type) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3038 | { |
Peter Zijlstra | 6668128 | 2017-11-13 14:28:38 +0100 | [diff] [blame] | 3039 | struct perf_event *event, *tmp; |
Peter Zijlstra | db24d33 | 2011-04-09 21:17:45 +0200 | [diff] [blame] | 3040 | int is_active = ctx->is_active; |
Peter Zijlstra | c994d61 | 2016-01-08 09:20:23 +0100 | [diff] [blame] | 3041 | |
| 3042 | lockdep_assert_held(&ctx->lock); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3043 | |
Peter Zijlstra | 39a4364 | 2016-01-11 12:46:35 +0100 | [diff] [blame] | 3044 | if (likely(!ctx->nr_events)) { |
| 3045 | /* |
| 3046 | * See __perf_remove_from_context(). |
| 3047 | */ |
| 3048 | WARN_ON_ONCE(ctx->is_active); |
| 3049 | if (ctx->task) |
| 3050 | WARN_ON_ONCE(cpuctx->task_ctx); |
| 3051 | return; |
| 3052 | } |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3053 | |
Peter Zijlstra | db24d33 | 2011-04-09 21:17:45 +0200 | [diff] [blame] | 3054 | ctx->is_active &= ~event_type; |
Peter Zijlstra | 3cbaa59 | 2016-02-24 18:45:47 +0100 | [diff] [blame] | 3055 | if (!(ctx->is_active & EVENT_ALL)) |
| 3056 | ctx->is_active = 0; |
| 3057 | |
Peter Zijlstra | 63e30d3 | 2016-01-08 11:39:10 +0100 | [diff] [blame] | 3058 | if (ctx->task) { |
| 3059 | WARN_ON_ONCE(cpuctx->task_ctx != ctx); |
| 3060 | if (!ctx->is_active) |
| 3061 | cpuctx->task_ctx = NULL; |
| 3062 | } |
Peter Zijlstra | facc430 | 2011-04-09 21:17:42 +0200 | [diff] [blame] | 3063 | |
Peter Zijlstra | 8fdc653 | 2016-03-29 09:26:44 +0200 | [diff] [blame] | 3064 | /* |
| 3065 | * Always update time if it was set; not only when it changes. |
| 3066 | * Otherwise we can 'forget' to update time for any but the last |
| 3067 | * context we sched out. For example: |
| 3068 | * |
| 3069 | * ctx_sched_out(.event_type = EVENT_FLEXIBLE) |
| 3070 | * ctx_sched_out(.event_type = EVENT_PINNED) |
| 3071 | * |
| 3072 | * would only update time for the pinned events. |
| 3073 | */ |
Peter Zijlstra | 3cbaa59 | 2016-02-24 18:45:47 +0100 | [diff] [blame] | 3074 | if (is_active & EVENT_TIME) { |
| 3075 | /* update (and stop) ctx time */ |
| 3076 | update_context_time(ctx); |
| 3077 | update_cgrp_time_from_cpuctx(cpuctx); |
| 3078 | } |
| 3079 | |
Peter Zijlstra | 8fdc653 | 2016-03-29 09:26:44 +0200 | [diff] [blame] | 3080 | is_active ^= ctx->is_active; /* changed bits */ |
| 3081 | |
Peter Zijlstra | 3cbaa59 | 2016-02-24 18:45:47 +0100 | [diff] [blame] | 3082 | if (!ctx->nr_active || !(is_active & EVENT_ALL)) |
Peter Zijlstra | facc430 | 2011-04-09 21:17:42 +0200 | [diff] [blame] | 3083 | return; |
Frederic Weisbecker | 5b0311e | 2010-01-17 11:59:13 +0100 | [diff] [blame] | 3084 | |
Ian Rogers | fd7d551 | 2019-06-01 01:27:22 -0700 | [diff] [blame] | 3085 | /* |
 | 3086 | * If we had been multiplexing, no rotations are necessary now that no |
 | 3087 | * events are active. |
| 3088 | */ |
| 3089 | ctx->rotate_necessary = 0; |
| 3090 | |
Peter Zijlstra | 075e0b0 | 2011-04-09 21:17:40 +0200 | [diff] [blame] | 3091 | perf_pmu_disable(ctx->pmu); |
Peter Zijlstra | 3cbaa59 | 2016-02-24 18:45:47 +0100 | [diff] [blame] | 3092 | if (is_active & EVENT_PINNED) { |
Peter Zijlstra | 6668128 | 2017-11-13 14:28:38 +0100 | [diff] [blame] | 3093 | list_for_each_entry_safe(event, tmp, &ctx->pinned_active, active_list) |
Frederic Weisbecker | 889ff01 | 2010-01-09 20:04:47 +0100 | [diff] [blame] | 3094 | group_sched_out(event, cpuctx, ctx); |
Peter Zijlstra | 9ed6060 | 2010-06-11 17:36:35 +0200 | [diff] [blame] | 3095 | } |
Frederic Weisbecker | 889ff01 | 2010-01-09 20:04:47 +0100 | [diff] [blame] | 3096 | |
Peter Zijlstra | 3cbaa59 | 2016-02-24 18:45:47 +0100 | [diff] [blame] | 3097 | if (is_active & EVENT_FLEXIBLE) { |
Peter Zijlstra | 6668128 | 2017-11-13 14:28:38 +0100 | [diff] [blame] | 3098 | list_for_each_entry_safe(event, tmp, &ctx->flexible_active, active_list) |
Xiao Guangrong | 8c9ed8e | 2009-09-25 13:51:17 +0800 | [diff] [blame] | 3099 | group_sched_out(event, cpuctx, ctx); |
Peter Zijlstra | 9ed6060 | 2010-06-11 17:36:35 +0200 | [diff] [blame] | 3100 | } |
Peter Zijlstra | 1b9a644 | 2010-09-07 18:32:22 +0200 | [diff] [blame] | 3101 | perf_pmu_enable(ctx->pmu); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3102 | } |
| 3103 | |
| 3104 | /* |
Peter Zijlstra | 5a3126d | 2013-10-07 17:12:48 +0200 | [diff] [blame] | 3105 | * Test whether two contexts are equivalent, i.e. whether they have both been |
| 3106 | * cloned from the same version of the same context. |
| 3107 | * |
| 3108 | * Equivalence is measured using a generation number in the context that is |
| 3109 | * incremented on each modification to it; see unclone_ctx(), list_add_event() |
| 3110 | * and list_del_event(). |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3111 | */ |
| 3112 | static int context_equiv(struct perf_event_context *ctx1, |
| 3113 | struct perf_event_context *ctx2) |
| 3114 | { |
Peter Zijlstra | 211de6e | 2014-09-30 19:23:08 +0200 | [diff] [blame] | 3115 | lockdep_assert_held(&ctx1->lock); |
| 3116 | lockdep_assert_held(&ctx2->lock); |
| 3117 | |
Peter Zijlstra | 5a3126d | 2013-10-07 17:12:48 +0200 | [diff] [blame] | 3118 | /* Pinning disables the swap optimization */ |
| 3119 | if (ctx1->pin_count || ctx2->pin_count) |
| 3120 | return 0; |
| 3121 | |
| 3122 | /* If ctx1 is the parent of ctx2 */ |
| 3123 | if (ctx1 == ctx2->parent_ctx && ctx1->generation == ctx2->parent_gen) |
| 3124 | return 1; |
| 3125 | |
| 3126 | /* If ctx2 is the parent of ctx1 */ |
| 3127 | if (ctx1->parent_ctx == ctx2 && ctx1->parent_gen == ctx2->generation) |
| 3128 | return 1; |
| 3129 | |
| 3130 | /* |
| 3131 | * If ctx1 and ctx2 have the same parent; we flatten the parent |
| 3132 | * hierarchy, see perf_event_init_context(). |
| 3133 | */ |
| 3134 | if (ctx1->parent_ctx && ctx1->parent_ctx == ctx2->parent_ctx && |
| 3135 | ctx1->parent_gen == ctx2->parent_gen) |
| 3136 | return 1; |
| 3137 | |
| 3138 | /* Unmatched */ |
| 3139 | return 0; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3140 | } |
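/*
 * Illustrative example, not from the source: after fork(), the child's
 * context is cloned from the parent's and records parent_ctx/parent_gen.
 * As long as neither side adds or removes events afterwards (so the
 * generation numbers still match), context_equiv() returns 1 and the
 * context-switch path below can simply swap the two contexts instead of
 * scheduling every event out of one and into the other.
 */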
| 3141 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3142 | static void __perf_event_sync_stat(struct perf_event *event, |
| 3143 | struct perf_event *next_event) |
| 3144 | { |
| 3145 | u64 value; |
| 3146 | |
| 3147 | if (!event->attr.inherit_stat) |
| 3148 | return; |
| 3149 | |
| 3150 | /* |
| 3151 | * Update the event value, we cannot use perf_event_read() |
| 3152 | * because we're in the middle of a context switch and have IRQs |
 | 3153 | * disabled, which upsets smp_call_function_single(); however, |
| 3154 | * we know the event must be on the current CPU, therefore we |
| 3155 | * don't need to use it. |
| 3156 | */ |
Peter Zijlstra | 0d3d73a | 2017-09-05 14:16:28 +0200 | [diff] [blame] | 3157 | if (event->state == PERF_EVENT_STATE_ACTIVE) |
Peter Zijlstra | 3dbebf1 | 2009-11-20 22:19:52 +0100 | [diff] [blame] | 3158 | event->pmu->read(event); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3159 | |
Peter Zijlstra | 0d3d73a | 2017-09-05 14:16:28 +0200 | [diff] [blame] | 3160 | perf_event_update_time(event); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3161 | |
| 3162 | /* |
| 3163 | * In order to keep per-task stats reliable we need to flip the event |
| 3164 | * values when we flip the contexts. |
| 3165 | */ |
Peter Zijlstra | e785059 | 2010-05-21 14:43:08 +0200 | [diff] [blame] | 3166 | value = local64_read(&next_event->count); |
| 3167 | value = local64_xchg(&event->count, value); |
| 3168 | local64_set(&next_event->count, value); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3169 | |
| 3170 | swap(event->total_time_enabled, next_event->total_time_enabled); |
| 3171 | swap(event->total_time_running, next_event->total_time_running); |
| 3172 | |
| 3173 | /* |
| 3174 | * Since we swizzled the values, update the user visible data too. |
| 3175 | */ |
| 3176 | perf_event_update_userpage(event); |
| 3177 | perf_event_update_userpage(next_event); |
| 3178 | } |
| 3179 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3180 | static void perf_event_sync_stat(struct perf_event_context *ctx, |
| 3181 | struct perf_event_context *next_ctx) |
| 3182 | { |
| 3183 | struct perf_event *event, *next_event; |
| 3184 | |
| 3185 | if (!ctx->nr_stat) |
| 3186 | return; |
| 3187 | |
Peter Zijlstra | 02ffdbc | 2009-11-20 22:19:50 +0100 | [diff] [blame] | 3188 | update_context_time(ctx); |
| 3189 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3190 | event = list_first_entry(&ctx->event_list, |
| 3191 | struct perf_event, event_entry); |
| 3192 | |
| 3193 | next_event = list_first_entry(&next_ctx->event_list, |
| 3194 | struct perf_event, event_entry); |
| 3195 | |
| 3196 | while (&event->event_entry != &ctx->event_list && |
| 3197 | &next_event->event_entry != &next_ctx->event_list) { |
| 3198 | |
| 3199 | __perf_event_sync_stat(event, next_event); |
| 3200 | |
| 3201 | event = list_next_entry(event, event_entry); |
| 3202 | next_event = list_next_entry(next_event, event_entry); |
| 3203 | } |
| 3204 | } |
| 3205 | |
Peter Zijlstra | fe4b04f | 2011-02-02 13:19:09 +0100 | [diff] [blame] | 3206 | static void perf_event_context_sched_out(struct task_struct *task, int ctxn, |
| 3207 | struct task_struct *next) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3208 | { |
Peter Zijlstra | 8dc85d547 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 3209 | struct perf_event_context *ctx = task->perf_event_ctxp[ctxn]; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3210 | struct perf_event_context *next_ctx; |
Peter Zijlstra | 5a3126d | 2013-10-07 17:12:48 +0200 | [diff] [blame] | 3211 | struct perf_event_context *parent, *next_parent; |
Peter Zijlstra | 108b02c | 2010-09-06 14:32:03 +0200 | [diff] [blame] | 3212 | struct perf_cpu_context *cpuctx; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3213 | int do_switch = 1; |
| 3214 | |
Peter Zijlstra | 108b02c | 2010-09-06 14:32:03 +0200 | [diff] [blame] | 3215 | if (likely(!ctx)) |
| 3216 | return; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3217 | |
Peter Zijlstra | 108b02c | 2010-09-06 14:32:03 +0200 | [diff] [blame] | 3218 | cpuctx = __get_cpu_context(ctx); |
| 3219 | if (!cpuctx->task_ctx) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3220 | return; |
| 3221 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3222 | rcu_read_lock(); |
Peter Zijlstra | 8dc85d547 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 3223 | next_ctx = next->perf_event_ctxp[ctxn]; |
Peter Zijlstra | 5a3126d | 2013-10-07 17:12:48 +0200 | [diff] [blame] | 3224 | if (!next_ctx) |
| 3225 | goto unlock; |
| 3226 | |
| 3227 | parent = rcu_dereference(ctx->parent_ctx); |
| 3228 | next_parent = rcu_dereference(next_ctx->parent_ctx); |
| 3229 | |
 | 3230 | /* If neither context has a parent context, they cannot be clones. */ |
Jiri Olsa | 802c8a6 | 2014-09-12 13:18:28 +0200 | [diff] [blame] | 3231 | if (!parent && !next_parent) |
Peter Zijlstra | 5a3126d | 2013-10-07 17:12:48 +0200 | [diff] [blame] | 3232 | goto unlock; |
| 3233 | |
| 3234 | if (next_parent == ctx || next_ctx == parent || next_parent == parent) { |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3235 | /* |
| 3236 | * Looks like the two contexts are clones, so we might be |
| 3237 | * able to optimize the context switch. We lock both |
| 3238 | * contexts and check that they are clones under the |
| 3239 | * lock (including re-checking that neither has been |
| 3240 | * uncloned in the meantime). It doesn't matter which |
| 3241 | * order we take the locks because no other cpu could |
| 3242 | * be trying to lock both of these tasks. |
| 3243 | */ |
Thomas Gleixner | e625cce1 | 2009-11-17 18:02:06 +0100 | [diff] [blame] | 3244 | raw_spin_lock(&ctx->lock); |
| 3245 | raw_spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3246 | if (context_equiv(ctx, next_ctx)) { |
Alexey Budankov | c2b98a8 | 2019-10-23 10:13:56 +0300 | [diff] [blame] | 3247 | struct pmu *pmu = ctx->pmu; |
| 3248 | |
Peter Zijlstra | 63b6da3 | 2016-01-14 16:05:37 +0100 | [diff] [blame] | 3249 | WRITE_ONCE(ctx->task, next); |
| 3250 | WRITE_ONCE(next_ctx->task, task); |
Yan, Zheng | 5a158c3 | 2014-11-04 21:56:02 -0500 | [diff] [blame] | 3251 | |
Alexey Budankov | c2b98a8 | 2019-10-23 10:13:56 +0300 | [diff] [blame] | 3252 | /* |
 | 3253 | * PMU-specific parts of the task perf context can require |
 | 3254 | * additional synchronization. As an example of such |
 | 3255 | * synchronization see the implementation details of Intel |
 | 3256 | * LBR call stack data profiling. |
| 3257 | */ |
| 3258 | if (pmu->swap_task_ctx) |
| 3259 | pmu->swap_task_ctx(ctx, next_ctx); |
| 3260 | else |
| 3261 | swap(ctx->task_ctx_data, next_ctx->task_ctx_data); |
Yan, Zheng | 5a158c3 | 2014-11-04 21:56:02 -0500 | [diff] [blame] | 3262 | |
Peter Zijlstra | 63b6da3 | 2016-01-14 16:05:37 +0100 | [diff] [blame] | 3263 | /* |
| 3264 | * RCU_INIT_POINTER here is safe because we've not |
 | 3265 | * modified the ctx and the above modifications of |
| 3266 | * ctx->task and ctx->task_ctx_data are immaterial |
| 3267 | * since those values are always verified under |
| 3268 | * ctx->lock which we're now holding. |
| 3269 | */ |
| 3270 | RCU_INIT_POINTER(task->perf_event_ctxp[ctxn], next_ctx); |
| 3271 | RCU_INIT_POINTER(next->perf_event_ctxp[ctxn], ctx); |
| 3272 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3273 | do_switch = 0; |
| 3274 | |
| 3275 | perf_event_sync_stat(ctx, next_ctx); |
| 3276 | } |
Thomas Gleixner | e625cce1 | 2009-11-17 18:02:06 +0100 | [diff] [blame] | 3277 | raw_spin_unlock(&next_ctx->lock); |
| 3278 | raw_spin_unlock(&ctx->lock); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3279 | } |
Peter Zijlstra | 5a3126d | 2013-10-07 17:12:48 +0200 | [diff] [blame] | 3280 | unlock: |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3281 | rcu_read_unlock(); |
| 3282 | |
| 3283 | if (do_switch) { |
Peter Zijlstra | facc430 | 2011-04-09 21:17:42 +0200 | [diff] [blame] | 3284 | raw_spin_lock(&ctx->lock); |
Alexander Shishkin | 487f05e | 2017-01-19 18:43:30 +0200 | [diff] [blame] | 3285 | task_ctx_sched_out(cpuctx, ctx, EVENT_ALL); |
Peter Zijlstra | facc430 | 2011-04-09 21:17:42 +0200 | [diff] [blame] | 3286 | raw_spin_unlock(&ctx->lock); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3287 | } |
| 3288 | } |
| 3289 | |
Peter Zijlstra | e48c178 | 2016-07-06 09:18:30 +0200 | [diff] [blame] | 3290 | static DEFINE_PER_CPU(struct list_head, sched_cb_list); |
| 3291 | |
Yan, Zheng | ba53250 | 2014-11-04 21:55:58 -0500 | [diff] [blame] | 3292 | void perf_sched_cb_dec(struct pmu *pmu) |
| 3293 | { |
Peter Zijlstra | e48c178 | 2016-07-06 09:18:30 +0200 | [diff] [blame] | 3294 | struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context); |
| 3295 | |
Yan, Zheng | ba53250 | 2014-11-04 21:55:58 -0500 | [diff] [blame] | 3296 | this_cpu_dec(perf_sched_cb_usages); |
Peter Zijlstra | e48c178 | 2016-07-06 09:18:30 +0200 | [diff] [blame] | 3297 | |
| 3298 | if (!--cpuctx->sched_cb_usage) |
| 3299 | list_del(&cpuctx->sched_cb_entry); |
Yan, Zheng | ba53250 | 2014-11-04 21:55:58 -0500 | [diff] [blame] | 3300 | } |
| 3301 | |
Peter Zijlstra | e48c178 | 2016-07-06 09:18:30 +0200 | [diff] [blame] | 3302 | |
Yan, Zheng | ba53250 | 2014-11-04 21:55:58 -0500 | [diff] [blame] | 3303 | void perf_sched_cb_inc(struct pmu *pmu) |
| 3304 | { |
Peter Zijlstra | e48c178 | 2016-07-06 09:18:30 +0200 | [diff] [blame] | 3305 | struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context); |
| 3306 | |
| 3307 | if (!cpuctx->sched_cb_usage++) |
| 3308 | list_add(&cpuctx->sched_cb_entry, this_cpu_ptr(&sched_cb_list)); |
| 3309 | |
Yan, Zheng | ba53250 | 2014-11-04 21:55:58 -0500 | [diff] [blame] | 3310 | this_cpu_inc(perf_sched_cb_usages); |
| 3311 | } |
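/*
 * Illustrative sketch, not part of this file: a hypothetical PMU that must
 * flush per-task hardware state on context switch would pair the counter
 * increments/decrements above with a pmu::sched_task() handler. The foo_*
 * names are invented for illustration.
 *
 *	static int foo_pmu_add(struct perf_event *event, int flags)
 *	{
 *		if (foo_needs_ctx_switch_flush(event))
 *			perf_sched_cb_inc(event->ctx->pmu);
 *		return foo_start_counting(event, flags);
 *	}
 *
 *	static void foo_pmu_del(struct perf_event *event, int flags)
 *	{
 *		if (foo_needs_ctx_switch_flush(event))
 *			perf_sched_cb_dec(event->ctx->pmu);
 *		foo_stop_counting(event, flags);
 *	}
 *
 *	// Runs from perf_pmu_sched_task() below with the ctx locked and
 *	// the PMU disabled.
 *	static void foo_sched_task(struct perf_event_context *ctx, bool sched_in)
 *	{
 *		if (!sched_in)
 *			foo_drain_buffers();
 *	}
 */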
| 3312 | |
| 3313 | /* |
| 3314 | * This function provides the context switch callback to the lower code |
| 3315 | * layer. It is invoked ONLY when the context switch callback is enabled. |
Peter Zijlstra | 09e61b4f | 2016-07-06 18:02:43 +0200 | [diff] [blame] | 3316 | * |
| 3317 | * This callback is relevant even to per-cpu events; for example multi event |
| 3318 | * PEBS requires this to provide PID/TID information. This requires we flush |
| 3319 | * all queued PEBS records before we context switch to a new task. |
Yan, Zheng | ba53250 | 2014-11-04 21:55:58 -0500 | [diff] [blame] | 3320 | */ |
| 3321 | static void perf_pmu_sched_task(struct task_struct *prev, |
| 3322 | struct task_struct *next, |
| 3323 | bool sched_in) |
| 3324 | { |
| 3325 | struct perf_cpu_context *cpuctx; |
| 3326 | struct pmu *pmu; |
Yan, Zheng | ba53250 | 2014-11-04 21:55:58 -0500 | [diff] [blame] | 3327 | |
| 3328 | if (prev == next) |
| 3329 | return; |
| 3330 | |
Peter Zijlstra | e48c178 | 2016-07-06 09:18:30 +0200 | [diff] [blame] | 3331 | list_for_each_entry(cpuctx, this_cpu_ptr(&sched_cb_list), sched_cb_entry) { |
David Carrillo-Cisneros | 1fd7e41 | 2017-01-18 11:24:54 -0800 | [diff] [blame] | 3332 | pmu = cpuctx->ctx.pmu; /* software PMUs will not have sched_task */ |
Yan, Zheng | ba53250 | 2014-11-04 21:55:58 -0500 | [diff] [blame] | 3333 | |
Peter Zijlstra | e48c178 | 2016-07-06 09:18:30 +0200 | [diff] [blame] | 3334 | if (WARN_ON_ONCE(!pmu->sched_task)) |
| 3335 | continue; |
Yan, Zheng | ba53250 | 2014-11-04 21:55:58 -0500 | [diff] [blame] | 3336 | |
Peter Zijlstra | e48c178 | 2016-07-06 09:18:30 +0200 | [diff] [blame] | 3337 | perf_ctx_lock(cpuctx, cpuctx->task_ctx); |
| 3338 | perf_pmu_disable(pmu); |
Yan, Zheng | ba53250 | 2014-11-04 21:55:58 -0500 | [diff] [blame] | 3339 | |
Peter Zijlstra | e48c178 | 2016-07-06 09:18:30 +0200 | [diff] [blame] | 3340 | pmu->sched_task(cpuctx->task_ctx, sched_in); |
Yan, Zheng | ba53250 | 2014-11-04 21:55:58 -0500 | [diff] [blame] | 3341 | |
Peter Zijlstra | e48c178 | 2016-07-06 09:18:30 +0200 | [diff] [blame] | 3342 | perf_pmu_enable(pmu); |
| 3343 | perf_ctx_unlock(cpuctx, cpuctx->task_ctx); |
Yan, Zheng | ba53250 | 2014-11-04 21:55:58 -0500 | [diff] [blame] | 3344 | } |
Yan, Zheng | ba53250 | 2014-11-04 21:55:58 -0500 | [diff] [blame] | 3345 | } |
| 3346 | |
Adrian Hunter | 45ac140 | 2015-07-21 12:44:02 +0300 | [diff] [blame] | 3347 | static void perf_event_switch(struct task_struct *task, |
| 3348 | struct task_struct *next_prev, bool sched_in); |
| 3349 | |
Peter Zijlstra | 8dc85d547 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 3350 | #define for_each_task_context_nr(ctxn) \ |
| 3351 | for ((ctxn) = 0; (ctxn) < perf_nr_task_contexts; (ctxn)++) |
| 3352 | |
| 3353 | /* |
| 3354 | * Called from scheduler to remove the events of the current task, |
| 3355 | * with interrupts disabled. |
| 3356 | * |
| 3357 | * We stop each event and update the event value in event->count. |
| 3358 | * |
| 3359 | * This does not protect us against NMI, but disable() |
| 3360 | * sets the disabled bit in the control field of event _before_ |
 | 3361 | * accessing the event control register. If an NMI hits, then it will |
| 3362 | * not restart the event. |
| 3363 | */ |
Jiri Olsa | ab0cce5 | 2012-05-23 13:13:02 +0200 | [diff] [blame] | 3364 | void __perf_event_task_sched_out(struct task_struct *task, |
| 3365 | struct task_struct *next) |
Peter Zijlstra | 8dc85d547 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 3366 | { |
| 3367 | int ctxn; |
| 3368 | |
Yan, Zheng | ba53250 | 2014-11-04 21:55:58 -0500 | [diff] [blame] | 3369 | if (__this_cpu_read(perf_sched_cb_usages)) |
| 3370 | perf_pmu_sched_task(task, next, false); |
| 3371 | |
Adrian Hunter | 45ac140 | 2015-07-21 12:44:02 +0300 | [diff] [blame] | 3372 | if (atomic_read(&nr_switch_events)) |
| 3373 | perf_event_switch(task, next, false); |
| 3374 | |
Peter Zijlstra | 8dc85d547 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 3375 | for_each_task_context_nr(ctxn) |
| 3376 | perf_event_context_sched_out(task, ctxn, next); |
Stephane Eranian | e5d1367 | 2011-02-14 11:20:01 +0200 | [diff] [blame] | 3377 | |
| 3378 | /* |
| 3379 | * if cgroup events exist on this CPU, then we need |
| 3380 | * to check if we have to switch out PMU state. |
 | 3381 | * cgroup events are system-wide mode only |
| 3382 | */ |
Christoph Lameter | 4a32fea | 2014-08-17 12:30:27 -0500 | [diff] [blame] | 3383 | if (atomic_read(this_cpu_ptr(&perf_cgroup_events))) |
Stephane Eranian | a8d757e | 2011-08-25 15:58:03 +0200 | [diff] [blame] | 3384 | perf_cgroup_sched_out(task, next); |
Peter Zijlstra | 8dc85d547 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 3385 | } |
| 3386 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3387 | /* |
| 3388 | * Called with IRQs disabled |
| 3389 | */ |
Frederic Weisbecker | 5b0311e | 2010-01-17 11:59:13 +0100 | [diff] [blame] | 3390 | static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx, |
| 3391 | enum event_type_t event_type) |
| 3392 | { |
| 3393 | ctx_sched_out(&cpuctx->ctx, cpuctx, event_type); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3394 | } |
| 3395 | |
Ian Rogers | 6eef8a71 | 2020-02-13 23:51:30 -0800 | [diff] [blame] | 3396 | static bool perf_less_group_idx(const void *l, const void *r) |
Peter Zijlstra | 1cac7b1 | 2017-11-13 14:28:30 +0100 | [diff] [blame] | 3397 | { |
Ian Rogers | 6eef8a71 | 2020-02-13 23:51:30 -0800 | [diff] [blame] | 3398 | const struct perf_event *le = l, *re = r; |
| 3399 | |
| 3400 | return le->group_index < re->group_index; |
| 3401 | } |
| 3402 | |
| 3403 | static void swap_ptr(void *l, void *r) |
| 3404 | { |
| 3405 | void **lp = l, **rp = r; |
| 3406 | |
| 3407 | swap(*lp, *rp); |
| 3408 | } |
| 3409 | |
| 3410 | static const struct min_heap_callbacks perf_min_heap = { |
| 3411 | .elem_size = sizeof(struct perf_event *), |
| 3412 | .less = perf_less_group_idx, |
| 3413 | .swp = swap_ptr, |
| 3414 | }; |
| 3415 | |
| 3416 | static void __heap_add(struct min_heap *heap, struct perf_event *event) |
| 3417 | { |
| 3418 | struct perf_event **itrs = heap->data; |
| 3419 | |
| 3420 | if (event) { |
| 3421 | itrs[heap->nr] = event; |
| 3422 | heap->nr++; |
| 3423 | } |
| 3424 | } |
| 3425 | |
Ian Rogers | 836196be | 2020-02-13 23:51:31 -0800 | [diff] [blame^] | 3426 | static noinline int visit_groups_merge(struct perf_cpu_context *cpuctx, |
| 3427 | struct perf_event_groups *groups, int cpu, |
Ian Rogers | 6eef8a71 | 2020-02-13 23:51:30 -0800 | [diff] [blame] | 3428 | int (*func)(struct perf_event *, void *), |
| 3429 | void *data) |
| 3430 | { |
| 3431 | /* Space for per CPU and/or any CPU event iterators. */ |
| 3432 | struct perf_event *itrs[2]; |
Ian Rogers | 836196be | 2020-02-13 23:51:31 -0800 | [diff] [blame^] | 3433 | struct min_heap event_heap; |
| 3434 | struct perf_event **evt; |
Peter Zijlstra | 1cac7b1 | 2017-11-13 14:28:30 +0100 | [diff] [blame] | 3435 | int ret; |
| 3436 | |
Ian Rogers | 836196be | 2020-02-13 23:51:31 -0800 | [diff] [blame^] | 3437 | if (cpuctx) { |
| 3438 | event_heap = (struct min_heap){ |
| 3439 | .data = cpuctx->heap, |
| 3440 | .nr = 0, |
| 3441 | .size = cpuctx->heap_size, |
| 3442 | }; |
| 3443 | } else { |
| 3444 | event_heap = (struct min_heap){ |
| 3445 | .data = itrs, |
| 3446 | .nr = 0, |
| 3447 | .size = ARRAY_SIZE(itrs), |
| 3448 | }; |
| 3449 | /* Events not within a CPU context may be on any CPU. */ |
| 3450 | __heap_add(&event_heap, perf_event_groups_first(groups, -1)); |
| 3451 | } |
| 3452 | evt = event_heap.data; |
| 3453 | |
Ian Rogers | 6eef8a71 | 2020-02-13 23:51:30 -0800 | [diff] [blame] | 3454 | __heap_add(&event_heap, perf_event_groups_first(groups, cpu)); |
Peter Zijlstra | 1cac7b1 | 2017-11-13 14:28:30 +0100 | [diff] [blame] | 3455 | |
Ian Rogers | 6eef8a71 | 2020-02-13 23:51:30 -0800 | [diff] [blame] | 3456 | min_heapify_all(&event_heap, &perf_min_heap); |
Peter Zijlstra | 1cac7b1 | 2017-11-13 14:28:30 +0100 | [diff] [blame] | 3457 | |
Ian Rogers | 6eef8a71 | 2020-02-13 23:51:30 -0800 | [diff] [blame] | 3458 | while (event_heap.nr) { |
Peter Zijlstra | 1cac7b1 | 2017-11-13 14:28:30 +0100 | [diff] [blame] | 3459 | ret = func(*evt, data); |
| 3460 | if (ret) |
| 3461 | return ret; |
| 3462 | |
| 3463 | *evt = perf_event_groups_next(*evt); |
Ian Rogers | 6eef8a71 | 2020-02-13 23:51:30 -0800 | [diff] [blame] | 3464 | if (*evt) |
| 3465 | min_heapify(&event_heap, 0, &perf_min_heap); |
| 3466 | else |
| 3467 | min_heap_pop(&event_heap, &perf_min_heap); |
Peter Zijlstra | 1cac7b1 | 2017-11-13 14:28:30 +0100 | [diff] [blame] | 3468 | } |
| 3469 | |
| 3470 | return 0; |
| 3471 | } |
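/*
 * Illustrative example, not from the source: for a task context (no cpuctx
 * passed in), suppose the "any CPU" (cpu == -1) sub-tree holds groups with
 * group_index 1 and 4 and the current-CPU sub-tree holds groups 2 and 3.
 * The min-heap above merges the two ordered streams, so func() is called
 * on groups 1, 2, 3, 4 (oldest group first across both sub-trees),
 * stopping early if func() returns non-zero.
 */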
| 3472 | |
Peter Zijlstra | ab6f824 | 2019-08-07 11:17:00 +0200 | [diff] [blame] | 3473 | static int merge_sched_in(struct perf_event *event, void *data) |
Peter Zijlstra | 1cac7b1 | 2017-11-13 14:28:30 +0100 | [diff] [blame] | 3474 | { |
Peter Zijlstra | 2c2366c | 2019-08-07 11:45:01 +0200 | [diff] [blame] | 3475 | struct perf_event_context *ctx = event->ctx; |
| 3476 | struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); |
| 3477 | int *can_add_hw = data; |
Peter Zijlstra | ab6f824 | 2019-08-07 11:17:00 +0200 | [diff] [blame] | 3478 | |
Peter Zijlstra | 1cac7b1 | 2017-11-13 14:28:30 +0100 | [diff] [blame] | 3479 | if (event->state <= PERF_EVENT_STATE_OFF) |
| 3480 | return 0; |
| 3481 | |
| 3482 | if (!event_filter_match(event)) |
| 3483 | return 0; |
| 3484 | |
Peter Zijlstra | 2c2366c | 2019-08-07 11:45:01 +0200 | [diff] [blame] | 3485 | if (group_can_go_on(event, cpuctx, *can_add_hw)) { |
| 3486 | if (!group_sched_in(event, cpuctx, ctx)) |
Peter Zijlstra | ab6f824 | 2019-08-07 11:17:00 +0200 | [diff] [blame] | 3487 | list_add_tail(&event->active_list, get_event_list(event)); |
Peter Zijlstra | 6668128 | 2017-11-13 14:28:38 +0100 | [diff] [blame] | 3488 | } |
Peter Zijlstra | 1cac7b1 | 2017-11-13 14:28:30 +0100 | [diff] [blame] | 3489 | |
Peter Zijlstra | ab6f824 | 2019-08-07 11:17:00 +0200 | [diff] [blame] | 3490 | if (event->state == PERF_EVENT_STATE_INACTIVE) { |
| 3491 | if (event->attr.pinned) |
| 3492 | perf_event_set_state(event, PERF_EVENT_STATE_ERROR); |
Peter Zijlstra | 1cac7b1 | 2017-11-13 14:28:30 +0100 | [diff] [blame] | 3493 | |
Peter Zijlstra | 2c2366c | 2019-08-07 11:45:01 +0200 | [diff] [blame] | 3494 | *can_add_hw = 0; |
| 3495 | ctx->rotate_necessary = 1; |
Peter Zijlstra | 1cac7b1 | 2017-11-13 14:28:30 +0100 | [diff] [blame] | 3496 | } |
| 3497 | |
| 3498 | return 0; |
| 3499 | } |
| 3500 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3501 | static void |
Frederic Weisbecker | 5b0311e | 2010-01-17 11:59:13 +0100 | [diff] [blame] | 3502 | ctx_pinned_sched_in(struct perf_event_context *ctx, |
Peter Zijlstra | 6e37738 | 2010-02-11 13:21:58 +0100 | [diff] [blame] | 3503 | struct perf_cpu_context *cpuctx) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3504 | { |
Peter Zijlstra | 2c2366c | 2019-08-07 11:45:01 +0200 | [diff] [blame] | 3505 | int can_add_hw = 1; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3506 | |
Ian Rogers | 836196be | 2020-02-13 23:51:31 -0800 | [diff] [blame^] | 3507 | if (ctx != &cpuctx->ctx) |
| 3508 | cpuctx = NULL; |
| 3509 | |
| 3510 | visit_groups_merge(cpuctx, &ctx->pinned_groups, |
Peter Zijlstra | 1cac7b1 | 2017-11-13 14:28:30 +0100 | [diff] [blame] | 3511 | smp_processor_id(), |
Peter Zijlstra | 2c2366c | 2019-08-07 11:45:01 +0200 | [diff] [blame] | 3512 | merge_sched_in, &can_add_hw); |
Frederic Weisbecker | 5b0311e | 2010-01-17 11:59:13 +0100 | [diff] [blame] | 3513 | } |
| 3514 | |
| 3515 | static void |
| 3516 | ctx_flexible_sched_in(struct perf_event_context *ctx, |
Peter Zijlstra | 6e37738 | 2010-02-11 13:21:58 +0100 | [diff] [blame] | 3517 | struct perf_cpu_context *cpuctx) |
Frederic Weisbecker | 5b0311e | 2010-01-17 11:59:13 +0100 | [diff] [blame] | 3518 | { |
Peter Zijlstra | 2c2366c | 2019-08-07 11:45:01 +0200 | [diff] [blame] | 3519 | int can_add_hw = 1; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3520 | |
Ian Rogers | 836196be | 2020-02-13 23:51:31 -0800 | [diff] [blame^] | 3521 | if (ctx != &cpuctx->ctx) |
| 3522 | cpuctx = NULL; |
| 3523 | |
| 3524 | visit_groups_merge(cpuctx, &ctx->flexible_groups, |
Peter Zijlstra | 1cac7b1 | 2017-11-13 14:28:30 +0100 | [diff] [blame] | 3525 | smp_processor_id(), |
Peter Zijlstra | 2c2366c | 2019-08-07 11:45:01 +0200 | [diff] [blame] | 3526 | merge_sched_in, &can_add_hw); |
Frederic Weisbecker | 5b0311e | 2010-01-17 11:59:13 +0100 | [diff] [blame] | 3527 | } |
| 3528 | |
| 3529 | static void |
| 3530 | ctx_sched_in(struct perf_event_context *ctx, |
| 3531 | struct perf_cpu_context *cpuctx, |
Stephane Eranian | e5d1367 | 2011-02-14 11:20:01 +0200 | [diff] [blame] | 3532 | enum event_type_t event_type, |
| 3533 | struct task_struct *task) |
Frederic Weisbecker | 5b0311e | 2010-01-17 11:59:13 +0100 | [diff] [blame] | 3534 | { |
Peter Zijlstra | db24d33 | 2011-04-09 21:17:45 +0200 | [diff] [blame] | 3535 | int is_active = ctx->is_active; |
Peter Zijlstra | c994d61 | 2016-01-08 09:20:23 +0100 | [diff] [blame] | 3536 | u64 now; |
Stephane Eranian | e5d1367 | 2011-02-14 11:20:01 +0200 | [diff] [blame] | 3537 | |
Peter Zijlstra | c994d61 | 2016-01-08 09:20:23 +0100 | [diff] [blame] | 3538 | lockdep_assert_held(&ctx->lock); |
Frederic Weisbecker | 5b0311e | 2010-01-17 11:59:13 +0100 | [diff] [blame] | 3539 | |
Frederic Weisbecker | 5b0311e | 2010-01-17 11:59:13 +0100 | [diff] [blame] | 3540 | if (likely(!ctx->nr_events)) |
Peter Zijlstra | facc430 | 2011-04-09 21:17:42 +0200 | [diff] [blame] | 3541 | return; |
Frederic Weisbecker | 5b0311e | 2010-01-17 11:59:13 +0100 | [diff] [blame] | 3542 | |
Peter Zijlstra | 3cbaa59 | 2016-02-24 18:45:47 +0100 | [diff] [blame] | 3543 | ctx->is_active |= (event_type | EVENT_TIME); |
Peter Zijlstra | 63e30d3 | 2016-01-08 11:39:10 +0100 | [diff] [blame] | 3544 | if (ctx->task) { |
| 3545 | if (!is_active) |
| 3546 | cpuctx->task_ctx = ctx; |
| 3547 | else |
| 3548 | WARN_ON_ONCE(cpuctx->task_ctx != ctx); |
| 3549 | } |
| 3550 | |
Peter Zijlstra | 3cbaa59 | 2016-02-24 18:45:47 +0100 | [diff] [blame] | 3551 | is_active ^= ctx->is_active; /* changed bits */ |
| 3552 | |
| 3553 | if (is_active & EVENT_TIME) { |
| 3554 | /* start ctx time */ |
| 3555 | now = perf_clock(); |
| 3556 | ctx->timestamp = now; |
| 3557 | perf_cgroup_set_timestamp(task, ctx); |
| 3558 | } |
| 3559 | |
Frederic Weisbecker | 5b0311e | 2010-01-17 11:59:13 +0100 | [diff] [blame] | 3560 | /* |
| 3561 | * First go through the list and put on any pinned groups |
| 3562 | * in order to give them the best chance of going on. |
| 3563 | */ |
Peter Zijlstra | 3cbaa59 | 2016-02-24 18:45:47 +0100 | [diff] [blame] | 3564 | if (is_active & EVENT_PINNED) |
Peter Zijlstra | 6e37738 | 2010-02-11 13:21:58 +0100 | [diff] [blame] | 3565 | ctx_pinned_sched_in(ctx, cpuctx); |
Frederic Weisbecker | 5b0311e | 2010-01-17 11:59:13 +0100 | [diff] [blame] | 3566 | |
| 3567 | /* Then walk through the lower prio flexible groups */ |
Peter Zijlstra | 3cbaa59 | 2016-02-24 18:45:47 +0100 | [diff] [blame] | 3568 | if (is_active & EVENT_FLEXIBLE) |
Peter Zijlstra | 6e37738 | 2010-02-11 13:21:58 +0100 | [diff] [blame] | 3569 | ctx_flexible_sched_in(ctx, cpuctx); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3570 | } |
| 3571 | |
Frederic Weisbecker | 329c0e0 | 2010-01-17 12:56:05 +0100 | [diff] [blame] | 3572 | static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx, |
Stephane Eranian | e5d1367 | 2011-02-14 11:20:01 +0200 | [diff] [blame] | 3573 | enum event_type_t event_type, |
| 3574 | struct task_struct *task) |
Frederic Weisbecker | 329c0e0 | 2010-01-17 12:56:05 +0100 | [diff] [blame] | 3575 | { |
| 3576 | struct perf_event_context *ctx = &cpuctx->ctx; |
| 3577 | |
Stephane Eranian | e5d1367 | 2011-02-14 11:20:01 +0200 | [diff] [blame] | 3578 | ctx_sched_in(ctx, cpuctx, event_type, task); |
Frederic Weisbecker | 329c0e0 | 2010-01-17 12:56:05 +0100 | [diff] [blame] | 3579 | } |
| 3580 | |
Stephane Eranian | e5d1367 | 2011-02-14 11:20:01 +0200 | [diff] [blame] | 3581 | static void perf_event_context_sched_in(struct perf_event_context *ctx, |
| 3582 | struct task_struct *task) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3583 | { |
Peter Zijlstra | 8dc85d547 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 3584 | struct perf_cpu_context *cpuctx; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3585 | |
Peter Zijlstra | 108b02c | 2010-09-06 14:32:03 +0200 | [diff] [blame] | 3586 | cpuctx = __get_cpu_context(ctx); |
Frederic Weisbecker | 329c0e0 | 2010-01-17 12:56:05 +0100 | [diff] [blame] | 3587 | if (cpuctx->task_ctx == ctx) |
| 3588 | return; |
| 3589 | |
Peter Zijlstra | facc430 | 2011-04-09 21:17:42 +0200 | [diff] [blame] | 3590 | perf_ctx_lock(cpuctx, ctx); |
leilei.lin | fdccc3f | 2017-08-09 08:29:21 +0800 | [diff] [blame] | 3591 | /* |
| 3592 | * We must check ctx->nr_events while holding ctx->lock, such |
| 3593 | * that we serialize against perf_install_in_context(). |
| 3594 | */ |
| 3595 | if (!ctx->nr_events) |
| 3596 | goto unlock; |
| 3597 | |
Peter Zijlstra | 1b9a644 | 2010-09-07 18:32:22 +0200 | [diff] [blame] | 3598 | perf_pmu_disable(ctx->pmu); |
Frederic Weisbecker | 329c0e0 | 2010-01-17 12:56:05 +0100 | [diff] [blame] | 3599 | /* |
| 3600 | * We want to keep the following priority order: |
| 3601 | * cpu pinned (that don't need to move), task pinned, |
| 3602 | * cpu flexible, task flexible. |
Alexander Shishkin | fe45baf | 2017-01-19 18:43:29 +0200 | [diff] [blame] | 3603 | * |
 | 3604 | * However, if the task's ctx is not carrying any pinned |
 | 3605 | * events, there is no need to flip the cpuctx's events around. |
Frederic Weisbecker | 329c0e0 | 2010-01-17 12:56:05 +0100 | [diff] [blame] | 3606 | */ |
Alexey Budankov | 8e1a203 | 2017-09-08 11:47:03 +0300 | [diff] [blame] | 3607 | if (!RB_EMPTY_ROOT(&ctx->pinned_groups.tree)) |
Alexander Shishkin | fe45baf | 2017-01-19 18:43:29 +0200 | [diff] [blame] | 3608 | cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE); |
Peter Zijlstra | 63e30d3 | 2016-01-08 11:39:10 +0100 | [diff] [blame] | 3609 | perf_event_sched_in(cpuctx, ctx, task); |
Peter Zijlstra | facc430 | 2011-04-09 21:17:42 +0200 | [diff] [blame] | 3610 | perf_pmu_enable(ctx->pmu); |
leilei.lin | fdccc3f | 2017-08-09 08:29:21 +0800 | [diff] [blame] | 3611 | |
| 3612 | unlock: |
Peter Zijlstra | facc430 | 2011-04-09 21:17:42 +0200 | [diff] [blame] | 3613 | perf_ctx_unlock(cpuctx, ctx); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3614 | } |
| 3615 | |
Peter Zijlstra | 8dc85d547 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 3616 | /* |
| 3617 | * Called from scheduler to add the events of the current task |
| 3618 | * with interrupts disabled. |
| 3619 | * |
| 3620 | * We restore the event value and then enable it. |
| 3621 | * |
| 3622 | * This does not protect us against NMI, but enable() |
| 3623 | * sets the enabled bit in the control field of event _before_ |
 | 3624 | * accessing the event control register. If an NMI hits, then it will |
| 3625 | * keep the event running. |
| 3626 | */ |
Jiri Olsa | ab0cce5 | 2012-05-23 13:13:02 +0200 | [diff] [blame] | 3627 | void __perf_event_task_sched_in(struct task_struct *prev, |
| 3628 | struct task_struct *task) |
Peter Zijlstra | 8dc85d547 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 3629 | { |
| 3630 | struct perf_event_context *ctx; |
| 3631 | int ctxn; |
| 3632 | |
Peter Zijlstra | 7e41d17 | 2016-01-08 09:21:40 +0100 | [diff] [blame] | 3633 | /* |
| 3634 | * If cgroup events exist on this CPU, then we need to check if we have |
 | 3635 | * to switch in PMU state; cgroup events are system-wide mode only. |
| 3636 | * |
| 3637 | * Since cgroup events are CPU events, we must schedule these in before |
| 3638 | * we schedule in the task events. |
| 3639 | */ |
| 3640 | if (atomic_read(this_cpu_ptr(&perf_cgroup_events))) |
| 3641 | perf_cgroup_sched_in(prev, task); |
| 3642 | |
Peter Zijlstra | 8dc85d547 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 3643 | for_each_task_context_nr(ctxn) { |
| 3644 | ctx = task->perf_event_ctxp[ctxn]; |
| 3645 | if (likely(!ctx)) |
| 3646 | continue; |
| 3647 | |
Stephane Eranian | e5d1367 | 2011-02-14 11:20:01 +0200 | [diff] [blame] | 3648 | perf_event_context_sched_in(ctx, task); |
Peter Zijlstra | 8dc85d547 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 3649 | } |
Stephane Eranian | d010b33 | 2012-02-09 23:21:00 +0100 | [diff] [blame] | 3650 | |
Adrian Hunter | 45ac140 | 2015-07-21 12:44:02 +0300 | [diff] [blame] | 3651 | if (atomic_read(&nr_switch_events)) |
| 3652 | perf_event_switch(task, prev, true); |
| 3653 | |
Yan, Zheng | ba53250 | 2014-11-04 21:55:58 -0500 | [diff] [blame] | 3654 | if (__this_cpu_read(perf_sched_cb_usages)) |
| 3655 | perf_pmu_sched_task(prev, task, true); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3656 | } |
| 3657 | |
Peter Zijlstra | abd5071 | 2010-01-26 18:50:16 +0100 | [diff] [blame] | 3658 | static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count) |
| 3659 | { |
| 3660 | u64 frequency = event->attr.sample_freq; |
| 3661 | u64 sec = NSEC_PER_SEC; |
| 3662 | u64 divisor, dividend; |
| 3663 | |
| 3664 | int count_fls, nsec_fls, frequency_fls, sec_fls; |
| 3665 | |
| 3666 | count_fls = fls64(count); |
| 3667 | nsec_fls = fls64(nsec); |
| 3668 | frequency_fls = fls64(frequency); |
| 3669 | sec_fls = 30; |
| 3670 | |
| 3671 | /* |
| 3672 | * We got @count in @nsec, with a target of sample_freq HZ |
| 3673 | * the target period becomes: |
| 3674 | * |
| 3675 | * @count * 10^9 |
| 3676 | * period = ------------------- |
| 3677 | * @nsec * sample_freq |
| 3678 | * |
| 3679 | */ |
| 3680 | |
| 3681 | /* |
| 3682 | * Reduce accuracy by one bit such that @a and @b converge |
| 3683 | * to a similar magnitude. |
| 3684 | */ |
Peter Zijlstra | fe4b04f | 2011-02-02 13:19:09 +0100 | [diff] [blame] | 3685 | #define REDUCE_FLS(a, b) \ |
Peter Zijlstra | abd5071 | 2010-01-26 18:50:16 +0100 | [diff] [blame] | 3686 | do { \ |
| 3687 | if (a##_fls > b##_fls) { \ |
| 3688 | a >>= 1; \ |
| 3689 | a##_fls--; \ |
| 3690 | } else { \ |
| 3691 | b >>= 1; \ |
| 3692 | b##_fls--; \ |
| 3693 | } \ |
| 3694 | } while (0) |
| 3695 | |
| 3696 | /* |
| 3697 | * Reduce accuracy until either term fits in a u64, then proceed with |
| 3698 | * the other, so that finally we can do a u64/u64 division. |
| 3699 | */ |
| 3700 | while (count_fls + sec_fls > 64 && nsec_fls + frequency_fls > 64) { |
| 3701 | REDUCE_FLS(nsec, frequency); |
| 3702 | REDUCE_FLS(sec, count); |
| 3703 | } |
| 3704 | |
| 3705 | if (count_fls + sec_fls > 64) { |
| 3706 | divisor = nsec * frequency; |
| 3707 | |
| 3708 | while (count_fls + sec_fls > 64) { |
| 3709 | REDUCE_FLS(count, sec); |
| 3710 | divisor >>= 1; |
| 3711 | } |
| 3712 | |
| 3713 | dividend = count * sec; |
| 3714 | } else { |
| 3715 | dividend = count * sec; |
| 3716 | |
| 3717 | while (nsec_fls + frequency_fls > 64) { |
| 3718 | REDUCE_FLS(nsec, frequency); |
| 3719 | dividend >>= 1; |
| 3720 | } |
| 3721 | |
| 3722 | divisor = nsec * frequency; |
| 3723 | } |
| 3724 | |
Peter Zijlstra | f6ab91ad | 2010-06-04 15:18:01 +0200 | [diff] [blame] | 3725 | if (!divisor) |
| 3726 | return dividend; |
| 3727 | |
Peter Zijlstra | abd5071 | 2010-01-26 18:50:16 +0100 | [diff] [blame] | 3728 | return div64_u64(dividend, divisor); |
| 3729 | } |
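/*
 * Worked example, illustrative only: an event with sample_freq = 1000 Hz
 * that counted 2,000,000 events over the last 10 ms (10,000,000 ns) yields
 *
 *	period = (2,000,000 * 10^9) / (10,000,000 * 1000) = 200,000
 *
 * At the observed rate of 2 * 10^8 events/sec, one sample every 200,000
 * events gives the requested 1000 samples/sec.
 */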
| 3730 | |
Stephane Eranian | e050e3f | 2012-01-26 17:03:19 +0100 | [diff] [blame] | 3731 | static DEFINE_PER_CPU(int, perf_throttled_count); |
| 3732 | static DEFINE_PER_CPU(u64, perf_throttled_seq); |
| 3733 | |
Stephane Eranian | f39d47f | 2012-02-07 14:39:57 +0100 | [diff] [blame] | 3734 | static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count, bool disable) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3735 | { |
| 3736 | struct hw_perf_event *hwc = &event->hw; |
Peter Zijlstra | f6ab91ad | 2010-06-04 15:18:01 +0200 | [diff] [blame] | 3737 | s64 period, sample_period; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3738 | s64 delta; |
| 3739 | |
Peter Zijlstra | abd5071 | 2010-01-26 18:50:16 +0100 | [diff] [blame] | 3740 | period = perf_calculate_period(event, nsec, count); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3741 | |
| 3742 | delta = (s64)(period - hwc->sample_period); |
| 3743 | delta = (delta + 7) / 8; /* low pass filter */ |
| 3744 | |
| 3745 | sample_period = hwc->sample_period + delta; |
| 3746 | |
| 3747 | if (!sample_period) |
| 3748 | sample_period = 1; |
| 3749 | |
| 3750 | hwc->sample_period = sample_period; |
Peter Zijlstra | abd5071 | 2010-01-26 18:50:16 +0100 | [diff] [blame] | 3751 | |
Peter Zijlstra | e785059 | 2010-05-21 14:43:08 +0200 | [diff] [blame] | 3752 | if (local64_read(&hwc->period_left) > 8*sample_period) { |
Stephane Eranian | f39d47f | 2012-02-07 14:39:57 +0100 | [diff] [blame] | 3753 | if (disable) |
| 3754 | event->pmu->stop(event, PERF_EF_UPDATE); |
| 3755 | |
Peter Zijlstra | e785059 | 2010-05-21 14:43:08 +0200 | [diff] [blame] | 3756 | local64_set(&hwc->period_left, 0); |
Stephane Eranian | f39d47f | 2012-02-07 14:39:57 +0100 | [diff] [blame] | 3757 | |
| 3758 | if (disable) |
| 3759 | event->pmu->start(event, PERF_EF_RELOAD); |
Peter Zijlstra | abd5071 | 2010-01-26 18:50:16 +0100 | [diff] [blame] | 3760 | } |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3761 | } |
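/*
 * Illustrative example: if hwc->sample_period is 100,000 and
 * perf_calculate_period() suggests 180,000, the low pass filter above only
 * applies delta = (80,000 + 7) / 8 = 10,000, so sample_period becomes
 * 110,000. Repeated ticks converge on the target instead of jumping on a
 * single noisy measurement.
 */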
| 3762 | |
Stephane Eranian | e050e3f | 2012-01-26 17:03:19 +0100 | [diff] [blame] | 3763 | /* |
| 3764 | * combine freq adjustment with unthrottling to avoid two passes over the |
 | 3765 | * events. At the same time, make sure that having freq events does not change |
 | 3766 | * the rate of unthrottling, as that would introduce bias. |
| 3767 | */ |
| 3768 | static void perf_adjust_freq_unthr_context(struct perf_event_context *ctx, |
| 3769 | int needs_unthr) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3770 | { |
| 3771 | struct perf_event *event; |
| 3772 | struct hw_perf_event *hwc; |
Stephane Eranian | e050e3f | 2012-01-26 17:03:19 +0100 | [diff] [blame] | 3773 | u64 now, period = TICK_NSEC; |
Peter Zijlstra | abd5071 | 2010-01-26 18:50:16 +0100 | [diff] [blame] | 3774 | s64 delta; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3775 | |
Stephane Eranian | e050e3f | 2012-01-26 17:03:19 +0100 | [diff] [blame] | 3776 | /* |
 | 3777 | * we only need to iterate over all events if: |
 | 3778 | * - the context has events in frequency mode (needs freq adjust) |
 | 3779 | * - there are events to unthrottle on this cpu |
| 3780 | */ |
| 3781 | if (!(ctx->nr_freq || needs_unthr)) |
Peter Zijlstra | 0f5a260 | 2011-11-16 14:38:16 +0100 | [diff] [blame] | 3782 | return; |
| 3783 | |
Stephane Eranian | e050e3f | 2012-01-26 17:03:19 +0100 | [diff] [blame] | 3784 | raw_spin_lock(&ctx->lock); |
Stephane Eranian | f39d47f | 2012-02-07 14:39:57 +0100 | [diff] [blame] | 3785 | perf_pmu_disable(ctx->pmu); |
Stephane Eranian | e050e3f | 2012-01-26 17:03:19 +0100 | [diff] [blame] | 3786 | |
Paul Mackerras | 03541f8 | 2009-10-14 16:58:03 +1100 | [diff] [blame] | 3787 | list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3788 | if (event->state != PERF_EVENT_STATE_ACTIVE) |
| 3789 | continue; |
| 3790 | |
Stephane Eranian | 5632ab1 | 2011-01-03 18:20:01 +0200 | [diff] [blame] | 3791 | if (!event_filter_match(event)) |
Peter Zijlstra | 5d27c23 | 2009-12-17 13:16:32 +0100 | [diff] [blame] | 3792 | continue; |
| 3793 | |
Alexander Shishkin | 4437727 | 2013-12-16 14:17:36 +0200 | [diff] [blame] | 3794 | perf_pmu_disable(event->pmu); |
| 3795 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3796 | hwc = &event->hw; |
| 3797 | |
Jiri Olsa | ae23bff | 2013-08-24 16:45:54 +0200 | [diff] [blame] | 3798 | if (hwc->interrupts == MAX_INTERRUPTS) { |
Stephane Eranian | e050e3f | 2012-01-26 17:03:19 +0100 | [diff] [blame] | 3799 | hwc->interrupts = 0; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3800 | perf_log_throttle(event, 1); |
Peter Zijlstra | a4eaf7f | 2010-06-16 14:37:10 +0200 | [diff] [blame] | 3801 | event->pmu->start(event, 0); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3802 | } |
| 3803 | |
| 3804 | if (!event->attr.freq || !event->attr.sample_freq) |
Alexander Shishkin | 4437727 | 2013-12-16 14:17:36 +0200 | [diff] [blame] | 3805 | goto next; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3806 | |
Stephane Eranian | e050e3f | 2012-01-26 17:03:19 +0100 | [diff] [blame] | 3807 | /* |
| 3808 | * stop the event and update event->count |
| 3809 | */ |
| 3810 | event->pmu->stop(event, PERF_EF_UPDATE); |
| 3811 | |
Peter Zijlstra | e785059 | 2010-05-21 14:43:08 +0200 | [diff] [blame] | 3812 | now = local64_read(&event->count); |
Peter Zijlstra | abd5071 | 2010-01-26 18:50:16 +0100 | [diff] [blame] | 3813 | delta = now - hwc->freq_count_stamp; |
| 3814 | hwc->freq_count_stamp = now; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3815 | |
Stephane Eranian | e050e3f | 2012-01-26 17:03:19 +0100 | [diff] [blame] | 3816 | /* |
| 3817 | * restart the event |
| 3818 | * reload only if value has changed |
Stephane Eranian | f39d47f | 2012-02-07 14:39:57 +0100 | [diff] [blame] | 3819 | * we have stopped the event so tell that |
| 3820 | * to perf_adjust_period() to avoid stopping it |
| 3821 | * twice. |
Stephane Eranian | e050e3f | 2012-01-26 17:03:19 +0100 | [diff] [blame] | 3822 | */ |
Peter Zijlstra | abd5071 | 2010-01-26 18:50:16 +0100 | [diff] [blame] | 3823 | if (delta > 0) |
Stephane Eranian | f39d47f | 2012-02-07 14:39:57 +0100 | [diff] [blame] | 3824 | perf_adjust_period(event, period, delta, false); |
Stephane Eranian | e050e3f | 2012-01-26 17:03:19 +0100 | [diff] [blame] | 3825 | |
| 3826 | event->pmu->start(event, delta > 0 ? PERF_EF_RELOAD : 0); |
Alexander Shishkin | 4437727 | 2013-12-16 14:17:36 +0200 | [diff] [blame] | 3827 | next: |
| 3828 | perf_pmu_enable(event->pmu); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3829 | } |
Stephane Eranian | e050e3f | 2012-01-26 17:03:19 +0100 | [diff] [blame] | 3830 | |
Stephane Eranian | f39d47f | 2012-02-07 14:39:57 +0100 | [diff] [blame] | 3831 | perf_pmu_enable(ctx->pmu); |
Stephane Eranian | e050e3f | 2012-01-26 17:03:19 +0100 | [diff] [blame] | 3832 | raw_spin_unlock(&ctx->lock); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3833 | } |
| 3834 | |
| 3835 | /* |
Peter Zijlstra | 8703a7c | 2017-11-13 14:28:44 +0100 | [diff] [blame] | 3836 | * Move @event to the tail of the @ctx's eligible events. |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3837 | */ |
Peter Zijlstra | 8703a7c | 2017-11-13 14:28:44 +0100 | [diff] [blame] | 3838 | static void rotate_ctx(struct perf_event_context *ctx, struct perf_event *event) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3839 | { |
Thomas Gleixner | dddd337 | 2010-11-24 10:05:55 +0100 | [diff] [blame] | 3840 | /* |
 | 3841 | * Rotate the first entry of the non-pinned groups to the tail. Rotation |
 | 3842 | * might be disabled by the inheritance code. |
| 3843 | */ |
Peter Zijlstra | 8703a7c | 2017-11-13 14:28:44 +0100 | [diff] [blame] | 3844 | if (ctx->rotate_disable) |
| 3845 | return; |
Alexey Budankov | 8e1a203 | 2017-09-08 11:47:03 +0300 | [diff] [blame] | 3846 | |
Peter Zijlstra | 8703a7c | 2017-11-13 14:28:44 +0100 | [diff] [blame] | 3847 | perf_event_groups_delete(&ctx->flexible_groups, event); |
| 3848 | perf_event_groups_insert(&ctx->flexible_groups, event); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3849 | } |
| 3850 | |
Song Liu | 7fa343b7 | 2019-10-08 09:59:49 -0700 | [diff] [blame] | 3851 | /* pick an event from the flexible_groups to rotate */ |
Peter Zijlstra | 8d5bce0 | 2018-03-09 14:56:27 +0100 | [diff] [blame] | 3852 | static inline struct perf_event * |
Song Liu | 7fa343b7 | 2019-10-08 09:59:49 -0700 | [diff] [blame] | 3853 | ctx_event_to_rotate(struct perf_event_context *ctx) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3854 | { |
Song Liu | 7fa343b7 | 2019-10-08 09:59:49 -0700 | [diff] [blame] | 3855 | struct perf_event *event; |
| 3856 | |
| 3857 | /* pick the first active flexible event */ |
| 3858 | event = list_first_entry_or_null(&ctx->flexible_active, |
| 3859 | struct perf_event, active_list); |
| 3860 | |
| 3861 | /* if no active flexible event, pick the first event */ |
| 3862 | if (!event) { |
| 3863 | event = rb_entry_safe(rb_first(&ctx->flexible_groups.tree), |
| 3864 | typeof(*event), group_node); |
| 3865 | } |
| 3866 | |
| 3867 | return event; |
Peter Zijlstra | 8d5bce0 | 2018-03-09 14:56:27 +0100 | [diff] [blame] | 3868 | } |
| 3869 | |
| 3870 | static bool perf_rotate_context(struct perf_cpu_context *cpuctx) |
| 3871 | { |
| 3872 | struct perf_event *cpu_event = NULL, *task_event = NULL; |
Ian Rogers | fd7d551 | 2019-06-01 01:27:22 -0700 | [diff] [blame] | 3873 | struct perf_event_context *task_ctx = NULL; |
| 3874 | int cpu_rotate, task_rotate; |
Peter Zijlstra | 8d5bce0 | 2018-03-09 14:56:27 +0100 | [diff] [blame] | 3875 | |
| 3876 | /* |
| 3877 | * Since we run this from IRQ context, nobody can install new |
| 3878 | * events, thus the event count values are stable. |
| 3879 | */ |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3880 | |
Ian Rogers | fd7d551 | 2019-06-01 01:27:22 -0700 | [diff] [blame] | 3881 | cpu_rotate = cpuctx->ctx.rotate_necessary; |
| 3882 | task_ctx = cpuctx->task_ctx; |
| 3883 | task_rotate = task_ctx ? task_ctx->rotate_necessary : 0; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3884 | |
Peter Zijlstra | 8d5bce0 | 2018-03-09 14:56:27 +0100 | [diff] [blame] | 3885 | if (!(cpu_rotate || task_rotate)) |
| 3886 | return false; |
Peter Zijlstra | 0f5a260 | 2011-11-16 14:38:16 +0100 | [diff] [blame] | 3887 | |
Peter Zijlstra | facc430 | 2011-04-09 21:17:42 +0200 | [diff] [blame] | 3888 | perf_ctx_lock(cpuctx, cpuctx->task_ctx); |
Peter Zijlstra | 1b9a644 | 2010-09-07 18:32:22 +0200 | [diff] [blame] | 3889 | perf_pmu_disable(cpuctx->ctx.pmu); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3890 | |
Peter Zijlstra | 8d5bce0 | 2018-03-09 14:56:27 +0100 | [diff] [blame] | 3891 | if (task_rotate) |
Song Liu | 7fa343b7 | 2019-10-08 09:59:49 -0700 | [diff] [blame] | 3892 | task_event = ctx_event_to_rotate(task_ctx); |
Peter Zijlstra | 8d5bce0 | 2018-03-09 14:56:27 +0100 | [diff] [blame] | 3893 | if (cpu_rotate) |
Song Liu | 7fa343b7 | 2019-10-08 09:59:49 -0700 | [diff] [blame] | 3894 | cpu_event = ctx_event_to_rotate(&cpuctx->ctx); |
Peter Zijlstra | 8703a7c | 2017-11-13 14:28:44 +0100 | [diff] [blame] | 3895 | |
Peter Zijlstra | 8d5bce0 | 2018-03-09 14:56:27 +0100 | [diff] [blame] | 3896 | /* |
 | 3897 | * As per the order given at ctx_resched(), first 'pop' the task flexible |
 | 3898 | * events and then, if needed, the CPU flexible ones. |
| 3899 | */ |
Ian Rogers | fd7d551 | 2019-06-01 01:27:22 -0700 | [diff] [blame] | 3900 | if (task_event || (task_ctx && cpu_event)) |
| 3901 | ctx_sched_out(task_ctx, cpuctx, EVENT_FLEXIBLE); |
Peter Zijlstra | 8d5bce0 | 2018-03-09 14:56:27 +0100 | [diff] [blame] | 3902 | if (cpu_event) |
| 3903 | cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE); |
Peter Zijlstra | d4944a0 | 2010-03-08 13:51:20 +0100 | [diff] [blame] | 3904 | |
Peter Zijlstra | 8d5bce0 | 2018-03-09 14:56:27 +0100 | [diff] [blame] | 3905 | if (task_event) |
Ian Rogers | fd7d551 | 2019-06-01 01:27:22 -0700 | [diff] [blame] | 3906 | rotate_ctx(task_ctx, task_event); |
Peter Zijlstra | 8d5bce0 | 2018-03-09 14:56:27 +0100 | [diff] [blame] | 3907 | if (cpu_event) |
| 3908 | rotate_ctx(&cpuctx->ctx, cpu_event); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3909 | |
Ian Rogers | fd7d551 | 2019-06-01 01:27:22 -0700 | [diff] [blame] | 3910 | perf_event_sched_in(cpuctx, task_ctx, current); |
Peter Zijlstra | 0f5a260 | 2011-11-16 14:38:16 +0100 | [diff] [blame] | 3911 | |
| 3912 | perf_pmu_enable(cpuctx->ctx.pmu); |
| 3913 | perf_ctx_unlock(cpuctx, cpuctx->task_ctx); |
Stephane Eranian | 9e63020 | 2013-04-03 14:21:33 +0200 | [diff] [blame] | 3914 | |
Peter Zijlstra | 8d5bce0 | 2018-03-09 14:56:27 +0100 | [diff] [blame] | 3915 | return true; |
Peter Zijlstra | e9d2b06 | 2010-09-17 11:28:50 +0200 | [diff] [blame] | 3916 | } |
| 3917 | |
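As a rough illustration of what the rotation above buys (the counter and event names below are invented, not taken from this file): suppose a PMU with two hardware counters and three flexible events A, B and C in one context. rotate_necessary is expected to be set (outside this excerpt) when a flexible group fails to schedule; when perf_rotate_context() next runs, it picks the first active flexible event and hands it to rotate_ctx(), which (also outside this excerpt) is expected to move that event to the back of the flexible ordering before everything is rescheduled:

	rotation N:   scheduled = {A, B}, C starved  ->  rotate A to the back
	rotation N+1: scheduled = {B, C}, A starved  ->  rotate B to the back
	rotation N+2: scheduled = {C, A}, B starved  ->  and so on

Over successive rotations every flexible event gets a share of the counters, which is the multiplexing the EVENT_FLEXIBLE sched-out / rotate_ctx() / perf_event_sched_in() sequence implements.
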
| 3918 | void perf_event_task_tick(void) |
| 3919 | { |
Mark Rutland | 2fde4f9 | 2015-01-07 15:01:54 +0000 | [diff] [blame] | 3920 | struct list_head *head = this_cpu_ptr(&active_ctx_list); |
| 3921 | struct perf_event_context *ctx, *tmp; |
Stephane Eranian | e050e3f | 2012-01-26 17:03:19 +0100 | [diff] [blame] | 3922 | int throttled; |
Peter Zijlstra | e9d2b06 | 2010-09-17 11:28:50 +0200 | [diff] [blame] | 3923 | |
Frederic Weisbecker | 1644464 | 2017-11-06 16:01:24 +0100 | [diff] [blame] | 3924 | lockdep_assert_irqs_disabled(); |
Peter Zijlstra | e9d2b06 | 2010-09-17 11:28:50 +0200 | [diff] [blame] | 3925 | |
Stephane Eranian | e050e3f | 2012-01-26 17:03:19 +0100 | [diff] [blame] | 3926 | __this_cpu_inc(perf_throttled_seq); |
| 3927 | throttled = __this_cpu_xchg(perf_throttled_count, 0); |
Frederic Weisbecker | 555e0c1 | 2015-07-16 17:42:29 +0200 | [diff] [blame] | 3928 | tick_dep_clear_cpu(smp_processor_id(), TICK_DEP_BIT_PERF_EVENTS); |
Stephane Eranian | e050e3f | 2012-01-26 17:03:19 +0100 | [diff] [blame] | 3929 | |
Mark Rutland | 2fde4f9 | 2015-01-07 15:01:54 +0000 | [diff] [blame] | 3930 | list_for_each_entry_safe(ctx, tmp, head, active_ctx_list) |
Stephane Eranian | e050e3f | 2012-01-26 17:03:19 +0100 | [diff] [blame] | 3931 | perf_adjust_freq_unthr_context(ctx, throttled); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3932 | } |
| 3933 | |
Frederic Weisbecker | 889ff01 | 2010-01-09 20:04:47 +0100 | [diff] [blame] | 3934 | static int event_enable_on_exec(struct perf_event *event, |
| 3935 | struct perf_event_context *ctx) |
| 3936 | { |
| 3937 | if (!event->attr.enable_on_exec) |
| 3938 | return 0; |
| 3939 | |
| 3940 | event->attr.enable_on_exec = 0; |
| 3941 | if (event->state >= PERF_EVENT_STATE_INACTIVE) |
| 3942 | return 0; |
| 3943 | |
Peter Zijlstra | 0d3d73a | 2017-09-05 14:16:28 +0200 | [diff] [blame] | 3944 | perf_event_set_state(event, PERF_EVENT_STATE_INACTIVE); |
Frederic Weisbecker | 889ff01 | 2010-01-09 20:04:47 +0100 | [diff] [blame] | 3945 | |
| 3946 | return 1; |
| 3947 | } |
| 3948 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3949 | /* |
| 3950 | * Enable all of a task's events that have been marked enable-on-exec. |
| 3951 | * This expects task == current. |
| 3952 | */ |
Peter Zijlstra | c127449 | 2015-12-10 20:57:40 +0100 | [diff] [blame] | 3953 | static void perf_event_enable_on_exec(int ctxn) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3954 | { |
Peter Zijlstra | c127449 | 2015-12-10 20:57:40 +0100 | [diff] [blame] | 3955 | struct perf_event_context *ctx, *clone_ctx = NULL; |
Alexander Shishkin | 487f05e | 2017-01-19 18:43:30 +0200 | [diff] [blame] | 3956 | enum event_type_t event_type = 0; |
Peter Zijlstra | 3e34950 | 2016-01-08 10:01:18 +0100 | [diff] [blame] | 3957 | struct perf_cpu_context *cpuctx; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3958 | struct perf_event *event; |
| 3959 | unsigned long flags; |
| 3960 | int enabled = 0; |
| 3961 | |
| 3962 | local_irq_save(flags); |
Peter Zijlstra | c127449 | 2015-12-10 20:57:40 +0100 | [diff] [blame] | 3963 | ctx = current->perf_event_ctxp[ctxn]; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3964 | if (!ctx || !ctx->nr_events) |
| 3965 | goto out; |
| 3966 | |
Peter Zijlstra | 3e34950 | 2016-01-08 10:01:18 +0100 | [diff] [blame] | 3967 | cpuctx = __get_cpu_context(ctx); |
| 3968 | perf_ctx_lock(cpuctx, ctx); |
Peter Zijlstra | 7fce250 | 2016-02-24 18:45:48 +0100 | [diff] [blame] | 3969 | ctx_sched_out(ctx, cpuctx, EVENT_TIME); |
Alexander Shishkin | 487f05e | 2017-01-19 18:43:30 +0200 | [diff] [blame] | 3970 | list_for_each_entry(event, &ctx->event_list, event_entry) { |
Peter Zijlstra | 3e34950 | 2016-01-08 10:01:18 +0100 | [diff] [blame] | 3971 | enabled |= event_enable_on_exec(event, ctx); |
Alexander Shishkin | 487f05e | 2017-01-19 18:43:30 +0200 | [diff] [blame] | 3972 | event_type |= get_event_type(event); |
| 3973 | } |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3974 | |
| 3975 | /* |
Peter Zijlstra | 3e34950 | 2016-01-08 10:01:18 +0100 | [diff] [blame] | 3976 | * Unclone and reschedule this context if we enabled any event. |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3977 | */ |
Peter Zijlstra | 3e34950 | 2016-01-08 10:01:18 +0100 | [diff] [blame] | 3978 | if (enabled) { |
Peter Zijlstra | 211de6e | 2014-09-30 19:23:08 +0200 | [diff] [blame] | 3979 | clone_ctx = unclone_ctx(ctx); |
Alexander Shishkin | 487f05e | 2017-01-19 18:43:30 +0200 | [diff] [blame] | 3980 | ctx_resched(cpuctx, ctx, event_type); |
Peter Zijlstra | 7bbba0e | 2017-02-15 16:12:20 +0100 | [diff] [blame] | 3981 | } else { |
| 3982 | ctx_sched_in(ctx, cpuctx, EVENT_TIME, current); |
Peter Zijlstra | 3e34950 | 2016-01-08 10:01:18 +0100 | [diff] [blame] | 3983 | } |
| 3984 | perf_ctx_unlock(cpuctx, ctx); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3985 | |
Peter Zijlstra | 9ed6060 | 2010-06-11 17:36:35 +0200 | [diff] [blame] | 3986 | out: |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3987 | local_irq_restore(flags); |
Peter Zijlstra | 211de6e | 2014-09-30 19:23:08 +0200 | [diff] [blame] | 3988 | |
| 3989 | if (clone_ctx) |
| 3990 | put_ctx(clone_ctx); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3991 | } |
| 3992 | |
Peter Zijlstra | 0492d4c | 2015-09-03 20:07:48 -0700 | [diff] [blame] | 3993 | struct perf_read_data { |
| 3994 | struct perf_event *event; |
| 3995 | bool group; |
Sukadev Bhattiprolu | 7d88962 | 2015-09-03 20:07:50 -0700 | [diff] [blame] | 3996 | int ret; |
Peter Zijlstra | 0492d4c | 2015-09-03 20:07:48 -0700 | [diff] [blame] | 3997 | }; |
| 3998 | |
Peter Zijlstra | 451d24d | 2017-01-31 11:27:10 +0100 | [diff] [blame] | 3999 | static int __perf_event_read_cpu(struct perf_event *event, int event_cpu) |
David Carrillo-Cisneros | d6a2f903 | 2016-08-17 13:55:06 -0700 | [diff] [blame] | 4000 | { |
David Carrillo-Cisneros | d6a2f903 | 2016-08-17 13:55:06 -0700 | [diff] [blame] | 4001 | u16 local_pkg, event_pkg; |
| 4002 | |
| 4003 | if (event->group_caps & PERF_EV_CAP_READ_ACTIVE_PKG) { |
Peter Zijlstra | 451d24d | 2017-01-31 11:27:10 +0100 | [diff] [blame] | 4004 | int local_cpu = smp_processor_id(); |
| 4005 | |
| 4006 | event_pkg = topology_physical_package_id(event_cpu); |
| 4007 | local_pkg = topology_physical_package_id(local_cpu); |
David Carrillo-Cisneros | d6a2f903 | 2016-08-17 13:55:06 -0700 | [diff] [blame] | 4008 | |
| 4009 | if (event_pkg == local_pkg) |
| 4010 | return local_cpu; |
| 4011 | } |
| 4012 | |
| 4013 | return event_cpu; |
| 4014 | } |
| 4015 | |
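A hedged example of the PERF_EV_CAP_READ_ACTIVE_PKG shortcut above, with made-up CPU numbers:

	event_cpu == 8,  topology_physical_package_id(8) == 0
	local CPU == 2,  topology_physical_package_id(2) == 0
	=> __perf_event_read_cpu() returns 2

The later smp_call_function_single() then runs __perf_event_read() on the local CPU instead of sending a cross-package IPI, which is the point of the capability for package-scoped (uncore-style) counters.
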
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4016 | /* |
| 4017 | * Cross CPU call to read the hardware event |
| 4018 | */ |
| 4019 | static void __perf_event_read(void *info) |
| 4020 | { |
Peter Zijlstra | 0492d4c | 2015-09-03 20:07:48 -0700 | [diff] [blame] | 4021 | struct perf_read_data *data = info; |
| 4022 | struct perf_event *sub, *event = data->event; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4023 | struct perf_event_context *ctx = event->ctx; |
Peter Zijlstra | 108b02c | 2010-09-06 14:32:03 +0200 | [diff] [blame] | 4024 | struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); |
Sukadev Bhattiprolu | 4a00c16 | 2015-09-03 20:07:51 -0700 | [diff] [blame] | 4025 | struct pmu *pmu = event->pmu; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4026 | |
| 4027 | /* |
| 4028 | * If this is a task context, we need to check whether it is |
| 4029 | * the current task context of this cpu. If not it has been |
| 4030 | * scheduled out before the smp call arrived. In that case |
| 4031 | * event->count would have been updated to a recent sample |
| 4032 | * when the event was scheduled out. |
| 4033 | */ |
| 4034 | if (ctx->task && cpuctx->task_ctx != ctx) |
| 4035 | return; |
| 4036 | |
Thomas Gleixner | e625cce1 | 2009-11-17 18:02:06 +0100 | [diff] [blame] | 4037 | raw_spin_lock(&ctx->lock); |
Peter Zijlstra | 0c1cbc1 | 2017-09-05 16:26:44 +0200 | [diff] [blame] | 4038 | if (ctx->is_active & EVENT_TIME) { |
Peter Zijlstra | 542e72f | 2011-01-26 15:38:35 +0100 | [diff] [blame] | 4039 | update_context_time(ctx); |
Stephane Eranian | e5d1367 | 2011-02-14 11:20:01 +0200 | [diff] [blame] | 4040 | update_cgrp_time_from_event(event); |
| 4041 | } |
Peter Zijlstra | 0492d4c | 2015-09-03 20:07:48 -0700 | [diff] [blame] | 4042 | |
Peter Zijlstra | 0d3d73a | 2017-09-05 14:16:28 +0200 | [diff] [blame] | 4043 | perf_event_update_time(event); |
| 4044 | if (data->group) |
| 4045 | perf_event_update_sibling_time(event); |
Peter Zijlstra | 0c1cbc1 | 2017-09-05 16:26:44 +0200 | [diff] [blame] | 4046 | |
Sukadev Bhattiprolu | 4a00c16 | 2015-09-03 20:07:51 -0700 | [diff] [blame] | 4047 | if (event->state != PERF_EVENT_STATE_ACTIVE) |
Peter Zijlstra | 0492d4c | 2015-09-03 20:07:48 -0700 | [diff] [blame] | 4048 | goto unlock; |
| 4049 | |
Sukadev Bhattiprolu | 4a00c16 | 2015-09-03 20:07:51 -0700 | [diff] [blame] | 4050 | if (!data->group) { |
| 4051 | pmu->read(event); |
| 4052 | data->ret = 0; |
| 4053 | goto unlock; |
| 4054 | } |
| 4055 | |
| 4056 | pmu->start_txn(pmu, PERF_PMU_TXN_READ); |
| 4057 | |
| 4058 | pmu->read(event); |
| 4059 | |
Peter Zijlstra | edb3959 | 2018-03-15 17:36:56 +0100 | [diff] [blame] | 4060 | for_each_sibling_event(sub, event) { |
Sukadev Bhattiprolu | 4a00c16 | 2015-09-03 20:07:51 -0700 | [diff] [blame] | 4061 | if (sub->state == PERF_EVENT_STATE_ACTIVE) { |
| 4062 | /* |
| 4063 | * Use sibling's PMU rather than @event's since |
| 4064 | * the sibling could be on a different (e.g. software) PMU. |
| 4065 | */ |
Peter Zijlstra | 0492d4c | 2015-09-03 20:07:48 -0700 | [diff] [blame] | 4066 | sub->pmu->read(sub); |
Sukadev Bhattiprolu | 4a00c16 | 2015-09-03 20:07:51 -0700 | [diff] [blame] | 4067 | } |
Peter Zijlstra | 0492d4c | 2015-09-03 20:07:48 -0700 | [diff] [blame] | 4068 | } |
Sukadev Bhattiprolu | 4a00c16 | 2015-09-03 20:07:51 -0700 | [diff] [blame] | 4069 | |
| 4070 | data->ret = pmu->commit_txn(pmu); |
Peter Zijlstra | 0492d4c | 2015-09-03 20:07:48 -0700 | [diff] [blame] | 4071 | |
| 4072 | unlock: |
Thomas Gleixner | e625cce1 | 2009-11-17 18:02:06 +0100 | [diff] [blame] | 4073 | raw_spin_unlock(&ctx->lock); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4074 | } |
| 4075 | |
Peter Zijlstra | b5e5879 | 2010-05-21 14:43:12 +0200 | [diff] [blame] | 4076 | static inline u64 perf_event_count(struct perf_event *event) |
| 4077 | { |
Vikas Shivappa | c39a0e2 | 2017-07-25 14:14:20 -0700 | [diff] [blame] | 4078 | return local64_read(&event->count) + atomic64_read(&event->child_count); |
Peter Zijlstra | b5e5879 | 2010-05-21 14:43:12 +0200 | [diff] [blame] | 4079 | } |
| 4080 | |
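In other words, the value a parent event reports is its own count plus whatever has already been folded into child_count from its children; with invented numbers, a parent that counted 1000 events itself on top of 2500 accumulated from children reads as 3500.
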
Kaixu Xia | ffe8690 | 2015-08-06 07:02:32 +0000 | [diff] [blame] | 4081 | /* |
| 4082 | * NMI-safe method to read a local event, that is an event that |
| 4083 | * is: |
| 4084 | * - either for the current task, or for this CPU |
| 4085 | * - does not have inherit set, because inherited task events |
| 4086 | * will not be local and we cannot read them atomically |
| 4087 | * - must not have a pmu::count method |
| 4088 | */ |
Yonghong Song | 7d9285e | 2017-10-05 09:19:19 -0700 | [diff] [blame] | 4089 | int perf_event_read_local(struct perf_event *event, u64 *value, |
| 4090 | u64 *enabled, u64 *running) |
Kaixu Xia | ffe8690 | 2015-08-06 07:02:32 +0000 | [diff] [blame] | 4091 | { |
| 4092 | unsigned long flags; |
Alexei Starovoitov | f91840a | 2017-06-02 21:03:52 -0700 | [diff] [blame] | 4093 | int ret = 0; |
Kaixu Xia | ffe8690 | 2015-08-06 07:02:32 +0000 | [diff] [blame] | 4094 | |
| 4095 | /* |
| 4096 | * Disabling interrupts avoids all counter scheduling (context |
| 4097 | * switches, timer based rotation and IPIs). |
| 4098 | */ |
| 4099 | local_irq_save(flags); |
| 4100 | |
Kaixu Xia | ffe8690 | 2015-08-06 07:02:32 +0000 | [diff] [blame] | 4101 | /* |
| 4102 | * It must not be an event with inherit set; we cannot read |
| 4103 | * all child counters from atomic context. |
| 4104 | */ |
Alexei Starovoitov | f91840a | 2017-06-02 21:03:52 -0700 | [diff] [blame] | 4105 | if (event->attr.inherit) { |
| 4106 | ret = -EOPNOTSUPP; |
| 4107 | goto out; |
| 4108 | } |
Kaixu Xia | ffe8690 | 2015-08-06 07:02:32 +0000 | [diff] [blame] | 4109 | |
Alexei Starovoitov | f91840a | 2017-06-02 21:03:52 -0700 | [diff] [blame] | 4110 | /* If this is a per-task event, it must be for current */ |
| 4111 | if ((event->attach_state & PERF_ATTACH_TASK) && |
| 4112 | event->hw.target != current) { |
| 4113 | ret = -EINVAL; |
| 4114 | goto out; |
| 4115 | } |
| 4116 | |
| 4117 | /* If this is a per-CPU event, it must be for this CPU */ |
| 4118 | if (!(event->attach_state & PERF_ATTACH_TASK) && |
| 4119 | event->cpu != smp_processor_id()) { |
| 4120 | ret = -EINVAL; |
| 4121 | goto out; |
| 4122 | } |
Kaixu Xia | ffe8690 | 2015-08-06 07:02:32 +0000 | [diff] [blame] | 4123 | |
Reinette Chatre | befb1b3 | 2018-09-19 10:29:06 -0700 | [diff] [blame] | 4124 | /* If this is a pinned event, it must be running on this CPU */ |
| 4125 | if (event->attr.pinned && event->oncpu != smp_processor_id()) { |
| 4126 | ret = -EBUSY; |
| 4127 | goto out; |
| 4128 | } |
| 4129 | |
Kaixu Xia | ffe8690 | 2015-08-06 07:02:32 +0000 | [diff] [blame] | 4130 | /* |
| 4131 | * If the event is currently on this CPU, it's either a per-task event, |
| 4132 | * or local to this CPU. Furthermore, it means it's ACTIVE (otherwise |
| 4133 | * oncpu == -1). |
| 4134 | */ |
| 4135 | if (event->oncpu == smp_processor_id()) |
| 4136 | event->pmu->read(event); |
| 4137 | |
Alexei Starovoitov | f91840a | 2017-06-02 21:03:52 -0700 | [diff] [blame] | 4138 | *value = local64_read(&event->count); |
Peter Zijlstra | 0d3d73a | 2017-09-05 14:16:28 +0200 | [diff] [blame] | 4139 | if (enabled || running) { |
| 4140 | u64 now = event->shadow_ctx_time + perf_clock(); |
| 4141 | u64 __enabled, __running; |
| 4142 | |
| 4143 | __perf_update_times(event, now, &__enabled, &__running); |
| 4144 | if (enabled) |
| 4145 | *enabled = __enabled; |
| 4146 | if (running) |
| 4147 | *running = __running; |
| 4148 | } |
Alexei Starovoitov | f91840a | 2017-06-02 21:03:52 -0700 | [diff] [blame] | 4149 | out: |
Kaixu Xia | ffe8690 | 2015-08-06 07:02:32 +0000 | [diff] [blame] | 4150 | local_irq_restore(flags); |
| 4151 | |
Alexei Starovoitov | f91840a | 2017-06-02 21:03:52 -0700 | [diff] [blame] | 4152 | return ret; |
Kaixu Xia | ffe8690 | 2015-08-06 07:02:32 +0000 | [diff] [blame] | 4153 | } |
| 4154 | |
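A minimal usage sketch for perf_event_read_local(); the helper below is hypothetical and not defined in this file, and it assumes the caller already owns a counter that satisfies the constraints listed above:

	static u64 read_my_local_counter(struct perf_event *event)
	{
		u64 value, enabled, running;

		/* Fails with -EOPNOTSUPP, -EINVAL or -EBUSY if the constraints are violated. */
		if (perf_event_read_local(event, &value, &enabled, &running))
			return 0;

		return value;
	}

As the "if (enabled || running)" check above suggests, callers that do not need the enabled/running times can pass NULL for those two arguments; the value pointer itself must be valid.
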
Sukadev Bhattiprolu | 7d88962 | 2015-09-03 20:07:50 -0700 | [diff] [blame] | 4155 | static int perf_event_read(struct perf_event *event, bool group) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4156 | { |
Peter Zijlstra | 0c1cbc1 | 2017-09-05 16:26:44 +0200 | [diff] [blame] | 4157 | enum perf_event_state state = READ_ONCE(event->state); |
Peter Zijlstra | 451d24d | 2017-01-31 11:27:10 +0100 | [diff] [blame] | 4158 | int event_cpu, ret = 0; |
Sukadev Bhattiprolu | 7d88962 | 2015-09-03 20:07:50 -0700 | [diff] [blame] | 4159 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4160 | /* |
| 4161 | * If event is enabled and currently active on a CPU, update the |
| 4162 | * value in the event structure: |
| 4163 | */ |
Peter Zijlstra | 0c1cbc1 | 2017-09-05 16:26:44 +0200 | [diff] [blame] | 4164 | again: |
| 4165 | if (state == PERF_EVENT_STATE_ACTIVE) { |
| 4166 | struct perf_read_data data; |
| 4167 | |
| 4168 | /* |
| 4169 | * Orders the ->state and ->oncpu loads such that if we see |
| 4170 | * ACTIVE we must also see the right ->oncpu. |
| 4171 | * |
| 4172 | * Matches the smp_wmb() from event_sched_in(). |
| 4173 | */ |
| 4174 | smp_rmb(); |
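		/*
		 * Side by side, the pairing relied on above is assumed to look
		 * like this (event_sched_in() is not part of this excerpt):
		 *
		 *	event_sched_in()		perf_event_read()
		 *	  write ->oncpu			  state = READ_ONCE(->state)
		 *	  smp_wmb()			  smp_rmb()
		 *	  set ->state = ACTIVE		  event_cpu = READ_ONCE(->oncpu)
		 */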
David Carrillo-Cisneros | d6a2f903 | 2016-08-17 13:55:06 -0700 | [diff] [blame] | 4175 | |
Peter Zijlstra | 451d24d | 2017-01-31 11:27:10 +0100 | [diff] [blame] | 4176 | event_cpu = READ_ONCE(event->oncpu); |
| 4177 | if ((unsigned)event_cpu >= nr_cpu_ids) |
| 4178 | return 0; |
| 4179 | |
Peter Zijlstra | 0c1cbc1 | 2017-09-05 16:26:44 +0200 | [diff] [blame] | 4180 | data = (struct perf_read_data){ |
| 4181 | .event = event, |
| 4182 | .group = group, |
| 4183 | .ret = 0, |
| 4184 | }; |
| 4185 | |
Peter Zijlstra | 451d24d | 2017-01-31 11:27:10 +0100 | [diff] [blame] | 4186 | preempt_disable(); |
| 4187 | event_cpu = __perf_event_read_cpu(event, event_cpu); |
David Carrillo-Cisneros | d6a2f903 | 2016-08-17 13:55:06 -0700 | [diff] [blame] | 4188 | |
Peter Zijlstra | 5876314 | 2016-08-30 10:15:03 +0200 | [diff] [blame] | 4189 | /* |
| 4190 | * Purposely ignore the smp_call_function_single() return |
| 4191 | * value. |
| 4192 | * |
Peter Zijlstra | 451d24d | 2017-01-31 11:27:10 +0100 | [diff] [blame] | 4193 | * If event_cpu isn't a valid CPU, it means the event got |
Peter Zijlstra | 5876314 | 2016-08-30 10:15:03 +0200 | [diff] [blame] | 4194 | * scheduled out and that will have updated the event count. |
| 4195 | * |
| 4196 | * Therefore, either way, we'll have an up-to-date event count |
| 4197 | * after this. |
| 4198 | */ |
Peter Zijlstra | 451d24d | 2017-01-31 11:27:10 +0100 | [diff] [blame] | 4199 | (void)smp_call_function_single(event_cpu, __perf_event_read, &data, 1); |
| 4200 | preempt_enable(); |
Peter Zijlstra | 5876314 | 2016-08-30 10:15:03 +0200 | [diff] [blame] | 4201 | ret = data.ret; |
Peter Zijlstra | 0c1cbc1 | 2017-09-05 16:26:44 +0200 | [diff] [blame] | 4202 | |
| 4203 | } else if (state == PERF_EVENT_STATE_INACTIVE) { |
Peter Zijlstra | 2b8988c | 2009-11-20 22:19:54 +0100 | [diff] [blame] | 4204 | struct perf_event_context *ctx = event->ctx; |
| 4205 | unsigned long flags; |
| 4206 | |
Thomas Gleixner | e625cce1 | 2009-11-17 18:02:06 +0100 | [diff] [blame] | 4207 | raw_spin_lock_irqsave(&ctx->lock, flags); |
Peter Zijlstra | 0c1cbc1 | 2017-09-05 16:26:44 +0200 | [diff] [blame] | 4208 | state = event->state; |
| 4209 | if (state != PERF_EVENT_STATE_INACTIVE) { |
| 4210 | raw_spin_unlock_irqrestore(&ctx->lock, flags); |
| 4211 | goto again; |
| 4212 | } |
| 4213 | |
Stephane Eranian | c530ccd | 2010-10-15 15:26:01 +0200 | [diff] [blame] | 4214 | /* |
Peter Zijlstra | 0c1cbc1 | 2017-09-05 16:26:44 +0200 | [diff] [blame] | 4215 | * May read while context is not active (e.g., thread is |
| 4216 | * blocked); in that case we cannot update context time |
Stephane Eranian | c530ccd | 2010-10-15 15:26:01 +0200 | [diff] [blame] | 4217 | */ |
Peter Zijlstra | 0c1cbc1 | 2017-09-05 16:26:44 +0200 | [diff] [blame] | 4218 | if (ctx->is_active & EVENT_TIME) { |
Stephane Eranian | c530ccd | 2010-10-15 15:26:01 +0200 | [diff] [blame] | 4219 | update_context_time(ctx); |
Stephane Eranian | e5d1367 | 2011-02-14 11:20:01 +0200 | [diff] [blame] | 4220 | update_cgrp_time_from_event(event); |
| 4221 | } |
Peter Zijlstra | 0c1cbc1 | 2017-09-05 16:26:44 +0200 | [diff] [blame] | 4222 | |
Peter Zijlstra | 0d3d73a | 2017-09-05 14:16:28 +0200 | [diff] [blame] | 4223 | perf_event_update_time(event); |
Peter Zijlstra | 0492d4c | 2015-09-03 20:07:48 -0700 | [diff] [blame] | 4224 | if (group) |
Peter Zijlstra | 0d3d73a | 2017-09-05 14:16:28 +0200 | [diff] [blame] | 4225 | perf_event_update_sibling_time(event); |
Thomas Gleixner | e625cce1 | 2009-11-17 18:02:06 +0100 | [diff] [blame] | 4226 | raw_spin_unlock_irqrestore(&ctx->lock, flags); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4227 | } |
Sukadev Bhattiprolu | 7d88962 | 2015-09-03 20:07:50 -0700 | [diff] [blame] | 4228 | |
| 4229 | return ret; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4230 | } |
| 4231 | |
| 4232 | /* |
| 4233 | * Initialize the perf_event context in a task_struct: |
| 4234 | */ |
Peter Zijlstra | eb18447 | 2010-09-07 15:55:13 +0200 | [diff] [blame] | 4235 | static void __perf_event_init_context(struct perf_event_context *ctx) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4236 | { |
Thomas Gleixner | e625cce1 | 2009-11-17 18:02:06 +0100 | [diff] [blame] | 4237 | raw_spin_lock_init(&ctx->lock); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4238 | mutex_init(&ctx->mutex); |
Mark Rutland | 2fde4f9 | 2015-01-07 15:01:54 +0000 | [diff] [blame] | 4239 | INIT_LIST_HEAD(&ctx->active_ctx_list); |
Alexey Budankov | 8e1a203 | 2017-09-08 11:47:03 +0300 | [diff] [blame] | 4240 | perf_event_groups_init(&ctx->pinned_groups); |
| 4241 | perf_event_groups_init(&ctx->flexible_groups); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4242 | INIT_LIST_HEAD(&ctx->event_list); |
Peter Zijlstra | 6668128 | 2017-11-13 14:28:38 +0100 | [diff] [blame] | 4243 | INIT_LIST_HEAD(&ctx->pinned_active); |
| 4244 | INIT_LIST_HEAD(&ctx->flexible_active); |
Elena Reshetova | 8c94abb | 2019-01-28 14:27:26 +0200 | [diff] [blame] | 4245 | refcount_set(&ctx->refcount, 1); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4246 | } |
| 4247 | |
Peter Zijlstra | eb18447 | 2010-09-07 15:55:13 +0200 | [diff] [blame] | 4248 | static struct perf_event_context * |
| 4249 | alloc_perf_context(struct pmu *pmu, struct task_struct *task) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4250 | { |
| 4251 | struct perf_event_context *ctx; |
Peter Zijlstra | eb18447 | 2010-09-07 15:55:13 +0200 | [diff] [blame] | 4252 | |
| 4253 | ctx = kzalloc(sizeof(struct perf_event_context), GFP_KERNEL); |
| 4254 | if (!ctx) |
| 4255 | return NULL; |
| 4256 | |
| 4257 | __perf_event_init_context(ctx); |
Matthew Wilcox (Oracle) | 7b3c92b | 2019-07-04 15:13:23 -0700 | [diff] [blame] | 4258 | if (task) |
| 4259 | ctx->task = get_task_struct(task); |
Peter Zijlstra | eb18447 | 2010-09-07 15:55:13 +0200 | [diff] [blame] | 4260 | ctx->pmu = pmu; |
| 4261 | |
| 4262 | return ctx; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4263 | } |
| 4264 | |
Matt Helsley | 2ebd4ff | 2010-09-13 13:01:19 -0700 | [diff] [blame] | 4265 | static struct task_struct * |
| 4266 | find_lively_task_by_vpid(pid_t vpid) |
| 4267 | { |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4268 | struct task_struct *task; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4269 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4270 | rcu_read_lock(); |
Matt Helsley | 2ebd4ff | 2010-09-13 13:01:19 -0700 | [diff] [blame] | 4271 | if (!vpid) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4272 | task = current; |
| 4273 | else |
Matt Helsley | 2ebd4ff | 2010-09-13 13:01:19 -0700 | [diff] [blame] | 4274 | task = find_task_by_vpid(vpid); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4275 | if (task) |
| 4276 | get_task_struct(task); |
| 4277 | rcu_read_unlock(); |
| 4278 | |
| 4279 | if (!task) |
| 4280 | return ERR_PTR(-ESRCH); |
| 4281 | |
Matt Helsley | 2ebd4ff | 2010-09-13 13:01:19 -0700 | [diff] [blame] | 4282 | return task; |
Matt Helsley | 2ebd4ff | 2010-09-13 13:01:19 -0700 | [diff] [blame] | 4283 | } |
| 4284 | |
Peter Zijlstra | fe4b04f | 2011-02-02 13:19:09 +0100 | [diff] [blame] | 4285 | /* |
| 4286 | * Returns a matching context with refcount and pincount. |
| 4287 | */ |
Peter Zijlstra | 108b02c | 2010-09-06 14:32:03 +0200 | [diff] [blame] | 4288 | static struct perf_event_context * |
Yan, Zheng | 4af57ef | 2014-11-04 21:56:01 -0500 | [diff] [blame] | 4289 | find_get_context(struct pmu *pmu, struct task_struct *task, |
| 4290 | struct perf_event *event) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4291 | { |
Peter Zijlstra | 211de6e | 2014-09-30 19:23:08 +0200 | [diff] [blame] | 4292 | struct perf_event_context *ctx, *clone_ctx = NULL; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4293 | struct perf_cpu_context *cpuctx; |
Yan, Zheng | 4af57ef | 2014-11-04 21:56:01 -0500 | [diff] [blame] | 4294 | void *task_ctx_data = NULL; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4295 | unsigned long flags; |
Peter Zijlstra | 8dc85d547 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 4296 | int ctxn, err; |
Yan, Zheng | 4af57ef | 2014-11-04 21:56:01 -0500 | [diff] [blame] | 4297 | int cpu = event->cpu; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4298 | |
Oleg Nesterov | 22a4ec7 | 2011-01-18 17:10:08 +0100 | [diff] [blame] | 4299 | if (!task) { |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4300 | /* Must be root to operate on a CPU event: */ |
Joel Fernandes (Google) | da97e18 | 2019-10-14 13:03:08 -0400 | [diff] [blame] | 4301 | err = perf_allow_cpu(&event->attr); |
| 4302 | if (err) |
| 4303 | return ERR_PTR(err); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4304 | |
Peter Zijlstra | 108b02c | 2010-09-06 14:32:03 +0200 | [diff] [blame] | 4305 | cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4306 | ctx = &cpuctx->ctx; |
| 4307 | get_ctx(ctx); |
Peter Zijlstra | fe4b04f | 2011-02-02 13:19:09 +0100 | [diff] [blame] | 4308 | ++ctx->pin_count; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4309 | |
| 4310 | return ctx; |
| 4311 | } |
| 4312 | |
Peter Zijlstra | 8dc85d547 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 4313 | err = -EINVAL; |
| 4314 | ctxn = pmu->task_ctx_nr; |
| 4315 | if (ctxn < 0) |
| 4316 | goto errout; |
| 4317 | |
Yan, Zheng | 4af57ef | 2014-11-04 21:56:01 -0500 | [diff] [blame] | 4318 | if (event->attach_state & PERF_ATTACH_TASK_DATA) { |
| 4319 | task_ctx_data = kzalloc(pmu->task_ctx_size, GFP_KERNEL); |
| 4320 | if (!task_ctx_data) { |
| 4321 | err = -ENOMEM; |
| 4322 | goto errout; |
| 4323 | } |
| 4324 | } |
| 4325 | |
Peter Zijlstra | 9ed6060 | 2010-06-11 17:36:35 +0200 | [diff] [blame] | 4326 | retry: |
Peter Zijlstra | 8dc85d547 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 4327 | ctx = perf_lock_task_context(task, ctxn, &flags); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4328 | if (ctx) { |
Peter Zijlstra | 211de6e | 2014-09-30 19:23:08 +0200 | [diff] [blame] | 4329 | clone_ctx = unclone_ctx(ctx); |
Peter Zijlstra | fe4b04f | 2011-02-02 13:19:09 +0100 | [diff] [blame] | 4330 | ++ctx->pin_count; |
Yan, Zheng | 4af57ef | 2014-11-04 21:56:01 -0500 | [diff] [blame] | 4331 | |
| 4332 | if (task_ctx_data && !ctx->task_ctx_data) { |
| 4333 | ctx->task_ctx_data = task_ctx_data; |
| 4334 | task_ctx_data = NULL; |
| 4335 | } |
Thomas Gleixner | e625cce1 | 2009-11-17 18:02:06 +0100 | [diff] [blame] | 4336 | raw_spin_unlock_irqrestore(&ctx->lock, flags); |
Peter Zijlstra | 211de6e | 2014-09-30 19:23:08 +0200 | [diff] [blame] | 4337 | |
| 4338 | if (clone_ctx) |
| 4339 | put_ctx(clone_ctx); |
Peter Zijlstra | 9137fb2 | 2011-04-09 21:17:41 +0200 | [diff] [blame] | 4340 | } else { |
Peter Zijlstra | eb18447 | 2010-09-07 15:55:13 +0200 | [diff] [blame] | 4341 | ctx = alloc_perf_context(pmu, task); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4342 | err = -ENOMEM; |
| 4343 | if (!ctx) |
| 4344 | goto errout; |
Peter Zijlstra | eb18447 | 2010-09-07 15:55:13 +0200 | [diff] [blame] | 4345 | |
Yan, Zheng | 4af57ef | 2014-11-04 21:56:01 -0500 | [diff] [blame] | 4346 | if (task_ctx_data) { |
| 4347 | ctx->task_ctx_data = task_ctx_data; |
| 4348 | task_ctx_data = NULL; |
| 4349 | } |
| 4350 | |
Oleg Nesterov | dbe08d8 | 2011-01-19 19:22:07 +0100 | [diff] [blame] | 4351 | err = 0; |
| 4352 | mutex_lock(&task->perf_event_mutex); |
| 4353 | /* |
| 4354 | * If it has already passed perf_event_exit_task(), |
| 4355 | * we must see PF_EXITING; it takes this mutex too. |
| 4356 | */ |
| 4357 | if (task->flags & PF_EXITING) |
| 4358 | err = -ESRCH; |
| 4359 | else if (task->perf_event_ctxp[ctxn]) |
| 4360 | err = -EAGAIN; |
Peter Zijlstra | fe4b04f | 2011-02-02 13:19:09 +0100 | [diff] [blame] | 4361 | else { |
Peter Zijlstra | 9137fb2 | 2011-04-09 21:17:41 +0200 | [diff] [blame] | 4362 | get_ctx(ctx); |
Peter Zijlstra | fe4b04f | 2011-02-02 13:19:09 +0100 | [diff] [blame] | 4363 | ++ctx->pin_count; |
Oleg Nesterov | dbe08d8 | 2011-01-19 19:22:07 +0100 | [diff] [blame] | 4364 | rcu_assign_pointer(task->perf_event_ctxp[ctxn], ctx); |
Peter Zijlstra | fe4b04f | 2011-02-02 13:19:09 +0100 | [diff] [blame] | 4365 | } |
Oleg Nesterov | dbe08d8 | 2011-01-19 19:22:07 +0100 | [diff] [blame] | 4366 | mutex_unlock(&task->perf_event_mutex); |
| 4367 | |
| 4368 | if (unlikely(err)) { |
Peter Zijlstra | 9137fb2 | 2011-04-09 21:17:41 +0200 | [diff] [blame] | 4369 | put_ctx(ctx); |
Oleg Nesterov | dbe08d8 | 2011-01-19 19:22:07 +0100 | [diff] [blame] | 4370 | |
| 4371 | if (err == -EAGAIN) |
| 4372 | goto retry; |
| 4373 | goto errout; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4374 | } |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4375 | } |
| 4376 | |
Yan, Zheng | 4af57ef | 2014-11-04 21:56:01 -0500 | [diff] [blame] | 4377 | kfree(task_ctx_data); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4378 | return ctx; |
| 4379 | |
Peter Zijlstra | 9ed6060 | 2010-06-11 17:36:35 +0200 | [diff] [blame] | 4380 | errout: |
Yan, Zheng | 4af57ef | 2014-11-04 21:56:01 -0500 | [diff] [blame] | 4381 | kfree(task_ctx_data); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4382 | return ERR_PTR(err); |
| 4383 | } |
| 4384 | |
Li Zefan | 6fb2915 | 2009-10-15 11:21:42 +0800 | [diff] [blame] | 4385 | static void perf_event_free_filter(struct perf_event *event); |
Alexei Starovoitov | 2541517 | 2015-03-25 12:49:20 -0700 | [diff] [blame] | 4386 | static void perf_event_free_bpf_prog(struct perf_event *event); |
Li Zefan | 6fb2915 | 2009-10-15 11:21:42 +0800 | [diff] [blame] | 4387 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4388 | static void free_event_rcu(struct rcu_head *head) |
| 4389 | { |
| 4390 | struct perf_event *event; |
| 4391 | |
| 4392 | event = container_of(head, struct perf_event, rcu_head); |
| 4393 | if (event->ns) |
| 4394 | put_pid_ns(event->ns); |
Li Zefan | 6fb2915 | 2009-10-15 11:21:42 +0800 | [diff] [blame] | 4395 | perf_event_free_filter(event); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4396 | kfree(event); |
| 4397 | } |
| 4398 | |
Peter Zijlstra | b69cf53 | 2014-03-14 10:50:33 +0100 | [diff] [blame] | 4399 | static void ring_buffer_attach(struct perf_event *event, |
Steven Rostedt (VMware) | 56de4e8 | 2019-12-13 13:21:30 -0500 | [diff] [blame] | 4400 | struct perf_buffer *rb); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4401 | |
Kan Liang | f2fb6be | 2016-03-23 11:24:37 -0700 | [diff] [blame] | 4402 | static void detach_sb_event(struct perf_event *event) |
| 4403 | { |
| 4404 | struct pmu_event_list *pel = per_cpu_ptr(&pmu_sb_events, event->cpu); |
| 4405 | |
| 4406 | raw_spin_lock(&pel->lock); |
| 4407 | list_del_rcu(&event->sb_list); |
| 4408 | raw_spin_unlock(&pel->lock); |
| 4409 | } |
| 4410 | |
David Carrillo-Cisneros | a4f144e | 2016-06-01 12:33:05 -0700 | [diff] [blame] | 4411 | static bool is_sb_event(struct perf_event *event) |
Kan Liang | f2fb6be | 2016-03-23 11:24:37 -0700 | [diff] [blame] | 4412 | { |
David Carrillo-Cisneros | a4f144e | 2016-06-01 12:33:05 -0700 | [diff] [blame] | 4413 | struct perf_event_attr *attr = &event->attr; |
| 4414 | |
Kan Liang | f2fb6be | 2016-03-23 11:24:37 -0700 | [diff] [blame] | 4415 | if (event->parent) |
David Carrillo-Cisneros | a4f144e | 2016-06-01 12:33:05 -0700 | [diff] [blame] | 4416 | return false; |
Kan Liang | f2fb6be | 2016-03-23 11:24:37 -0700 | [diff] [blame] | 4417 | |
| 4418 | if (event->attach_state & PERF_ATTACH_TASK) |
David Carrillo-Cisneros | a4f144e | 2016-06-01 12:33:05 -0700 | [diff] [blame] | 4419 | return false; |
Kan Liang | f2fb6be | 2016-03-23 11:24:37 -0700 | [diff] [blame] | 4420 | |
David Carrillo-Cisneros | a4f144e | 2016-06-01 12:33:05 -0700 | [diff] [blame] | 4421 | if (attr->mmap || attr->mmap_data || attr->mmap2 || |
| 4422 | attr->comm || attr->comm_exec || |
Song Liu | 76193a9 | 2019-01-17 08:15:13 -0800 | [diff] [blame] | 4423 | attr->task || attr->ksymbol || |
Song Liu | 21038f2 | 2019-02-25 16:20:05 -0800 | [diff] [blame] | 4424 | attr->context_switch || |
| 4425 | attr->bpf_event) |
David Carrillo-Cisneros | a4f144e | 2016-06-01 12:33:05 -0700 | [diff] [blame] | 4426 | return true; |
| 4427 | return false; |
| 4428 | } |
| 4429 | |
| 4430 | static void unaccount_pmu_sb_event(struct perf_event *event) |
| 4431 | { |
| 4432 | if (is_sb_event(event)) |
| 4433 | detach_sb_event(event); |
Kan Liang | f2fb6be | 2016-03-23 11:24:37 -0700 | [diff] [blame] | 4434 | } |
| 4435 | |
Frederic Weisbecker | 4beb31f | 2013-07-23 02:31:02 +0200 | [diff] [blame] | 4436 | static void unaccount_event_cpu(struct perf_event *event, int cpu) |
| 4437 | { |
| 4438 | if (event->parent) |
| 4439 | return; |
| 4440 | |
Frederic Weisbecker | 4beb31f | 2013-07-23 02:31:02 +0200 | [diff] [blame] | 4441 | if (is_cgroup_event(event)) |
| 4442 | atomic_dec(&per_cpu(perf_cgroup_events, cpu)); |
| 4443 | } |
| 4444 | |
Frederic Weisbecker | 555e0c1 | 2015-07-16 17:42:29 +0200 | [diff] [blame] | 4445 | #ifdef CONFIG_NO_HZ_FULL |
| 4446 | static DEFINE_SPINLOCK(nr_freq_lock); |
| 4447 | #endif |
| 4448 | |
| 4449 | static void unaccount_freq_event_nohz(void) |
| 4450 | { |
| 4451 | #ifdef CONFIG_NO_HZ_FULL |
| 4452 | spin_lock(&nr_freq_lock); |
| 4453 | if (atomic_dec_and_test(&nr_freq_events)) |
| 4454 | tick_nohz_dep_clear(TICK_DEP_BIT_PERF_EVENTS); |
| 4455 | spin_unlock(&nr_freq_lock); |
| 4456 | #endif |
| 4457 | } |
| 4458 | |
| 4459 | static void unaccount_freq_event(void) |
| 4460 | { |
| 4461 | if (tick_nohz_full_enabled()) |
| 4462 | unaccount_freq_event_nohz(); |
| 4463 | else |
| 4464 | atomic_dec(&nr_freq_events); |
| 4465 | } |
| 4466 | |
Frederic Weisbecker | 4beb31f | 2013-07-23 02:31:02 +0200 | [diff] [blame] | 4467 | static void unaccount_event(struct perf_event *event) |
| 4468 | { |
Peter Zijlstra | 25432ae | 2016-01-08 11:05:09 +0100 | [diff] [blame] | 4469 | bool dec = false; |
| 4470 | |
Frederic Weisbecker | 4beb31f | 2013-07-23 02:31:02 +0200 | [diff] [blame] | 4471 | if (event->parent) |
| 4472 | return; |
| 4473 | |
| 4474 | if (event->attach_state & PERF_ATTACH_TASK) |
Peter Zijlstra | 25432ae | 2016-01-08 11:05:09 +0100 | [diff] [blame] | 4475 | dec = true; |
Frederic Weisbecker | 4beb31f | 2013-07-23 02:31:02 +0200 | [diff] [blame] | 4476 | if (event->attr.mmap || event->attr.mmap_data) |
| 4477 | atomic_dec(&nr_mmap_events); |
| 4478 | if (event->attr.comm) |
| 4479 | atomic_dec(&nr_comm_events); |
Hari Bathini | e422267 | 2017-03-08 02:11:36 +0530 | [diff] [blame] | 4480 | if (event->attr.namespaces) |
| 4481 | atomic_dec(&nr_namespaces_events); |
Frederic Weisbecker | 4beb31f | 2013-07-23 02:31:02 +0200 | [diff] [blame] | 4482 | if (event->attr.task) |
| 4483 | atomic_dec(&nr_task_events); |
Frederic Weisbecker | 948b26b | 2013-08-02 18:29:55 +0200 | [diff] [blame] | 4484 | if (event->attr.freq) |
Frederic Weisbecker | 555e0c1 | 2015-07-16 17:42:29 +0200 | [diff] [blame] | 4485 | unaccount_freq_event(); |
Adrian Hunter | 45ac140 | 2015-07-21 12:44:02 +0300 | [diff] [blame] | 4486 | if (event->attr.context_switch) { |
Peter Zijlstra | 25432ae | 2016-01-08 11:05:09 +0100 | [diff] [blame] | 4487 | dec = true; |
Adrian Hunter | 45ac140 | 2015-07-21 12:44:02 +0300 | [diff] [blame] | 4488 | atomic_dec(&nr_switch_events); |
| 4489 | } |
Frederic Weisbecker | 4beb31f | 2013-07-23 02:31:02 +0200 | [diff] [blame] | 4490 | if (is_cgroup_event(event)) |
Peter Zijlstra | 25432ae | 2016-01-08 11:05:09 +0100 | [diff] [blame] | 4491 | dec = true; |
Frederic Weisbecker | 4beb31f | 2013-07-23 02:31:02 +0200 | [diff] [blame] | 4492 | if (has_branch_stack(event)) |
Peter Zijlstra | 25432ae | 2016-01-08 11:05:09 +0100 | [diff] [blame] | 4493 | dec = true; |
Song Liu | 76193a9 | 2019-01-17 08:15:13 -0800 | [diff] [blame] | 4494 | if (event->attr.ksymbol) |
| 4495 | atomic_dec(&nr_ksymbol_events); |
Song Liu | 6ee52e2 | 2019-01-17 08:15:15 -0800 | [diff] [blame] | 4496 | if (event->attr.bpf_event) |
| 4497 | atomic_dec(&nr_bpf_events); |
Peter Zijlstra | 25432ae | 2016-01-08 11:05:09 +0100 | [diff] [blame] | 4498 | |
Peter Zijlstra | 9107c89 | 2016-02-24 18:45:45 +0100 | [diff] [blame] | 4499 | if (dec) { |
| 4500 | if (!atomic_add_unless(&perf_sched_count, -1, 1)) |
| 4501 | schedule_delayed_work(&perf_sched_work, HZ); |
| 4502 | } |
Frederic Weisbecker | 4beb31f | 2013-07-23 02:31:02 +0200 | [diff] [blame] | 4503 | |
| 4504 | unaccount_event_cpu(event, event->cpu); |
Kan Liang | f2fb6be | 2016-03-23 11:24:37 -0700 | [diff] [blame] | 4505 | |
| 4506 | unaccount_pmu_sb_event(event); |
Frederic Weisbecker | 4beb31f | 2013-07-23 02:31:02 +0200 | [diff] [blame] | 4507 | } |
| 4508 | |
Peter Zijlstra | 9107c89 | 2016-02-24 18:45:45 +0100 | [diff] [blame] | 4509 | static void perf_sched_delayed(struct work_struct *work) |
| 4510 | { |
| 4511 | mutex_lock(&perf_sched_mutex); |
| 4512 | if (atomic_dec_and_test(&perf_sched_count)) |
| 4513 | static_branch_disable(&perf_sched_events); |
| 4514 | mutex_unlock(&perf_sched_mutex); |
| 4515 | } |
| 4516 | |
Alexander Shishkin | bed5b25 | 2015-01-30 12:31:06 +0200 | [diff] [blame] | 4517 | /* |
| 4518 | * The following implement mutual exclusion of events on "exclusive" pmus |
| 4519 | * (PERF_PMU_CAP_EXCLUSIVE). Such pmus can only have one event scheduled |
| 4520 | * at a time, so we disallow creating events that might conflict, namely: |
| 4521 | * |
| 4522 | * 1) cpu-wide events in the presence of per-task events, |
| 4523 | * 2) per-task events in the presence of cpu-wide events, |
| 4524 | * 3) two matching events on the same context. |
| 4525 | * |
| 4526 | * The former two cases are handled in the allocation path (perf_event_alloc(), |
Peter Zijlstra | a0733e6 | 2016-01-26 12:14:40 +0100 | [diff] [blame] | 4527 | * _free_event()), the latter -- before the first perf_install_in_context(). |
Alexander Shishkin | bed5b25 | 2015-01-30 12:31:06 +0200 | [diff] [blame] | 4528 | */ |
| 4529 | static int exclusive_event_init(struct perf_event *event) |
| 4530 | { |
| 4531 | struct pmu *pmu = event->pmu; |
| 4532 | |
Alexander Shishkin | 8a58dda | 2019-07-01 14:07:55 +0300 | [diff] [blame] | 4533 | if (!is_exclusive_pmu(pmu)) |
Alexander Shishkin | bed5b25 | 2015-01-30 12:31:06 +0200 | [diff] [blame] | 4534 | return 0; |
| 4535 | |
| 4536 | /* |
| 4537 | * Prevent co-existence of per-task and cpu-wide events on the |
| 4538 | * same exclusive pmu. |
| 4539 | * |
| 4540 | * Negative pmu::exclusive_cnt means there are cpu-wide |
| 4541 | * events on this "exclusive" pmu, positive means there are |
| 4542 | * per-task events. |
| 4543 | * |
| 4544 | * Since this is called in perf_event_alloc() path, event::ctx |
| 4545 | * doesn't exist yet; it is, however, safe to use PERF_ATTACH_TASK |
| 4546 | * to mean "per-task event", because unlike other attach states it |
| 4547 | * never gets cleared. |
| 4548 | */ |
| 4549 | if (event->attach_state & PERF_ATTACH_TASK) { |
| 4550 | if (!atomic_inc_unless_negative(&pmu->exclusive_cnt)) |
| 4551 | return -EBUSY; |
| 4552 | } else { |
| 4553 | if (!atomic_dec_unless_positive(&pmu->exclusive_cnt)) |
| 4554 | return -EBUSY; |
| 4555 | } |
| 4556 | |
| 4557 | return 0; |
| 4558 | } |
| 4559 | |
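A short walk-through of the counting scheme the comment above describes, with an invented sequence of opens on one exclusive PMU:

	per-task open #1:  atomic_inc_unless_negative()   0 -> +1
	per-task open #2:  atomic_inc_unless_negative()  +1 -> +2
	cpu-wide open:     atomic_dec_unless_positive()  fails (count > 0), so -EBUSY

The mirror image holds when cpu-wide events come first and drive exclusive_cnt negative, so per-task and cpu-wide users never coexist on such a PMU.
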
| 4560 | static void exclusive_event_destroy(struct perf_event *event) |
| 4561 | { |
| 4562 | struct pmu *pmu = event->pmu; |
| 4563 | |
Alexander Shishkin | 8a58dda | 2019-07-01 14:07:55 +0300 | [diff] [blame] | 4564 | if (!is_exclusive_pmu(pmu)) |
Alexander Shishkin | bed5b25 | 2015-01-30 12:31:06 +0200 | [diff] [blame] | 4565 | return; |
| 4566 | |
| 4567 | /* see comment in exclusive_event_init() */ |
| 4568 | if (event->attach_state & PERF_ATTACH_TASK) |
| 4569 | atomic_dec(&pmu->exclusive_cnt); |
| 4570 | else |
| 4571 | atomic_inc(&pmu->exclusive_cnt); |
| 4572 | } |
| 4573 | |
| 4574 | static bool exclusive_event_match(struct perf_event *e1, struct perf_event *e2) |
| 4575 | { |
Alexander Shishkin | 3bf6215 | 2016-09-20 18:48:11 +0300 | [diff] [blame] | 4576 | if ((e1->pmu == e2->pmu) && |
Alexander Shishkin | bed5b25 | 2015-01-30 12:31:06 +0200 | [diff] [blame] | 4577 | (e1->cpu == e2->cpu || |
| 4578 | e1->cpu == -1 || |
| 4579 | e2->cpu == -1)) |
| 4580 | return true; |
| 4581 | return false; |
| 4582 | } |
| 4583 | |
Alexander Shishkin | bed5b25 | 2015-01-30 12:31:06 +0200 | [diff] [blame] | 4584 | static bool exclusive_event_installable(struct perf_event *event, |
| 4585 | struct perf_event_context *ctx) |
| 4586 | { |
| 4587 | struct perf_event *iter_event; |
| 4588 | struct pmu *pmu = event->pmu; |
| 4589 | |
Alexander Shishkin | 8a58dda | 2019-07-01 14:07:55 +0300 | [diff] [blame] | 4590 | lockdep_assert_held(&ctx->mutex); |
| 4591 | |
| 4592 | if (!is_exclusive_pmu(pmu)) |
Alexander Shishkin | bed5b25 | 2015-01-30 12:31:06 +0200 | [diff] [blame] | 4593 | return true; |
| 4594 | |
| 4595 | list_for_each_entry(iter_event, &ctx->event_list, event_entry) { |
| 4596 | if (exclusive_event_match(iter_event, event)) |
| 4597 | return false; |
| 4598 | } |
| 4599 | |
| 4600 | return true; |
| 4601 | } |
| 4602 | |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 4603 | static void perf_addr_filters_splice(struct perf_event *event, |
| 4604 | struct list_head *head); |
| 4605 | |
Peter Zijlstra | 683ede4 | 2014-05-05 12:11:24 +0200 | [diff] [blame] | 4606 | static void _free_event(struct perf_event *event) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4607 | { |
Peter Zijlstra | e360adb | 2010-10-14 14:01:34 +0800 | [diff] [blame] | 4608 | irq_work_sync(&event->pending); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4609 | |
Frederic Weisbecker | 4beb31f | 2013-07-23 02:31:02 +0200 | [diff] [blame] | 4610 | unaccount_event(event); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4611 | |
Joel Fernandes (Google) | da97e18 | 2019-10-14 13:03:08 -0400 | [diff] [blame] | 4612 | security_perf_event_free(event); |
| 4613 | |
Frederic Weisbecker | 7636913 | 2011-05-19 19:55:04 +0200 | [diff] [blame] | 4614 | if (event->rb) { |
Peter Zijlstra | 9bb5d40 | 2013-06-04 10:44:21 +0200 | [diff] [blame] | 4615 | /* |
| 4616 | * Can happen when we close an event with re-directed output. |
| 4617 | * |
| 4618 | * Since we have a 0 refcount, perf_mmap_close() will skip |
| 4619 | * over us, possibly making our ring_buffer_put() the last. |
| 4620 | */ |
| 4621 | mutex_lock(&event->mmap_mutex); |
Peter Zijlstra | b69cf53 | 2014-03-14 10:50:33 +0100 | [diff] [blame] | 4622 | ring_buffer_attach(event, NULL); |
Peter Zijlstra | 9bb5d40 | 2013-06-04 10:44:21 +0200 | [diff] [blame] | 4623 | mutex_unlock(&event->mmap_mutex); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4624 | } |
| 4625 | |
Stephane Eranian | e5d1367 | 2011-02-14 11:20:01 +0200 | [diff] [blame] | 4626 | if (is_cgroup_event(event)) |
| 4627 | perf_detach_cgroup(event); |
| 4628 | |
Peter Zijlstra | a0733e6 | 2016-01-26 12:14:40 +0100 | [diff] [blame] | 4629 | if (!event->parent) { |
| 4630 | if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) |
| 4631 | put_callchain_buffers(); |
| 4632 | } |
| 4633 | |
| 4634 | perf_event_free_bpf_prog(event); |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 4635 | perf_addr_filters_splice(event, NULL); |
Alexander Shishkin | c60f83b | 2019-02-15 13:56:55 +0200 | [diff] [blame] | 4636 | kfree(event->addr_filter_ranges); |
Peter Zijlstra | a0733e6 | 2016-01-26 12:14:40 +0100 | [diff] [blame] | 4637 | |
| 4638 | if (event->destroy) |
| 4639 | event->destroy(event); |
| 4640 | |
Peter Zijlstra | 1cf8dfe | 2019-07-13 11:21:25 +0200 | [diff] [blame] | 4641 | /* |
| 4642 | * Must be after ->destroy(), due to uprobe_perf_close() using |
| 4643 | * hw.target. |
| 4644 | */ |
Prashant Bhole | 621b6d2 | 2018-04-09 19:03:46 +0900 | [diff] [blame] | 4645 | if (event->hw.target) |
| 4646 | put_task_struct(event->hw.target); |
| 4647 | |
Peter Zijlstra | 1cf8dfe | 2019-07-13 11:21:25 +0200 | [diff] [blame] | 4648 | /* |
| 4649 | * perf_event_free_task() relies on put_ctx() being 'last'; in particular, |
| 4650 | * all task references must be cleaned up. |
| 4651 | */ |
| 4652 | if (event->ctx) |
| 4653 | put_ctx(event->ctx); |
| 4654 | |
Alexander Shishkin | 62a92c8 | 2016-06-07 15:44:15 +0300 | [diff] [blame] | 4655 | exclusive_event_destroy(event); |
| 4656 | module_put(event->pmu->module); |
Peter Zijlstra | a0733e6 | 2016-01-26 12:14:40 +0100 | [diff] [blame] | 4657 | |
| 4658 | call_rcu(&event->rcu_head, free_event_rcu); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4659 | } |
| 4660 | |
Peter Zijlstra | 683ede4 | 2014-05-05 12:11:24 +0200 | [diff] [blame] | 4661 | /* |
| 4662 | * Used to free events which have a known refcount of 1, such as in error paths |
| 4663 | * where the event isn't exposed yet, and for inherited events. |
| 4664 | */ |
| 4665 | static void free_event(struct perf_event *event) |
Arjan van de Ven | fb0459d | 2009-09-25 12:25:56 +0200 | [diff] [blame] | 4666 | { |
Peter Zijlstra | 683ede4 | 2014-05-05 12:11:24 +0200 | [diff] [blame] | 4667 | if (WARN(atomic_long_cmpxchg(&event->refcount, 1, 0) != 1, |
| 4668 | "unexpected event refcount: %ld; ptr=%p\n", |
| 4669 | atomic_long_read(&event->refcount), event)) { |
| 4670 | /* leak to avoid use-after-free */ |
| 4671 | return; |
| 4672 | } |
Arjan van de Ven | fb0459d | 2009-09-25 12:25:56 +0200 | [diff] [blame] | 4673 | |
Peter Zijlstra | 683ede4 | 2014-05-05 12:11:24 +0200 | [diff] [blame] | 4674 | _free_event(event); |
Arjan van de Ven | fb0459d | 2009-09-25 12:25:56 +0200 | [diff] [blame] | 4675 | } |
Arjan van de Ven | fb0459d | 2009-09-25 12:25:56 +0200 | [diff] [blame] | 4676 | |
Peter Zijlstra | a66a305 | 2009-11-23 11:37:23 +0100 | [diff] [blame] | 4677 | /* |
Jiri Olsa | f869776 | 2014-08-01 14:33:01 +0200 | [diff] [blame] | 4678 | * Remove user event from the owner task. |
Peter Zijlstra | a66a305 | 2009-11-23 11:37:23 +0100 | [diff] [blame] | 4679 | */ |
Jiri Olsa | f869776 | 2014-08-01 14:33:01 +0200 | [diff] [blame] | 4680 | static void perf_remove_from_owner(struct perf_event *event) |
Peter Zijlstra | a66a305 | 2009-11-23 11:37:23 +0100 | [diff] [blame] | 4681 | { |
Peter Zijlstra | 8882135 | 2010-11-09 19:01:43 +0100 | [diff] [blame] | 4682 | struct task_struct *owner; |
Peter Zijlstra | a66a305 | 2009-11-23 11:37:23 +0100 | [diff] [blame] | 4683 | |
Peter Zijlstra | 8882135 | 2010-11-09 19:01:43 +0100 | [diff] [blame] | 4684 | rcu_read_lock(); |
Peter Zijlstra | 8882135 | 2010-11-09 19:01:43 +0100 | [diff] [blame] | 4685 | /* |
Peter Zijlstra | f47c02c | 2016-01-26 12:30:14 +0100 | [diff] [blame] | 4686 | * Matches the smp_store_release() in perf_event_exit_task(). If we |
| 4687 | * observe !owner, it means the list deletion is complete and we can |
| 4688 | * indeed free this event; otherwise we need to serialize on |
Peter Zijlstra | 8882135 | 2010-11-09 19:01:43 +0100 | [diff] [blame] | 4689 | * owner->perf_event_mutex. |
| 4690 | */ |
Will Deacon | 506458e | 2017-10-24 11:22:48 +0100 | [diff] [blame] | 4691 | owner = READ_ONCE(event->owner); |
Peter Zijlstra | 8882135 | 2010-11-09 19:01:43 +0100 | [diff] [blame] | 4692 | if (owner) { |
| 4693 | /* |
| 4694 | * Since delayed_put_task_struct() also drops the last |
| 4695 | * task reference we can safely take a new reference |
| 4696 | * while holding the rcu_read_lock(). |
| 4697 | */ |
| 4698 | get_task_struct(owner); |
| 4699 | } |
| 4700 | rcu_read_unlock(); |
| 4701 | |
| 4702 | if (owner) { |
Peter Zijlstra | f63a8da | 2015-01-23 12:24:14 +0100 | [diff] [blame] | 4703 | /* |
| 4704 | * If we're here through perf_event_exit_task(), we're already |
| 4705 | * holding ctx->mutex, which would be an inversion wrt. the |
| 4706 | * normal lock order. |
| 4707 | * |
| 4708 | * However, we can safely take this lock because it's the child |
| 4709 | * ctx->mutex. |
| 4710 | */ |
| 4711 | mutex_lock_nested(&owner->perf_event_mutex, SINGLE_DEPTH_NESTING); |
| 4712 | |
Peter Zijlstra | 8882135 | 2010-11-09 19:01:43 +0100 | [diff] [blame] | 4713 | /* |
| 4714 | * We have to re-check the event->owner field; if it is cleared, |
| 4715 | * we raced with perf_event_exit_task(). Acquiring the mutex |
| 4716 | * ensured they're done, and we can proceed with freeing the |
| 4717 | * event. |
| 4718 | */ |
Peter Zijlstra | f47c02c | 2016-01-26 12:30:14 +0100 | [diff] [blame] | 4719 | if (event->owner) { |
Peter Zijlstra | 8882135 | 2010-11-09 19:01:43 +0100 | [diff] [blame] | 4720 | list_del_init(&event->owner_entry); |
Peter Zijlstra | f47c02c | 2016-01-26 12:30:14 +0100 | [diff] [blame] | 4721 | smp_store_release(&event->owner, NULL); |
| 4722 | } |
Peter Zijlstra | 8882135 | 2010-11-09 19:01:43 +0100 | [diff] [blame] | 4723 | mutex_unlock(&owner->perf_event_mutex); |
| 4724 | put_task_struct(owner); |
| 4725 | } |
Jiri Olsa | f869776 | 2014-08-01 14:33:01 +0200 | [diff] [blame] | 4726 | } |
| 4727 | |
Jiri Olsa | f869776 | 2014-08-01 14:33:01 +0200 | [diff] [blame] | 4728 | static void put_event(struct perf_event *event) |
| 4729 | { |
Jiri Olsa | f869776 | 2014-08-01 14:33:01 +0200 | [diff] [blame] | 4730 | if (!atomic_long_dec_and_test(&event->refcount)) |
| 4731 | return; |
| 4732 | |
Peter Zijlstra | 683ede4 | 2014-05-05 12:11:24 +0200 | [diff] [blame] | 4733 | _free_event(event); |
Al Viro | a6fa941 | 2012-08-20 14:59:25 +0100 | [diff] [blame] | 4734 | } |
| 4735 | |
Peter Zijlstra | c6e5b73 | 2016-01-15 16:07:41 +0200 | [diff] [blame] | 4736 | /* |
| 4737 | * Kill an event dead; while event::refcount will preserve the event |
| 4738 | * object, it will not preserve its functionality. Once the last 'user' |
| 4739 | * gives up the object, we'll destroy the thing. |
| 4740 | */ |
Peter Zijlstra | 683ede4 | 2014-05-05 12:11:24 +0200 | [diff] [blame] | 4741 | int perf_event_release_kernel(struct perf_event *event) |
| 4742 | { |
Peter Zijlstra | a4f4bb6 | 2016-02-24 18:45:42 +0100 | [diff] [blame] | 4743 | struct perf_event_context *ctx = event->ctx; |
Peter Zijlstra | c6e5b73 | 2016-01-15 16:07:41 +0200 | [diff] [blame] | 4744 | struct perf_event *child, *tmp; |
Peter Zijlstra | 82d9485 | 2018-01-09 13:10:30 +0100 | [diff] [blame] | 4745 | LIST_HEAD(free_list); |
Peter Zijlstra | c6e5b73 | 2016-01-15 16:07:41 +0200 | [diff] [blame] | 4746 | |
Peter Zijlstra | a4f4bb6 | 2016-02-24 18:45:42 +0100 | [diff] [blame] | 4747 | /* |
| 4748 | * If we got here through err_file: fput(event_file); we will not have |
| 4749 | * attached to a context yet. |
| 4750 | */ |
| 4751 | if (!ctx) { |
| 4752 | WARN_ON_ONCE(event->attach_state & |
| 4753 | (PERF_ATTACH_CONTEXT|PERF_ATTACH_GROUP)); |
| 4754 | goto no_ctx; |
| 4755 | } |
| 4756 | |
Peter Zijlstra | 8882135 | 2010-11-09 19:01:43 +0100 | [diff] [blame] | 4757 | if (!is_kernel_event(event)) |
| 4758 | perf_remove_from_owner(event); |
| 4759 | |
Peter Zijlstra | 5fa7c8e | 2016-01-26 15:25:15 +0100 | [diff] [blame] | 4760 | ctx = perf_event_ctx_lock(event); |
Peter Zijlstra | 683ede4 | 2014-05-05 12:11:24 +0200 | [diff] [blame] | 4761 | WARN_ON_ONCE(ctx->parent_ctx); |
Peter Zijlstra | a69b0ca | 2016-02-24 18:45:44 +0100 | [diff] [blame] | 4762 | perf_remove_from_context(event, DETACH_GROUP); |
Peter Zijlstra | 8882135 | 2010-11-09 19:01:43 +0100 | [diff] [blame] | 4763 | |
Peter Zijlstra | a69b0ca | 2016-02-24 18:45:44 +0100 | [diff] [blame] | 4764 | raw_spin_lock_irq(&ctx->lock); |
Peter Zijlstra | 60beda8 | 2016-01-26 14:55:02 +0100 | [diff] [blame] | 4765 | /* |
Peter Zijlstra | d8a8cfc | 2017-03-16 13:47:51 +0100 | [diff] [blame] | 4766 | * Mark this event as STATE_DEAD; there is no external reference to it |
Peter Zijlstra | a69b0ca | 2016-02-24 18:45:44 +0100 | [diff] [blame] | 4767 | * anymore. |
Peter Zijlstra | c6e5b73 | 2016-01-15 16:07:41 +0200 | [diff] [blame] | 4768 | * |
Peter Zijlstra | a69b0ca | 2016-02-24 18:45:44 +0100 | [diff] [blame] | 4769 | * Anybody acquiring event->child_mutex after the below loop _must_ |
| 4770 | * also see this, most importantly inherit_event() which will avoid |
| 4771 | * placing more children on the list. |
Peter Zijlstra | c6e5b73 | 2016-01-15 16:07:41 +0200 | [diff] [blame] | 4772 | * |
| 4773 | * Thus this guarantees that we will in fact observe and kill _ALL_ |
| 4774 | * child events. |
Peter Zijlstra | 60beda8 | 2016-01-26 14:55:02 +0100 | [diff] [blame] | 4775 | */ |
Peter Zijlstra | a69b0ca | 2016-02-24 18:45:44 +0100 | [diff] [blame] | 4776 | event->state = PERF_EVENT_STATE_DEAD; |
| 4777 | raw_spin_unlock_irq(&ctx->lock); |
| 4778 | |
| 4779 | perf_event_ctx_unlock(event, ctx); |
Peter Zijlstra | 60beda8 | 2016-01-26 14:55:02 +0100 | [diff] [blame] | 4780 | |
Peter Zijlstra | c6e5b73 | 2016-01-15 16:07:41 +0200 | [diff] [blame] | 4781 | again: |
| 4782 | mutex_lock(&event->child_mutex); |
| 4783 | list_for_each_entry(child, &event->child_list, child_list) { |
Al Viro | a6fa941 | 2012-08-20 14:59:25 +0100 | [diff] [blame] | 4784 | |
Peter Zijlstra | c6e5b73 | 2016-01-15 16:07:41 +0200 | [diff] [blame] | 4785 | /* |
| 4786 | * Cannot change: child events are not migrated; see the |
| 4787 | * comment with perf_event_ctx_lock_nested(). |
| 4788 | */ |
Will Deacon | 506458e | 2017-10-24 11:22:48 +0100 | [diff] [blame] | 4789 | ctx = READ_ONCE(child->ctx); |
Peter Zijlstra | c6e5b73 | 2016-01-15 16:07:41 +0200 | [diff] [blame] | 4790 | /* |
| 4791 | * Since child_mutex nests inside ctx::mutex, we must jump |
| 4792 | * through hoops. We start by grabbing a reference on the ctx. |
| 4793 | * |
| 4794 | * Since the event cannot get freed while we hold the |
| 4795 | * child_mutex, the context must also exist and have a !0 |
| 4796 | * reference count. |
| 4797 | */ |
| 4798 | get_ctx(ctx); |
| 4799 | |
| 4800 | /* |
| 4801 | * Now that we have a ctx ref, we can drop child_mutex, and |
| 4802 | * acquire ctx::mutex without fear of it going away. Then we |
| 4803 | * can re-acquire child_mutex. |
| 4804 | */ |
| 4805 | mutex_unlock(&event->child_mutex); |
| 4806 | mutex_lock(&ctx->mutex); |
| 4807 | mutex_lock(&event->child_mutex); |
| 4808 | |
| 4809 | /* |
| 4810 | * Now that we hold ctx::mutex and child_mutex, revalidate our |
| 4811 | * state; if child is still the first entry, it didn't get freed |
| 4812 | * and we can continue doing so. |
| 4813 | */ |
| 4814 | tmp = list_first_entry_or_null(&event->child_list, |
| 4815 | struct perf_event, child_list); |
| 4816 | if (tmp == child) { |
| 4817 | perf_remove_from_context(child, DETACH_GROUP); |
Peter Zijlstra | 82d9485 | 2018-01-09 13:10:30 +0100 | [diff] [blame] | 4818 | list_move(&child->child_list, &free_list); |
Peter Zijlstra | c6e5b73 | 2016-01-15 16:07:41 +0200 | [diff] [blame] | 4819 | /* |
| 4820 | * This matches the refcount bump in inherit_event(); |
| 4821 | * this can't be the last reference. |
| 4822 | */ |
| 4823 | put_event(event); |
| 4824 | } |
| 4825 | |
| 4826 | mutex_unlock(&event->child_mutex); |
| 4827 | mutex_unlock(&ctx->mutex); |
| 4828 | put_ctx(ctx); |
| 4829 | goto again; |
| 4830 | } |
| 4831 | mutex_unlock(&event->child_mutex); |
| 4832 | |
Peter Zijlstra | 82d9485 | 2018-01-09 13:10:30 +0100 | [diff] [blame] | 4833 | list_for_each_entry_safe(child, tmp, &free_list, child_list) { |
Peter Zijlstra | 1cf8dfe | 2019-07-13 11:21:25 +0200 | [diff] [blame] | 4834 | void *var = &child->ctx->refcount; |
| 4835 | |
Peter Zijlstra | 82d9485 | 2018-01-09 13:10:30 +0100 | [diff] [blame] | 4836 | list_del(&child->child_list); |
| 4837 | free_event(child); |
Peter Zijlstra | 1cf8dfe | 2019-07-13 11:21:25 +0200 | [diff] [blame] | 4838 | |
| 4839 | /* |
| 4840 | * Wake any perf_event_free_task() waiting for this event to be |
| 4841 | * freed. |
| 4842 | */ |
| 4843 | smp_mb(); /* pairs with wait_var_event() */ |
| 4844 | wake_up_var(var); |
Peter Zijlstra | 82d9485 | 2018-01-09 13:10:30 +0100 | [diff] [blame] | 4845 | } |
| 4846 | |
Peter Zijlstra | a4f4bb6 | 2016-02-24 18:45:42 +0100 | [diff] [blame] | 4847 | no_ctx: |
| 4848 | put_event(event); /* Must be the 'last' reference */ |
Peter Zijlstra | 683ede4 | 2014-05-05 12:11:24 +0200 | [diff] [blame] | 4849 | return 0; |
| 4850 | } |
| 4851 | EXPORT_SYMBOL_GPL(perf_event_release_kernel); |
| 4852 | |
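/*
 * A minimal usage sketch, for orientation only: in-kernel consumers normally
 * obtain the event from perf_event_create_kernel_counter() and hand it back
 * here once they are done. The attr values and the 'cpu' variable below are
 * assumed for the example, not taken from any particular caller:
 *
 *	struct perf_event_attr attr = {
 *		.type	= PERF_TYPE_HARDWARE,
 *		.config	= PERF_COUNT_HW_CPU_CYCLES,
 *		.size	= sizeof(attr),
 *		.pinned	= 1,
 *	};
 *	struct perf_event *event;
 *
 *	event = perf_event_create_kernel_counter(&attr, cpu, NULL, NULL, NULL);
 *	if (IS_ERR(event))
 *		return PTR_ERR(event);
 *	...
 *	perf_event_release_kernel(event);
 */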
Peter Zijlstra | 8b10c5e | 2015-05-01 16:08:46 +0200 | [diff] [blame] | 4853 | /* |
| 4854 | * Called when the last reference to the file is gone. |
| 4855 | */ |
Al Viro | a6fa941 | 2012-08-20 14:59:25 +0100 | [diff] [blame] | 4856 | static int perf_release(struct inode *inode, struct file *file) |
| 4857 | { |
Peter Zijlstra | c6e5b73 | 2016-01-15 16:07:41 +0200 | [diff] [blame] | 4858 | perf_event_release_kernel(file->private_data); |
Al Viro | a6fa941 | 2012-08-20 14:59:25 +0100 | [diff] [blame] | 4859 | return 0; |
Peter Zijlstra | a66a305 | 2009-11-23 11:37:23 +0100 | [diff] [blame] | 4860 | } |
| 4861 | |
Peter Zijlstra | ca0dd44 | 2017-09-05 13:23:44 +0200 | [diff] [blame] | 4862 | static u64 __perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4863 | { |
| 4864 | struct perf_event *child; |
| 4865 | u64 total = 0; |
| 4866 | |
Peter Zijlstra | 59ed446 | 2009-11-20 22:19:55 +0100 | [diff] [blame] | 4867 | *enabled = 0; |
| 4868 | *running = 0; |
| 4869 | |
Peter Zijlstra | 6f10581 | 2009-11-20 22:19:56 +0100 | [diff] [blame] | 4870 | mutex_lock(&event->child_mutex); |
Sukadev Bhattiprolu | 01add3e | 2015-09-03 20:07:46 -0700 | [diff] [blame] | 4871 | |
Sukadev Bhattiprolu | 7d88962 | 2015-09-03 20:07:50 -0700 | [diff] [blame] | 4872 | (void)perf_event_read(event, false); |
Sukadev Bhattiprolu | 01add3e | 2015-09-03 20:07:46 -0700 | [diff] [blame] | 4873 | total += perf_event_count(event); |
| 4874 | |
Peter Zijlstra | 59ed446 | 2009-11-20 22:19:55 +0100 | [diff] [blame] | 4875 | *enabled += event->total_time_enabled + |
| 4876 | atomic64_read(&event->child_total_time_enabled); |
| 4877 | *running += event->total_time_running + |
| 4878 | atomic64_read(&event->child_total_time_running); |
| 4879 | |
| 4880 | list_for_each_entry(child, &event->child_list, child_list) { |
Sukadev Bhattiprolu | 7d88962 | 2015-09-03 20:07:50 -0700 | [diff] [blame] | 4881 | (void)perf_event_read(child, false); |
Sukadev Bhattiprolu | 01add3e | 2015-09-03 20:07:46 -0700 | [diff] [blame] | 4882 | total += perf_event_count(child); |
Peter Zijlstra | 59ed446 | 2009-11-20 22:19:55 +0100 | [diff] [blame] | 4883 | *enabled += child->total_time_enabled; |
| 4884 | *running += child->total_time_running; |
| 4885 | } |
Peter Zijlstra | 6f10581 | 2009-11-20 22:19:56 +0100 | [diff] [blame] | 4886 | mutex_unlock(&event->child_mutex); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4887 | |
| 4888 | return total; |
| 4889 | } |
Peter Zijlstra | ca0dd44 | 2017-09-05 13:23:44 +0200 | [diff] [blame] | 4890 | |
| 4891 | u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running) |
| 4892 | { |
| 4893 | struct perf_event_context *ctx; |
| 4894 | u64 count; |
| 4895 | |
| 4896 | ctx = perf_event_ctx_lock(event); |
| 4897 | count = __perf_event_read_value(event, enabled, running); |
| 4898 | perf_event_ctx_unlock(event, ctx); |
| 4899 | |
| 4900 | return count; |
| 4901 | } |
Arjan van de Ven | fb0459d | 2009-09-25 12:25:56 +0200 | [diff] [blame] | 4902 | EXPORT_SYMBOL_GPL(perf_event_read_value); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4903 | |
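/*
 * A short sketch of how the {count, enabled, running} triple above is
 * commonly consumed: when the event was time-multiplexed, callers usually
 * scale the raw count by enabled/running to estimate the full-period value.
 * The scaling is the caller's choice, not something this function does, and
 * the multiplication below ignores 64-bit overflow for the sake of the
 * illustration:
 *
 *	u64 enabled, running;
 *	u64 count = perf_event_read_value(event, &enabled, &running);
 *
 *	if (running && running < enabled)
 *		count = div64_u64(count * enabled, running);
 */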
Sukadev Bhattiprolu | 7d88962 | 2015-09-03 20:07:50 -0700 | [diff] [blame] | 4904 | static int __perf_read_group_add(struct perf_event *leader, |
Peter Zijlstra | fa8c269 | 2015-09-03 20:07:49 -0700 | [diff] [blame] | 4905 | u64 read_format, u64 *values) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4906 | { |
Jiri Olsa | 2aeb188 | 2017-07-20 16:14:55 +0200 | [diff] [blame] | 4907 | struct perf_event_context *ctx = leader->ctx; |
Peter Zijlstra | fa8c269 | 2015-09-03 20:07:49 -0700 | [diff] [blame] | 4908 | struct perf_event *sub; |
Jiri Olsa | 2aeb188 | 2017-07-20 16:14:55 +0200 | [diff] [blame] | 4909 | unsigned long flags; |
Peter Zijlstra | fa8c269 | 2015-09-03 20:07:49 -0700 | [diff] [blame] | 4910 | int n = 1; /* skip @nr */ |
Sukadev Bhattiprolu | 7d88962 | 2015-09-03 20:07:50 -0700 | [diff] [blame] | 4911 | int ret; |
Peter Zijlstra | abf4868 | 2009-11-20 22:19:49 +0100 | [diff] [blame] | 4912 | |
Sukadev Bhattiprolu | 7d88962 | 2015-09-03 20:07:50 -0700 | [diff] [blame] | 4913 | ret = perf_event_read(leader, true); |
| 4914 | if (ret) |
| 4915 | return ret; |
Peter Zijlstra | f63a8da | 2015-01-23 12:24:14 +0100 | [diff] [blame] | 4916 | |
Peter Zijlstra | a9cd819 | 2017-09-05 13:38:24 +0200 | [diff] [blame] | 4917 | raw_spin_lock_irqsave(&ctx->lock, flags); |
| 4918 | |
Peter Zijlstra | fa8c269 | 2015-09-03 20:07:49 -0700 | [diff] [blame] | 4919 | /* |
| 4920 | * Since we co-schedule groups, {enabled,running} times of siblings |
| 4921 | * will be identical to those of the leader, so we only publish one |
| 4922 | * set. |
| 4923 | */ |
| 4924 | if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) { |
| 4925 | values[n++] += leader->total_time_enabled + |
| 4926 | atomic64_read(&leader->child_total_time_enabled); |
| 4927 | } |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4928 | |
Peter Zijlstra | fa8c269 | 2015-09-03 20:07:49 -0700 | [diff] [blame] | 4929 | if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) { |
| 4930 | values[n++] += leader->total_time_running + |
| 4931 | atomic64_read(&leader->child_total_time_running); |
| 4932 | } |
| 4933 | |
| 4934 | /* |
| 4935 | * Write {count,id} tuples for every sibling. |
| 4936 | */ |
| 4937 | values[n++] += perf_event_count(leader); |
Peter Zijlstra | abf4868 | 2009-11-20 22:19:49 +0100 | [diff] [blame] | 4938 | if (read_format & PERF_FORMAT_ID) |
| 4939 | values[n++] = primary_event_id(leader); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4940 | |
Peter Zijlstra | edb3959 | 2018-03-15 17:36:56 +0100 | [diff] [blame] | 4941 | for_each_sibling_event(sub, leader) { |
Peter Zijlstra | fa8c269 | 2015-09-03 20:07:49 -0700 | [diff] [blame] | 4942 | values[n++] += perf_event_count(sub); |
Peter Zijlstra | abf4868 | 2009-11-20 22:19:49 +0100 | [diff] [blame] | 4943 | if (read_format & PERF_FORMAT_ID) |
| 4944 | values[n++] = primary_event_id(sub); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4945 | } |
Sukadev Bhattiprolu | 7d88962 | 2015-09-03 20:07:50 -0700 | [diff] [blame] | 4946 | |
Jiri Olsa | 2aeb188 | 2017-07-20 16:14:55 +0200 | [diff] [blame] | 4947 | raw_spin_unlock_irqrestore(&ctx->lock, flags); |
Sukadev Bhattiprolu | 7d88962 | 2015-09-03 20:07:50 -0700 | [diff] [blame] | 4948 | return 0; |
Peter Zijlstra | fa8c269 | 2015-09-03 20:07:49 -0700 | [diff] [blame] | 4949 | } |
| 4950 | |
| 4951 | static int perf_read_group(struct perf_event *event, |
| 4952 | u64 read_format, char __user *buf) |
| 4953 | { |
| 4954 | struct perf_event *leader = event->group_leader, *child; |
| 4955 | struct perf_event_context *ctx = leader->ctx; |
Sukadev Bhattiprolu | 7d88962 | 2015-09-03 20:07:50 -0700 | [diff] [blame] | 4956 | int ret; |
Peter Zijlstra | fa8c269 | 2015-09-03 20:07:49 -0700 | [diff] [blame] | 4957 | u64 *values; |
| 4958 | |
| 4959 | lockdep_assert_held(&ctx->mutex); |
| 4960 | |
| 4961 | values = kzalloc(event->read_size, GFP_KERNEL); |
| 4962 | if (!values) |
| 4963 | return -ENOMEM; |
| 4964 | |
| 4965 | values[0] = 1 + leader->nr_siblings; |
| 4966 | |
| 4967 | /* |
| 4968 | * By locking the child_mutex of the leader we effectively |
| 4969 | * lock the child list of all siblings. XXX: explain how. |
| 4970 | */ |
| 4971 | mutex_lock(&leader->child_mutex); |
| 4972 | |
Sukadev Bhattiprolu | 7d88962 | 2015-09-03 20:07:50 -0700 | [diff] [blame] | 4973 | ret = __perf_read_group_add(leader, read_format, values); |
| 4974 | if (ret) |
| 4975 | goto unlock; |
| 4976 | |
| 4977 | list_for_each_entry(child, &leader->child_list, child_list) { |
| 4978 | ret = __perf_read_group_add(child, read_format, values); |
| 4979 | if (ret) |
| 4980 | goto unlock; |
| 4981 | } |
Peter Zijlstra | fa8c269 | 2015-09-03 20:07:49 -0700 | [diff] [blame] | 4982 | |
| 4983 | mutex_unlock(&leader->child_mutex); |
| 4984 | |
Sukadev Bhattiprolu | 7d88962 | 2015-09-03 20:07:50 -0700 | [diff] [blame] | 4985 | ret = event->read_size; |
Peter Zijlstra | fa8c269 | 2015-09-03 20:07:49 -0700 | [diff] [blame] | 4986 | if (copy_to_user(buf, values, event->read_size)) |
| 4987 | ret = -EFAULT; |
Sukadev Bhattiprolu | 7d88962 | 2015-09-03 20:07:50 -0700 | [diff] [blame] | 4988 | goto out; |
Peter Zijlstra | fa8c269 | 2015-09-03 20:07:49 -0700 | [diff] [blame] | 4989 | |
Sukadev Bhattiprolu | 7d88962 | 2015-09-03 20:07:50 -0700 | [diff] [blame] | 4990 | unlock: |
| 4991 | mutex_unlock(&leader->child_mutex); |
| 4992 | out: |
Peter Zijlstra | fa8c269 | 2015-09-03 20:07:49 -0700 | [diff] [blame] | 4993 | kfree(values); |
Peter Zijlstra | abf4868 | 2009-11-20 22:19:49 +0100 | [diff] [blame] | 4994 | return ret; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4995 | } |
| 4996 | |
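/*
 * For reference, the buffer layout produced above when PERF_FORMAT_GROUP is
 * set, mirroring the read_format description in
 * include/uapi/linux/perf_event.h:
 *
 *	{ u64 nr;
 *	  { u64 time_enabled; } && PERF_FORMAT_TOTAL_TIME_ENABLED
 *	  { u64 time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING
 *	  { u64 value;
 *	    { u64 id;         } && PERF_FORMAT_ID
 *	  } cntr[nr];
 *	}
 */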
Peter Zijlstra (Intel) | b15f495 | 2015-09-03 20:07:47 -0700 | [diff] [blame] | 4997 | static int perf_read_one(struct perf_event *event, |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4998 | u64 read_format, char __user *buf) |
| 4999 | { |
Peter Zijlstra | 59ed446 | 2009-11-20 22:19:55 +0100 | [diff] [blame] | 5000 | u64 enabled, running; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5001 | u64 values[4]; |
| 5002 | int n = 0; |
| 5003 | |
Peter Zijlstra | ca0dd44 | 2017-09-05 13:23:44 +0200 | [diff] [blame] | 5004 | values[n++] = __perf_event_read_value(event, &enabled, &running); |
Peter Zijlstra | 59ed446 | 2009-11-20 22:19:55 +0100 | [diff] [blame] | 5005 | if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) |
| 5006 | values[n++] = enabled; |
| 5007 | if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) |
| 5008 | values[n++] = running; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5009 | if (read_format & PERF_FORMAT_ID) |
| 5010 | values[n++] = primary_event_id(event); |
| 5011 | |
| 5012 | if (copy_to_user(buf, values, n * sizeof(u64))) |
| 5013 | return -EFAULT; |
| 5014 | |
| 5015 | return n * sizeof(u64); |
| 5016 | } |
| 5017 | |
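/*
 * And the corresponding non-group layout written by perf_read_one(), again
 * as documented for read_format in include/uapi/linux/perf_event.h:
 *
 *	{ u64 value;
 *	  { u64 time_enabled; } && PERF_FORMAT_TOTAL_TIME_ENABLED
 *	  { u64 time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING
 *	  { u64 id;           } && PERF_FORMAT_ID
 *	}
 */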
Jiri Olsa | dc63398 | 2014-09-12 13:18:26 +0200 | [diff] [blame] | 5018 | static bool is_event_hup(struct perf_event *event) |
| 5019 | { |
| 5020 | bool no_children; |
| 5021 | |
Peter Zijlstra | a69b0ca | 2016-02-24 18:45:44 +0100 | [diff] [blame] | 5022 | if (event->state > PERF_EVENT_STATE_EXIT) |
Jiri Olsa | dc63398 | 2014-09-12 13:18:26 +0200 | [diff] [blame] | 5023 | return false; |
| 5024 | |
| 5025 | mutex_lock(&event->child_mutex); |
| 5026 | no_children = list_empty(&event->child_list); |
| 5027 | mutex_unlock(&event->child_mutex); |
| 5028 | return no_children; |
| 5029 | } |
| 5030 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5031 | /* |
| 5032 | * Read the performance event - simple non blocking version for now |
| 5033 | */ |
| 5034 | static ssize_t |
Peter Zijlstra (Intel) | b15f495 | 2015-09-03 20:07:47 -0700 | [diff] [blame] | 5035 | __perf_read(struct perf_event *event, char __user *buf, size_t count) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5036 | { |
| 5037 | u64 read_format = event->attr.read_format; |
| 5038 | int ret; |
| 5039 | |
| 5040 | /* |
Tobias Tefke | 788faab | 2018-07-09 12:57:15 +0200 | [diff] [blame] | 5041 | * Return end-of-file for a read on an event that is in |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5042 | * error state (i.e. because it was pinned but it couldn't be |
| 5043 | * scheduled onto the CPU at some point). |
| 5044 | */ |
| 5045 | if (event->state == PERF_EVENT_STATE_ERROR) |
| 5046 | return 0; |
| 5047 | |
Arnaldo Carvalho de Melo | c320c7b | 2010-10-20 12:50:11 -0200 | [diff] [blame] | 5048 | if (count < event->read_size) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5049 | return -ENOSPC; |
| 5050 | |
| 5051 | WARN_ON_ONCE(event->ctx->parent_ctx); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5052 | if (read_format & PERF_FORMAT_GROUP) |
Peter Zijlstra (Intel) | b15f495 | 2015-09-03 20:07:47 -0700 | [diff] [blame] | 5053 | ret = perf_read_group(event, read_format, buf); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5054 | else |
Peter Zijlstra (Intel) | b15f495 | 2015-09-03 20:07:47 -0700 | [diff] [blame] | 5055 | ret = perf_read_one(event, read_format, buf); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5056 | |
| 5057 | return ret; |
| 5058 | } |
| 5059 | |
| 5060 | static ssize_t |
| 5061 | perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) |
| 5062 | { |
| 5063 | struct perf_event *event = file->private_data; |
Peter Zijlstra | f63a8da | 2015-01-23 12:24:14 +0100 | [diff] [blame] | 5064 | struct perf_event_context *ctx; |
| 5065 | int ret; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5066 | |
Joel Fernandes (Google) | da97e18 | 2019-10-14 13:03:08 -0400 | [diff] [blame] | 5067 | ret = security_perf_event_read(event); |
| 5068 | if (ret) |
| 5069 | return ret; |
| 5070 | |
Peter Zijlstra | f63a8da | 2015-01-23 12:24:14 +0100 | [diff] [blame] | 5071 | ctx = perf_event_ctx_lock(event); |
Peter Zijlstra (Intel) | b15f495 | 2015-09-03 20:07:47 -0700 | [diff] [blame] | 5072 | ret = __perf_read(event, buf, count); |
Peter Zijlstra | f63a8da | 2015-01-23 12:24:14 +0100 | [diff] [blame] | 5073 | perf_event_ctx_unlock(event, ctx); |
| 5074 | |
| 5075 | return ret; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5076 | } |
| 5077 | |
Al Viro | 9dd9574 | 2017-07-03 00:42:43 -0400 | [diff] [blame] | 5078 | static __poll_t perf_poll(struct file *file, poll_table *wait) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5079 | { |
| 5080 | struct perf_event *event = file->private_data; |
Steven Rostedt (VMware) | 56de4e8 | 2019-12-13 13:21:30 -0500 | [diff] [blame] | 5081 | struct perf_buffer *rb; |
Linus Torvalds | a9a0884 | 2018-02-11 14:34:03 -0800 | [diff] [blame] | 5082 | __poll_t events = EPOLLHUP; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5083 | |
Sebastian Andrzej Siewior | e708d7a | 2014-08-04 15:31:08 +0200 | [diff] [blame] | 5084 | poll_wait(file, &event->waitq, wait); |
Jiri Olsa | 179033b | 2014-08-07 11:48:26 -0400 | [diff] [blame] | 5085 | |
Jiri Olsa | dc63398 | 2014-09-12 13:18:26 +0200 | [diff] [blame] | 5086 | if (is_event_hup(event)) |
Jiri Olsa | 179033b | 2014-08-07 11:48:26 -0400 | [diff] [blame] | 5087 | return events; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5088 | |
Peter Zijlstra | 10c6db1 | 2011-11-26 02:47:31 +0100 | [diff] [blame] | 5089 | /* |
Peter Zijlstra | 9bb5d40 | 2013-06-04 10:44:21 +0200 | [diff] [blame] | 5090 | * Pin the event->rb by taking event->mmap_mutex; otherwise |
| 5091 | * perf_event_set_output() can swizzle our rb and make us miss wakeups. |
Peter Zijlstra | 10c6db1 | 2011-11-26 02:47:31 +0100 | [diff] [blame] | 5092 | */ |
| 5093 | mutex_lock(&event->mmap_mutex); |
Peter Zijlstra | 9bb5d40 | 2013-06-04 10:44:21 +0200 | [diff] [blame] | 5094 | rb = event->rb; |
| 5095 | if (rb) |
Frederic Weisbecker | 7636913 | 2011-05-19 19:55:04 +0200 | [diff] [blame] | 5096 | events = atomic_xchg(&rb->poll, 0); |
Peter Zijlstra | 10c6db1 | 2011-11-26 02:47:31 +0100 | [diff] [blame] | 5097 | mutex_unlock(&event->mmap_mutex); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5098 | return events; |
| 5099 | } |
| 5100 | |
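/*
 * Userspace-side sketch of driving this poll handler; 'perf_fd' is an
 * assumed descriptor from perf_event_open() with a mapped ring buffer, and
 * drain_ring_buffer() is a hypothetical consumer:
 *
 *	struct pollfd pfd = { .fd = perf_fd, .events = POLLIN };
 *
 *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
 *		drain_ring_buffer();
 */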
Peter Zijlstra | f63a8da | 2015-01-23 12:24:14 +0100 | [diff] [blame] | 5101 | static void _perf_event_reset(struct perf_event *event) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5102 | { |
Sukadev Bhattiprolu | 7d88962 | 2015-09-03 20:07:50 -0700 | [diff] [blame] | 5103 | (void)perf_event_read(event, false); |
Peter Zijlstra | e785059 | 2010-05-21 14:43:08 +0200 | [diff] [blame] | 5104 | local64_set(&event->count, 0); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5105 | perf_event_update_userpage(event); |
| 5106 | } |
| 5107 | |
Like Xu | 52ba4b0 | 2019-10-27 18:52:39 +0800 | [diff] [blame] | 5108 | /* Assume it's not an event with inherit set. */ |
| 5109 | u64 perf_event_pause(struct perf_event *event, bool reset) |
| 5110 | { |
| 5111 | struct perf_event_context *ctx; |
| 5112 | u64 count; |
| 5113 | |
| 5114 | ctx = perf_event_ctx_lock(event); |
| 5115 | WARN_ON_ONCE(event->attr.inherit); |
| 5116 | _perf_event_disable(event); |
| 5117 | count = local64_read(&event->count); |
| 5118 | if (reset) |
| 5119 | local64_set(&event->count, 0); |
| 5120 | perf_event_ctx_unlock(event, ctx); |
| 5121 | |
| 5122 | return count; |
| 5123 | } |
| 5124 | EXPORT_SYMBOL_GPL(perf_event_pause); |
| 5125 | |
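/*
 * Illustrative in-kernel use of the helper above, assuming a counting,
 * non-inherited event owned by the caller: read and reset in one step,
 * then resume with perf_event_enable() when monitoring should continue.
 *
 *	u64 delta = perf_event_pause(event, true);
 *	...
 *	perf_event_enable(event);
 */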
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5126 | /* |
| 5127 | * Holding the top-level event's child_mutex means that any |
| 5128 | * descendant process that has inherited this event will block |
Peter Zijlstra | 8ba289b | 2016-01-26 13:06:56 +0100 | [diff] [blame] | 5129 | * in perf_event_exit_event() if it goes to exit, thus satisfying the |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5130 | * task existence requirements of perf_event_enable/disable. |
| 5131 | */ |
| 5132 | static void perf_event_for_each_child(struct perf_event *event, |
| 5133 | void (*func)(struct perf_event *)) |
| 5134 | { |
| 5135 | struct perf_event *child; |
| 5136 | |
| 5137 | WARN_ON_ONCE(event->ctx->parent_ctx); |
Peter Zijlstra | f63a8da | 2015-01-23 12:24:14 +0100 | [diff] [blame] | 5138 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5139 | mutex_lock(&event->child_mutex); |
| 5140 | func(event); |
| 5141 | list_for_each_entry(child, &event->child_list, child_list) |
| 5142 | func(child); |
| 5143 | mutex_unlock(&event->child_mutex); |
| 5144 | } |
| 5145 | |
| 5146 | static void perf_event_for_each(struct perf_event *event, |
| 5147 | void (*func)(struct perf_event *)) |
| 5148 | { |
| 5149 | struct perf_event_context *ctx = event->ctx; |
| 5150 | struct perf_event *sibling; |
| 5151 | |
Peter Zijlstra | f63a8da | 2015-01-23 12:24:14 +0100 | [diff] [blame] | 5152 | lockdep_assert_held(&ctx->mutex); |
| 5153 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5154 | event = event->group_leader; |
| 5155 | |
| 5156 | perf_event_for_each_child(event, func); |
Peter Zijlstra | edb3959 | 2018-03-15 17:36:56 +0100 | [diff] [blame] | 5157 | for_each_sibling_event(sibling, event) |
Michael Ellerman | 724b6da | 2012-04-11 11:54:13 +1000 | [diff] [blame] | 5158 | perf_event_for_each_child(sibling, func); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5159 | } |
| 5160 | |
Peter Zijlstra | fae3fde | 2016-01-11 15:00:50 +0100 | [diff] [blame] | 5161 | static void __perf_event_period(struct perf_event *event, |
| 5162 | struct perf_cpu_context *cpuctx, |
| 5163 | struct perf_event_context *ctx, |
| 5164 | void *info) |
Peter Zijlstra | 0017960 | 2015-11-30 16:26:35 +0100 | [diff] [blame] | 5165 | { |
Peter Zijlstra | fae3fde | 2016-01-11 15:00:50 +0100 | [diff] [blame] | 5166 | u64 value = *((u64 *)info); |
Peter Zijlstra | c7999c6 | 2015-08-04 19:22:49 +0200 | [diff] [blame] | 5167 | bool active; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5168 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5169 | if (event->attr.freq) { |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5170 | event->attr.sample_freq = value; |
| 5171 | } else { |
| 5172 | event->attr.sample_period = value; |
| 5173 | event->hw.sample_period = value; |
| 5174 | } |
Peter Zijlstra | bad7192 | 2013-11-27 13:54:38 +0000 | [diff] [blame] | 5175 | |
| 5176 | active = (event->state == PERF_EVENT_STATE_ACTIVE); |
| 5177 | if (active) { |
| 5178 | perf_pmu_disable(ctx->pmu); |
Peter Zijlstra | 1e02cd4 | 2016-03-10 15:39:24 +0100 | [diff] [blame] | 5179 | /* |
| 5180 | * We could be throttled; unthrottle now to avoid the tick |
| 5181 | * trying to unthrottle while we already re-started the event. |
| 5182 | */ |
| 5183 | if (event->hw.interrupts == MAX_INTERRUPTS) { |
| 5184 | event->hw.interrupts = 0; |
| 5185 | perf_log_throttle(event, 1); |
| 5186 | } |
Peter Zijlstra | bad7192 | 2013-11-27 13:54:38 +0000 | [diff] [blame] | 5187 | event->pmu->stop(event, PERF_EF_UPDATE); |
| 5188 | } |
| 5189 | |
| 5190 | local64_set(&event->hw.period_left, 0); |
| 5191 | |
| 5192 | if (active) { |
| 5193 | event->pmu->start(event, PERF_EF_RELOAD); |
| 5194 | perf_pmu_enable(ctx->pmu); |
| 5195 | } |
Peter Zijlstra | c7999c6 | 2015-08-04 19:22:49 +0200 | [diff] [blame] | 5196 | } |
| 5197 | |
Jiri Olsa | 81ec3f3 | 2019-02-04 13:35:32 +0100 | [diff] [blame] | 5198 | static int perf_event_check_period(struct perf_event *event, u64 value) |
| 5199 | { |
| 5200 | return event->pmu->check_period(event, value); |
| 5201 | } |
| 5202 | |
Like Xu | 3ca270f | 2019-10-27 18:52:38 +0800 | [diff] [blame] | 5203 | static int _perf_event_period(struct perf_event *event, u64 value) |
Peter Zijlstra | c7999c6 | 2015-08-04 19:22:49 +0200 | [diff] [blame] | 5204 | { |
Peter Zijlstra | c7999c6 | 2015-08-04 19:22:49 +0200 | [diff] [blame] | 5205 | if (!is_sampling_event(event)) |
| 5206 | return -EINVAL; |
| 5207 | |
Peter Zijlstra | c7999c6 | 2015-08-04 19:22:49 +0200 | [diff] [blame] | 5208 | if (!value) |
| 5209 | return -EINVAL; |
| 5210 | |
| 5211 | if (event->attr.freq && value > sysctl_perf_event_sample_rate) |
| 5212 | return -EINVAL; |
| 5213 | |
Jiri Olsa | 81ec3f3 | 2019-02-04 13:35:32 +0100 | [diff] [blame] | 5214 | if (perf_event_check_period(event, value)) |
| 5215 | return -EINVAL; |
| 5216 | |
Ravi Bangoria | 913a90b | 2019-06-04 09:59:53 +0530 | [diff] [blame] | 5217 | if (!event->attr.freq && (value & (1ULL << 63))) |
| 5218 | return -EINVAL; |
| 5219 | |
Peter Zijlstra | fae3fde | 2016-01-11 15:00:50 +0100 | [diff] [blame] | 5220 | event_function_call(event, __perf_event_period, &value); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5221 | |
Peter Zijlstra | c7999c6 | 2015-08-04 19:22:49 +0200 | [diff] [blame] | 5222 | return 0; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5223 | } |
| 5224 | |
Like Xu | 3ca270f | 2019-10-27 18:52:38 +0800 | [diff] [blame] | 5225 | int perf_event_period(struct perf_event *event, u64 value) |
| 5226 | { |
| 5227 | struct perf_event_context *ctx; |
| 5228 | int ret; |
| 5229 | |
| 5230 | ctx = perf_event_ctx_lock(event); |
| 5231 | ret = _perf_event_period(event, value); |
| 5232 | perf_event_ctx_unlock(event, ctx); |
| 5233 | |
| 5234 | return ret; |
| 5235 | } |
| 5236 | EXPORT_SYMBOL_GPL(perf_event_period); |
| 5237 | |
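/*
 * The same update is reachable from userspace through the ioctl handled
 * below; a minimal sketch, with error handling omitted and 'perf_fd' an
 * assumed event descriptor:
 *
 *	u64 period = 100000;
 *
 *	ioctl(perf_fd, PERF_EVENT_IOC_PERIOD, &period);
 */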
Peter Zijlstra | ac9721f | 2010-05-27 12:54:41 +0200 | [diff] [blame] | 5238 | static const struct file_operations perf_fops; |
| 5239 | |
Al Viro | 2903ff0 | 2012-08-28 12:52:22 -0400 | [diff] [blame] | 5240 | static inline int perf_fget_light(int fd, struct fd *p) |
Peter Zijlstra | ac9721f | 2010-05-27 12:54:41 +0200 | [diff] [blame] | 5241 | { |
Al Viro | 2903ff0 | 2012-08-28 12:52:22 -0400 | [diff] [blame] | 5242 | struct fd f = fdget(fd); |
| 5243 | if (!f.file) |
| 5244 | return -EBADF; |
Peter Zijlstra | ac9721f | 2010-05-27 12:54:41 +0200 | [diff] [blame] | 5245 | |
Al Viro | 2903ff0 | 2012-08-28 12:52:22 -0400 | [diff] [blame] | 5246 | if (f.file->f_op != &perf_fops) { |
| 5247 | fdput(f); |
| 5248 | return -EBADF; |
Peter Zijlstra | ac9721f | 2010-05-27 12:54:41 +0200 | [diff] [blame] | 5249 | } |
Al Viro | 2903ff0 | 2012-08-28 12:52:22 -0400 | [diff] [blame] | 5250 | *p = f; |
| 5251 | return 0; |
Peter Zijlstra | ac9721f | 2010-05-27 12:54:41 +0200 | [diff] [blame] | 5252 | } |
| 5253 | |
| 5254 | static int perf_event_set_output(struct perf_event *event, |
| 5255 | struct perf_event *output_event); |
Li Zefan | 6fb2915 | 2009-10-15 11:21:42 +0800 | [diff] [blame] | 5256 | static int perf_event_set_filter(struct perf_event *event, void __user *arg); |
Alexei Starovoitov | 2541517 | 2015-03-25 12:49:20 -0700 | [diff] [blame] | 5257 | static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd); |
Milind Chabbi | 32ff77e | 2018-03-12 14:45:47 +0100 | [diff] [blame] | 5258 | static int perf_copy_attr(struct perf_event_attr __user *uattr, |
| 5259 | struct perf_event_attr *attr); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5260 | |
Peter Zijlstra | f63a8da | 2015-01-23 12:24:14 +0100 | [diff] [blame] | 5261 | static long _perf_ioctl(struct perf_event *event, unsigned int cmd, unsigned long arg) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5262 | { |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5263 | void (*func)(struct perf_event *); |
| 5264 | u32 flags = arg; |
| 5265 | |
| 5266 | switch (cmd) { |
| 5267 | case PERF_EVENT_IOC_ENABLE: |
Peter Zijlstra | f63a8da | 2015-01-23 12:24:14 +0100 | [diff] [blame] | 5268 | func = _perf_event_enable; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5269 | break; |
| 5270 | case PERF_EVENT_IOC_DISABLE: |
Peter Zijlstra | f63a8da | 2015-01-23 12:24:14 +0100 | [diff] [blame] | 5271 | func = _perf_event_disable; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5272 | break; |
| 5273 | case PERF_EVENT_IOC_RESET: |
Peter Zijlstra | f63a8da | 2015-01-23 12:24:14 +0100 | [diff] [blame] | 5274 | func = _perf_event_reset; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5275 | break; |
| 5276 | |
| 5277 | case PERF_EVENT_IOC_REFRESH: |
Peter Zijlstra | f63a8da | 2015-01-23 12:24:14 +0100 | [diff] [blame] | 5278 | return _perf_event_refresh(event, arg); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5279 | |
| 5280 | case PERF_EVENT_IOC_PERIOD: |
Like Xu | 3ca270f | 2019-10-27 18:52:38 +0800 | [diff] [blame] | 5281 | { |
| 5282 | u64 value; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5283 | |
Like Xu | 3ca270f | 2019-10-27 18:52:38 +0800 | [diff] [blame] | 5284 | if (copy_from_user(&value, (u64 __user *)arg, sizeof(value))) |
| 5285 | return -EFAULT; |
| 5286 | |
| 5287 | return _perf_event_period(event, value); |
| 5288 | } |
Jiri Olsa | cf4957f | 2012-10-24 13:37:58 +0200 | [diff] [blame] | 5289 | case PERF_EVENT_IOC_ID: |
| 5290 | { |
| 5291 | u64 id = primary_event_id(event); |
| 5292 | |
| 5293 | if (copy_to_user((void __user *)arg, &id, sizeof(id))) |
| 5294 | return -EFAULT; |
| 5295 | return 0; |
| 5296 | } |
| 5297 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5298 | case PERF_EVENT_IOC_SET_OUTPUT: |
Peter Zijlstra | ac9721f | 2010-05-27 12:54:41 +0200 | [diff] [blame] | 5299 | { |
Peter Zijlstra | ac9721f | 2010-05-27 12:54:41 +0200 | [diff] [blame] | 5300 | int ret; |
Peter Zijlstra | ac9721f | 2010-05-27 12:54:41 +0200 | [diff] [blame] | 5301 | if (arg != -1) { |
Al Viro | 2903ff0 | 2012-08-28 12:52:22 -0400 | [diff] [blame] | 5302 | struct perf_event *output_event; |
| 5303 | struct fd output; |
| 5304 | ret = perf_fget_light(arg, &output); |
| 5305 | if (ret) |
| 5306 | return ret; |
| 5307 | output_event = output.file->private_data; |
| 5308 | ret = perf_event_set_output(event, output_event); |
| 5309 | fdput(output); |
| 5310 | } else { |
| 5311 | ret = perf_event_set_output(event, NULL); |
Peter Zijlstra | ac9721f | 2010-05-27 12:54:41 +0200 | [diff] [blame] | 5312 | } |
Peter Zijlstra | ac9721f | 2010-05-27 12:54:41 +0200 | [diff] [blame] | 5313 | return ret; |
| 5314 | } |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5315 | |
Li Zefan | 6fb2915 | 2009-10-15 11:21:42 +0800 | [diff] [blame] | 5316 | case PERF_EVENT_IOC_SET_FILTER: |
| 5317 | return perf_event_set_filter(event, (void __user *)arg); |
| 5318 | |
Alexei Starovoitov | 2541517 | 2015-03-25 12:49:20 -0700 | [diff] [blame] | 5319 | case PERF_EVENT_IOC_SET_BPF: |
| 5320 | return perf_event_set_bpf_prog(event, arg); |
| 5321 | |
Wang Nan | 86e7972 | 2016-03-28 06:41:29 +0000 | [diff] [blame] | 5322 | case PERF_EVENT_IOC_PAUSE_OUTPUT: { |
Steven Rostedt (VMware) | 56de4e8 | 2019-12-13 13:21:30 -0500 | [diff] [blame] | 5323 | struct perf_buffer *rb; |
Wang Nan | 86e7972 | 2016-03-28 06:41:29 +0000 | [diff] [blame] | 5324 | |
| 5325 | rcu_read_lock(); |
| 5326 | rb = rcu_dereference(event->rb); |
| 5327 | if (!rb || !rb->nr_pages) { |
| 5328 | rcu_read_unlock(); |
| 5329 | return -EINVAL; |
| 5330 | } |
| 5331 | rb_toggle_paused(rb, !!arg); |
| 5332 | rcu_read_unlock(); |
| 5333 | return 0; |
| 5334 | } |
Yonghong Song | f371b30 | 2017-12-11 11:39:02 -0800 | [diff] [blame] | 5335 | |
| 5336 | case PERF_EVENT_IOC_QUERY_BPF: |
Yonghong Song | f4e2298 | 2017-12-13 10:35:37 -0800 | [diff] [blame] | 5337 | return perf_event_query_prog_array(event, (void __user *)arg); |
Milind Chabbi | 32ff77e | 2018-03-12 14:45:47 +0100 | [diff] [blame] | 5338 | |
| 5339 | case PERF_EVENT_IOC_MODIFY_ATTRIBUTES: { |
| 5340 | struct perf_event_attr new_attr; |
| 5341 | int err = perf_copy_attr((struct perf_event_attr __user *)arg, |
| 5342 | &new_attr); |
| 5343 | |
| 5344 | if (err) |
| 5345 | return err; |
| 5346 | |
| 5347 | return perf_event_modify_attr(event, &new_attr); |
| 5348 | } |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5349 | default: |
| 5350 | return -ENOTTY; |
| 5351 | } |
| 5352 | |
| 5353 | if (flags & PERF_IOC_FLAG_GROUP) |
| 5354 | perf_event_for_each(event, func); |
| 5355 | else |
| 5356 | perf_event_for_each_child(event, func); |
| 5357 | |
| 5358 | return 0; |
| 5359 | } |
| 5360 | |
Peter Zijlstra | f63a8da | 2015-01-23 12:24:14 +0100 | [diff] [blame] | 5361 | static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg) |
| 5362 | { |
| 5363 | struct perf_event *event = file->private_data; |
| 5364 | struct perf_event_context *ctx; |
| 5365 | long ret; |
| 5366 | |
Joel Fernandes (Google) | da97e18 | 2019-10-14 13:03:08 -0400 | [diff] [blame] | 5367 | /* Treat the ioctl like a write, as it is likely a mutating operation. */ |
| 5368 | ret = security_perf_event_write(event); |
| 5369 | if (ret) |
| 5370 | return ret; |
| 5371 | |
Peter Zijlstra | f63a8da | 2015-01-23 12:24:14 +0100 | [diff] [blame] | 5372 | ctx = perf_event_ctx_lock(event); |
| 5373 | ret = _perf_ioctl(event, cmd, arg); |
| 5374 | perf_event_ctx_unlock(event, ctx); |
| 5375 | |
| 5376 | return ret; |
| 5377 | } |
| 5378 | |
Pawel Moll | b3f2078 | 2014-06-13 16:03:32 +0100 | [diff] [blame] | 5379 | #ifdef CONFIG_COMPAT |
| 5380 | static long perf_compat_ioctl(struct file *file, unsigned int cmd, |
| 5381 | unsigned long arg) |
| 5382 | { |
| 5383 | switch (_IOC_NR(cmd)) { |
| 5384 | case _IOC_NR(PERF_EVENT_IOC_SET_FILTER): |
| 5385 | case _IOC_NR(PERF_EVENT_IOC_ID): |
Eugene Syromiatnikov | 82489c5 | 2018-05-21 14:34:20 +0200 | [diff] [blame] | 5386 | case _IOC_NR(PERF_EVENT_IOC_QUERY_BPF): |
| 5387 | case _IOC_NR(PERF_EVENT_IOC_MODIFY_ATTRIBUTES): |
Pawel Moll | b3f2078 | 2014-06-13 16:03:32 +0100 | [diff] [blame] | 5388 | /* Fix up pointer size (usually 4 -> 8 in the 32-on-64-bit case) */ |
| 5389 | if (_IOC_SIZE(cmd) == sizeof(compat_uptr_t)) { |
| 5390 | cmd &= ~IOCSIZE_MASK; |
| 5391 | cmd |= sizeof(void *) << IOCSIZE_SHIFT; |
| 5392 | } |
| 5393 | break; |
| 5394 | } |
| 5395 | return perf_ioctl(file, cmd, arg); |
| 5396 | } |
| 5397 | #else |
| 5398 | # define perf_compat_ioctl NULL |
| 5399 | #endif |
| 5400 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5401 | int perf_event_task_enable(void) |
| 5402 | { |
Peter Zijlstra | f63a8da | 2015-01-23 12:24:14 +0100 | [diff] [blame] | 5403 | struct perf_event_context *ctx; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5404 | struct perf_event *event; |
| 5405 | |
| 5406 | mutex_lock(¤t->perf_event_mutex); |
Peter Zijlstra | f63a8da | 2015-01-23 12:24:14 +0100 | [diff] [blame] | 5407 | list_for_each_entry(event, ¤t->perf_event_list, owner_entry) { |
| 5408 | ctx = perf_event_ctx_lock(event); |
| 5409 | perf_event_for_each_child(event, _perf_event_enable); |
| 5410 | perf_event_ctx_unlock(event, ctx); |
| 5411 | } |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5412 | mutex_unlock(¤t->perf_event_mutex); |
| 5413 | |
| 5414 | return 0; |
| 5415 | } |
| 5416 | |
| 5417 | int perf_event_task_disable(void) |
| 5418 | { |
Peter Zijlstra | f63a8da | 2015-01-23 12:24:14 +0100 | [diff] [blame] | 5419 | struct perf_event_context *ctx; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5420 | struct perf_event *event; |
| 5421 | |
| 5422 | mutex_lock(¤t->perf_event_mutex); |
Peter Zijlstra | f63a8da | 2015-01-23 12:24:14 +0100 | [diff] [blame] | 5423 | list_for_each_entry(event, ¤t->perf_event_list, owner_entry) { |
| 5424 | ctx = perf_event_ctx_lock(event); |
| 5425 | perf_event_for_each_child(event, _perf_event_disable); |
| 5426 | perf_event_ctx_unlock(event, ctx); |
| 5427 | } |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5428 | mutex_unlock(¤t->perf_event_mutex); |
| 5429 | |
| 5430 | return 0; |
| 5431 | } |
| 5432 | |
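/*
 * Both helpers above are exposed to userspace via prctl() (see
 * kernel/sys.c); a sketch of the trigger around a region that should not
 * be measured:
 *
 *	prctl(PR_TASK_PERF_EVENTS_DISABLE);
 *	...
 *	prctl(PR_TASK_PERF_EVENTS_ENABLE);
 */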
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5433 | static int perf_event_index(struct perf_event *event) |
| 5434 | { |
Peter Zijlstra | a4eaf7f | 2010-06-16 14:37:10 +0200 | [diff] [blame] | 5435 | if (event->hw.state & PERF_HES_STOPPED) |
| 5436 | return 0; |
| 5437 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5438 | if (event->state != PERF_EVENT_STATE_ACTIVE) |
| 5439 | return 0; |
| 5440 | |
Peter Zijlstra | 35edc2a | 2011-11-20 20:36:02 +0100 | [diff] [blame] | 5441 | return event->pmu->event_idx(event); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5442 | } |
| 5443 | |
Eric B Munson | c479429 | 2011-06-23 16:34:38 -0400 | [diff] [blame] | 5444 | static void calc_timer_values(struct perf_event *event, |
Peter Zijlstra | e3f3541 | 2011-11-21 11:43:53 +0100 | [diff] [blame] | 5445 | u64 *now, |
Eric B Munson | 7f310a5 | 2011-06-23 16:34:38 -0400 | [diff] [blame] | 5446 | u64 *enabled, |
| 5447 | u64 *running) |
Eric B Munson | c479429 | 2011-06-23 16:34:38 -0400 | [diff] [blame] | 5448 | { |
Peter Zijlstra | e3f3541 | 2011-11-21 11:43:53 +0100 | [diff] [blame] | 5449 | u64 ctx_time; |
Eric B Munson | c479429 | 2011-06-23 16:34:38 -0400 | [diff] [blame] | 5450 | |
Peter Zijlstra | e3f3541 | 2011-11-21 11:43:53 +0100 | [diff] [blame] | 5451 | *now = perf_clock(); |
| 5452 | ctx_time = event->shadow_ctx_time + *now; |
Peter Zijlstra | 0d3d73a | 2017-09-05 14:16:28 +0200 | [diff] [blame] | 5453 | __perf_update_times(event, ctx_time, enabled, running); |
Eric B Munson | c479429 | 2011-06-23 16:34:38 -0400 | [diff] [blame] | 5454 | } |
| 5455 | |
Peter Zijlstra | fa731587 | 2013-09-19 10:16:42 +0200 | [diff] [blame] | 5456 | static void perf_event_init_userpage(struct perf_event *event) |
| 5457 | { |
| 5458 | struct perf_event_mmap_page *userpg; |
Steven Rostedt (VMware) | 56de4e8 | 2019-12-13 13:21:30 -0500 | [diff] [blame] | 5459 | struct perf_buffer *rb; |
Peter Zijlstra | fa731587 | 2013-09-19 10:16:42 +0200 | [diff] [blame] | 5460 | |
| 5461 | rcu_read_lock(); |
| 5462 | rb = rcu_dereference(event->rb); |
| 5463 | if (!rb) |
| 5464 | goto unlock; |
| 5465 | |
| 5466 | userpg = rb->user_page; |
| 5467 | |
| 5468 | /* Allow new userspace to detect that bit 0 is deprecated */ |
| 5469 | userpg->cap_bit0_is_deprecated = 1; |
| 5470 | userpg->size = offsetof(struct perf_event_mmap_page, __reserved); |
Alexander Shishkin | e8c6dea | 2015-01-14 14:18:10 +0200 | [diff] [blame] | 5471 | userpg->data_offset = PAGE_SIZE; |
| 5472 | userpg->data_size = perf_data_size(rb); |
Peter Zijlstra | fa731587 | 2013-09-19 10:16:42 +0200 | [diff] [blame] | 5473 | |
| 5474 | unlock: |
| 5475 | rcu_read_unlock(); |
| 5476 | } |
| 5477 | |
Andy Lutomirski | c1317ec | 2014-10-24 15:58:11 -0700 | [diff] [blame] | 5478 | void __weak arch_perf_update_userpage( |
| 5479 | struct perf_event *event, struct perf_event_mmap_page *userpg, u64 now) |
Peter Zijlstra | e3f3541 | 2011-11-21 11:43:53 +0100 | [diff] [blame] | 5480 | { |
| 5481 | } |
| 5482 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5483 | /* |
| 5484 | * Callers need to ensure there can be no nesting of this function; otherwise |
| 5485 | * the seqlock logic goes bad. We cannot serialize this because the arch |
| 5486 | * code calls this from NMI context. |
| 5487 | */ |
| 5488 | void perf_event_update_userpage(struct perf_event *event) |
| 5489 | { |
| 5490 | struct perf_event_mmap_page *userpg; |
Steven Rostedt (VMware) | 56de4e8 | 2019-12-13 13:21:30 -0500 | [diff] [blame] | 5491 | struct perf_buffer *rb; |
Peter Zijlstra | e3f3541 | 2011-11-21 11:43:53 +0100 | [diff] [blame] | 5492 | u64 enabled, running, now; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5493 | |
| 5494 | rcu_read_lock(); |
Peter Zijlstra | 5ec4c59 | 2013-08-02 21:16:30 +0200 | [diff] [blame] | 5495 | rb = rcu_dereference(event->rb); |
| 5496 | if (!rb) |
| 5497 | goto unlock; |
| 5498 | |
Eric B Munson | 0d64120 | 2011-06-24 12:26:26 -0400 | [diff] [blame] | 5499 | /* |
| 5500 | * compute total_time_enabled, total_time_running |
| 5501 | * based on snapshot values taken when the event |
| 5502 | * was last scheduled in. |
| 5503 | * |
| 5504 | * we cannot simply call update_context_time() |
| 5505 | * because of locking issues, as we can be called in |
| 5506 | * NMI context |
| 5507 | */ |
Peter Zijlstra | e3f3541 | 2011-11-21 11:43:53 +0100 | [diff] [blame] | 5508 | calc_timer_values(event, &now, &enabled, &running); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5509 | |
Frederic Weisbecker | 7636913 | 2011-05-19 19:55:04 +0200 | [diff] [blame] | 5510 | userpg = rb->user_page; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5511 | /* |
Michael O'Farrell | 9d2dcc8f | 2018-07-30 13:14:34 -0700 | [diff] [blame] | 5512 | * Disable preemption to guarantee consistent time stamps are stored to |
| 5513 | * the user page. |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5514 | */ |
| 5515 | preempt_disable(); |
| 5516 | ++userpg->lock; |
| 5517 | barrier(); |
| 5518 | userpg->index = perf_event_index(event); |
Peter Zijlstra | b5e5879 | 2010-05-21 14:43:12 +0200 | [diff] [blame] | 5519 | userpg->offset = perf_event_count(event); |
Peter Zijlstra | 365a403 | 2011-11-21 20:58:59 +0100 | [diff] [blame] | 5520 | if (userpg->index) |
Peter Zijlstra | e785059 | 2010-05-21 14:43:08 +0200 | [diff] [blame] | 5521 | userpg->offset -= local64_read(&event->hw.prev_count); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5522 | |
Eric B Munson | 0d64120 | 2011-06-24 12:26:26 -0400 | [diff] [blame] | 5523 | userpg->time_enabled = enabled + |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5524 | atomic64_read(&event->child_total_time_enabled); |
| 5525 | |
Eric B Munson | 0d64120 | 2011-06-24 12:26:26 -0400 | [diff] [blame] | 5526 | userpg->time_running = running + |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5527 | atomic64_read(&event->child_total_time_running); |
| 5528 | |
Andy Lutomirski | c1317ec | 2014-10-24 15:58:11 -0700 | [diff] [blame] | 5529 | arch_perf_update_userpage(event, userpg, now); |
Peter Zijlstra | e3f3541 | 2011-11-21 11:43:53 +0100 | [diff] [blame] | 5530 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5531 | barrier(); |
| 5532 | ++userpg->lock; |
| 5533 | preempt_enable(); |
| 5534 | unlock: |
| 5535 | rcu_read_unlock(); |
| 5536 | } |
Suzuki K Poulose | 82975c4 | 2018-01-02 11:25:26 +0000 | [diff] [blame] | 5537 | EXPORT_SYMBOL_GPL(perf_event_update_userpage); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5538 | |
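/*
 * For reference, the matching userspace read side of the self-monitoring
 * page updated above, following the seqlock pattern documented for
 * struct perf_event_mmap_page in include/uapi/linux/perf_event.h. Here
 * 'pc' is the mapped page, barrier() is a compiler barrier as in the
 * header's example, and rdpmc() stands in for the architecture's
 * user-level counter read:
 *
 *	u32 seq, idx;
 *	u64 count;
 *
 *	do {
 *		seq = pc->lock;
 *		barrier();
 *		idx = pc->index;
 *		count = pc->offset;
 *		if (pc->cap_user_rdpmc && idx)
 *			count += rdpmc(idx - 1);
 *		barrier();
 *	} while (pc->lock != seq);
 */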
Souptick Joarder | 9e3ed2d | 2018-05-21 23:55:20 +0530 | [diff] [blame] | 5539 | static vm_fault_t perf_mmap_fault(struct vm_fault *vmf) |
Peter Zijlstra | 906010b | 2009-09-21 16:08:49 +0200 | [diff] [blame] | 5540 | { |
Dave Jiang | 11bac80 | 2017-02-24 14:56:41 -0800 | [diff] [blame] | 5541 | struct perf_event *event = vmf->vma->vm_file->private_data; |
Steven Rostedt (VMware) | 56de4e8 | 2019-12-13 13:21:30 -0500 | [diff] [blame] | 5542 | struct perf_buffer *rb; |
Souptick Joarder | 9e3ed2d | 2018-05-21 23:55:20 +0530 | [diff] [blame] | 5543 | vm_fault_t ret = VM_FAULT_SIGBUS; |
Peter Zijlstra | 906010b | 2009-09-21 16:08:49 +0200 | [diff] [blame] | 5544 | |
| 5545 | if (vmf->flags & FAULT_FLAG_MKWRITE) { |
| 5546 | if (vmf->pgoff == 0) |
| 5547 | ret = 0; |
| 5548 | return ret; |
| 5549 | } |
| 5550 | |
| 5551 | rcu_read_lock(); |
Frederic Weisbecker | 7636913 | 2011-05-19 19:55:04 +0200 | [diff] [blame] | 5552 | rb = rcu_dereference(event->rb); |
| 5553 | if (!rb) |
Peter Zijlstra | 906010b | 2009-09-21 16:08:49 +0200 | [diff] [blame] | 5554 | goto unlock; |
| 5555 | |
| 5556 | if (vmf->pgoff && (vmf->flags & FAULT_FLAG_WRITE)) |
| 5557 | goto unlock; |
| 5558 | |
Frederic Weisbecker | 7636913 | 2011-05-19 19:55:04 +0200 | [diff] [blame] | 5559 | vmf->page = perf_mmap_to_page(rb, vmf->pgoff); |
Peter Zijlstra | 906010b | 2009-09-21 16:08:49 +0200 | [diff] [blame] | 5560 | if (!vmf->page) |
| 5561 | goto unlock; |
| 5562 | |
| 5563 | get_page(vmf->page); |
Dave Jiang | 11bac80 | 2017-02-24 14:56:41 -0800 | [diff] [blame] | 5564 | vmf->page->mapping = vmf->vma->vm_file->f_mapping; |
Peter Zijlstra | 906010b | 2009-09-21 16:08:49 +0200 | [diff] [blame] | 5565 | vmf->page->index = vmf->pgoff; |
| 5566 | |
| 5567 | ret = 0; |
| 5568 | unlock: |
| 5569 | rcu_read_unlock(); |
| 5570 | |
| 5571 | return ret; |
| 5572 | } |
| 5573 | |
Peter Zijlstra | 10c6db1 | 2011-11-26 02:47:31 +0100 | [diff] [blame] | 5574 | static void ring_buffer_attach(struct perf_event *event, |
Steven Rostedt (VMware) | 56de4e8 | 2019-12-13 13:21:30 -0500 | [diff] [blame] | 5575 | struct perf_buffer *rb) |
Peter Zijlstra | 10c6db1 | 2011-11-26 02:47:31 +0100 | [diff] [blame] | 5576 | { |
Steven Rostedt (VMware) | 56de4e8 | 2019-12-13 13:21:30 -0500 | [diff] [blame] | 5577 | struct perf_buffer *old_rb = NULL; |
Peter Zijlstra | 10c6db1 | 2011-11-26 02:47:31 +0100 | [diff] [blame] | 5578 | unsigned long flags; |
| 5579 | |
Peter Zijlstra | b69cf53 | 2014-03-14 10:50:33 +0100 | [diff] [blame] | 5580 | if (event->rb) { |
| 5581 | /* |
| 5582 | * Should be impossible; we set this when removing |
| 5583 | * event->rb_entry and wait/clear when adding event->rb_entry. |
| 5584 | */ |
| 5585 | WARN_ON_ONCE(event->rcu_pending); |
Peter Zijlstra | 10c6db1 | 2011-11-26 02:47:31 +0100 | [diff] [blame] | 5586 | |
Peter Zijlstra | b69cf53 | 2014-03-14 10:50:33 +0100 | [diff] [blame] | 5587 | old_rb = event->rb; |
Peter Zijlstra | b69cf53 | 2014-03-14 10:50:33 +0100 | [diff] [blame] | 5588 | spin_lock_irqsave(&old_rb->event_lock, flags); |
| 5589 | list_del_rcu(&event->rb_entry); |
| 5590 | spin_unlock_irqrestore(&old_rb->event_lock, flags); |
Peter Zijlstra | 10c6db1 | 2011-11-26 02:47:31 +0100 | [diff] [blame] | 5591 | |
Oleg Nesterov | 2f993cf | 2015-05-30 22:04:25 +0200 | [diff] [blame] | 5592 | event->rcu_batches = get_state_synchronize_rcu(); |
| 5593 | event->rcu_pending = 1; |
Peter Zijlstra | b69cf53 | 2014-03-14 10:50:33 +0100 | [diff] [blame] | 5594 | } |
Peter Zijlstra | 10c6db1 | 2011-11-26 02:47:31 +0100 | [diff] [blame] | 5595 | |
Peter Zijlstra | b69cf53 | 2014-03-14 10:50:33 +0100 | [diff] [blame] | 5596 | if (rb) { |
Oleg Nesterov | 2f993cf | 2015-05-30 22:04:25 +0200 | [diff] [blame] | 5597 | if (event->rcu_pending) { |
| 5598 | cond_synchronize_rcu(event->rcu_batches); |
| 5599 | event->rcu_pending = 0; |
| 5600 | } |
| 5601 | |
Peter Zijlstra | b69cf53 | 2014-03-14 10:50:33 +0100 | [diff] [blame] | 5602 | spin_lock_irqsave(&rb->event_lock, flags); |
| 5603 | list_add_rcu(&event->rb_entry, &rb->event_list); |
| 5604 | spin_unlock_irqrestore(&rb->event_lock, flags); |
| 5605 | } |
| 5606 | |
Alexander Shishkin | 767ae08 | 2016-09-06 16:23:49 +0300 | [diff] [blame] | 5607 | /* |
| 5608 | * Avoid racing with perf_mmap_close(AUX): stop the event |
| 5609 | * before swizzling the event::rb pointer; if it's getting |
| 5610 | * unmapped, its aux_mmap_count will be 0 and it won't |
| 5611 | * restart. See the comment in __perf_pmu_output_stop(). |
| 5612 | * |
| 5613 | * Data will inevitably be lost when set_output is done in |
| 5614 | * mid-air, but then again, whoever does it like this is |
| 5615 | * not in for the data anyway. |
| 5616 | */ |
| 5617 | if (has_aux(event)) |
| 5618 | perf_event_stop(event, 0); |
| 5619 | |
Peter Zijlstra | b69cf53 | 2014-03-14 10:50:33 +0100 | [diff] [blame] | 5620 | rcu_assign_pointer(event->rb, rb); |
| 5621 | |
| 5622 | if (old_rb) { |
| 5623 | ring_buffer_put(old_rb); |
| 5624 | /* |
| 5625 | * Since we detached before setting the new rb (so that we |
| 5626 | * could attach the new rb), we could have missed a wakeup. |
| 5627 | * Provide it now. |
| 5628 | */ |
| 5629 | wake_up_all(&event->waitq); |
| 5630 | } |
Peter Zijlstra | 10c6db1 | 2011-11-26 02:47:31 +0100 | [diff] [blame] | 5631 | } |
| 5632 | |
| 5633 | static void ring_buffer_wakeup(struct perf_event *event) |
| 5634 | { |
Steven Rostedt (VMware) | 56de4e8 | 2019-12-13 13:21:30 -0500 | [diff] [blame] | 5635 | struct perf_buffer *rb; |
Peter Zijlstra | 10c6db1 | 2011-11-26 02:47:31 +0100 | [diff] [blame] | 5636 | |
| 5637 | rcu_read_lock(); |
| 5638 | rb = rcu_dereference(event->rb); |
Peter Zijlstra | 9bb5d40 | 2013-06-04 10:44:21 +0200 | [diff] [blame] | 5639 | if (rb) { |
| 5640 | list_for_each_entry_rcu(event, &rb->event_list, rb_entry) |
| 5641 | wake_up_all(&event->waitq); |
| 5642 | } |
Peter Zijlstra | 10c6db1 | 2011-11-26 02:47:31 +0100 | [diff] [blame] | 5643 | rcu_read_unlock(); |
| 5644 | } |
| 5645 | |
Steven Rostedt (VMware) | 56de4e8 | 2019-12-13 13:21:30 -0500 | [diff] [blame] | 5646 | struct perf_buffer *ring_buffer_get(struct perf_event *event) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5647 | { |
Steven Rostedt (VMware) | 56de4e8 | 2019-12-13 13:21:30 -0500 | [diff] [blame] | 5648 | struct perf_buffer *rb; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5649 | |
Peter Zijlstra | ac9721f | 2010-05-27 12:54:41 +0200 | [diff] [blame] | 5650 | rcu_read_lock(); |
Frederic Weisbecker | 7636913 | 2011-05-19 19:55:04 +0200 | [diff] [blame] | 5651 | rb = rcu_dereference(event->rb); |
| 5652 | if (rb) { |
Elena Reshetova | fecb8ed | 2019-01-28 14:27:27 +0200 | [diff] [blame] | 5653 | if (!refcount_inc_not_zero(&rb->refcount)) |
Frederic Weisbecker | 7636913 | 2011-05-19 19:55:04 +0200 | [diff] [blame] | 5654 | rb = NULL; |
Peter Zijlstra | ac9721f | 2010-05-27 12:54:41 +0200 | [diff] [blame] | 5655 | } |
| 5656 | rcu_read_unlock(); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5657 | |
Frederic Weisbecker | 7636913 | 2011-05-19 19:55:04 +0200 | [diff] [blame] | 5658 | return rb; |
Peter Zijlstra | ac9721f | 2010-05-27 12:54:41 +0200 | [diff] [blame] | 5659 | } |
| 5660 | |
Steven Rostedt (VMware) | 56de4e8 | 2019-12-13 13:21:30 -0500 | [diff] [blame] | 5661 | void ring_buffer_put(struct perf_buffer *rb) |
Peter Zijlstra | ac9721f | 2010-05-27 12:54:41 +0200 | [diff] [blame] | 5662 | { |
Elena Reshetova | fecb8ed | 2019-01-28 14:27:27 +0200 | [diff] [blame] | 5663 | if (!refcount_dec_and_test(&rb->refcount)) |
Peter Zijlstra | ac9721f | 2010-05-27 12:54:41 +0200 | [diff] [blame] | 5664 | return; |
| 5665 | |
Peter Zijlstra | 9bb5d40 | 2013-06-04 10:44:21 +0200 | [diff] [blame] | 5666 | WARN_ON_ONCE(!list_empty(&rb->event_list)); |
Peter Zijlstra | 10c6db1 | 2011-11-26 02:47:31 +0100 | [diff] [blame] | 5667 | |
Frederic Weisbecker | 7636913 | 2011-05-19 19:55:04 +0200 | [diff] [blame] | 5668 | call_rcu(&rb->rcu_head, rb_free_rcu); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5669 | } |
| 5670 | |
| 5671 | static void perf_mmap_open(struct vm_area_struct *vma) |
| 5672 | { |
| 5673 | struct perf_event *event = vma->vm_file->private_data; |
| 5674 | |
| 5675 | atomic_inc(&event->mmap_count); |
Peter Zijlstra | 9bb5d40 | 2013-06-04 10:44:21 +0200 | [diff] [blame] | 5676 | atomic_inc(&event->rb->mmap_count); |
Andy Lutomirski | 1e0fb9e | 2014-10-24 15:58:10 -0700 | [diff] [blame] | 5677 | |
Peter Zijlstra | 45bfb2e | 2015-01-14 14:18:11 +0200 | [diff] [blame] | 5678 | if (vma->vm_pgoff) |
| 5679 | atomic_inc(&event->rb->aux_mmap_count); |
| 5680 | |
Andy Lutomirski | 1e0fb9e | 2014-10-24 15:58:10 -0700 | [diff] [blame] | 5681 | if (event->pmu->event_mapped) |
Peter Zijlstra | bfe33492 | 2017-08-02 19:39:30 +0200 | [diff] [blame] | 5682 | event->pmu->event_mapped(event, vma->vm_mm); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5683 | } |
| 5684 | |
Alexander Shishkin | 95ff4ca | 2015-12-02 18:41:11 +0200 | [diff] [blame] | 5685 | static void perf_pmu_output_stop(struct perf_event *event); |
| 5686 | |
Peter Zijlstra | 9bb5d40 | 2013-06-04 10:44:21 +0200 | [diff] [blame] | 5687 | /* |
| 5688 | * A buffer can be mmap()ed multiple times; either directly through the same |
| 5689 | * event, or through other events by use of perf_event_set_output(). |
| 5690 | * |
| 5691 | * In order to undo the VM accounting done by perf_mmap() we need to destroy |
| 5692 | * the buffer here, where we still have a VM context. This means we need |
| 5693 | * to detach all events redirecting to us. |
| 5694 | */ |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5695 | static void perf_mmap_close(struct vm_area_struct *vma) |
| 5696 | { |
| 5697 | struct perf_event *event = vma->vm_file->private_data; |
| 5698 | |
Steven Rostedt (VMware) | 56de4e8 | 2019-12-13 13:21:30 -0500 | [diff] [blame] | 5699 | struct perf_buffer *rb = ring_buffer_get(event); |
Peter Zijlstra | 9bb5d40 | 2013-06-04 10:44:21 +0200 | [diff] [blame] | 5700 | struct user_struct *mmap_user = rb->mmap_user; |
| 5701 | int mmap_locked = rb->mmap_locked; |
| 5702 | unsigned long size = perf_data_size(rb); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5703 | |
Andy Lutomirski | 1e0fb9e | 2014-10-24 15:58:10 -0700 | [diff] [blame] | 5704 | if (event->pmu->event_unmapped) |
Peter Zijlstra | bfe33492 | 2017-08-02 19:39:30 +0200 | [diff] [blame] | 5705 | event->pmu->event_unmapped(event, vma->vm_mm); |
Andy Lutomirski | 1e0fb9e | 2014-10-24 15:58:10 -0700 | [diff] [blame] | 5706 | |
Peter Zijlstra | 45bfb2e | 2015-01-14 14:18:11 +0200 | [diff] [blame] | 5707 | /* |
| 5708 | * rb->aux_mmap_count will always drop before rb->mmap_count and |
| 5709 | * event->mmap_count, so it is ok to use event->mmap_mutex to |
| 5710 | * serialize with perf_mmap here. |
| 5711 | */ |
| 5712 | if (rb_has_aux(rb) && vma->vm_pgoff == rb->aux_pgoff && |
| 5713 | atomic_dec_and_mutex_lock(&rb->aux_mmap_count, &event->mmap_mutex)) { |
Alexander Shishkin | 95ff4ca | 2015-12-02 18:41:11 +0200 | [diff] [blame] | 5714 | /* |
| 5715 | * Stop all AUX events that are writing to this buffer, |
| 5716 | * so that we can free its AUX pages and corresponding PMU |
| 5717 | * data. Note that after rb::aux_mmap_count dropped to zero, |
| 5718 | * they won't start any more (see perf_aux_output_begin()). |
| 5719 | */ |
| 5720 | perf_pmu_output_stop(event); |
| 5721 | |
| 5722 | /* now it's safe to free the pages */ |
Alexander Shishkin | 36b3db0 | 2019-11-15 18:08:18 +0200 | [diff] [blame] | 5723 | atomic_long_sub(rb->aux_nr_pages - rb->aux_mmap_locked, &mmap_user->locked_vm); |
| 5724 | atomic64_sub(rb->aux_mmap_locked, &vma->vm_mm->pinned_vm); |
Peter Zijlstra | 45bfb2e | 2015-01-14 14:18:11 +0200 | [diff] [blame] | 5725 | |
Alexander Shishkin | 95ff4ca | 2015-12-02 18:41:11 +0200 | [diff] [blame] | 5726 | /* this has to be the last one */ |
Peter Zijlstra | 45bfb2e | 2015-01-14 14:18:11 +0200 | [diff] [blame] | 5727 | rb_free_aux(rb); |
Elena Reshetova | ca3bb3d | 2019-01-28 14:27:28 +0200 | [diff] [blame] | 5728 | WARN_ON_ONCE(refcount_read(&rb->aux_refcount)); |
Alexander Shishkin | 95ff4ca | 2015-12-02 18:41:11 +0200 | [diff] [blame] | 5729 | |
Peter Zijlstra | 45bfb2e | 2015-01-14 14:18:11 +0200 | [diff] [blame] | 5730 | mutex_unlock(&event->mmap_mutex); |
| 5731 | } |
| 5732 | |
Peter Zijlstra | 9bb5d40 | 2013-06-04 10:44:21 +0200 | [diff] [blame] | 5733 | atomic_dec(&rb->mmap_count); |
Peter Zijlstra | ac9721f | 2010-05-27 12:54:41 +0200 | [diff] [blame] | 5734 | |
Peter Zijlstra | 9bb5d40 | 2013-06-04 10:44:21 +0200 | [diff] [blame] | 5735 | if (!atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex)) |
Peter Zijlstra | b69cf53 | 2014-03-14 10:50:33 +0100 | [diff] [blame] | 5736 | goto out_put; |
Peter Zijlstra | 9bb5d40 | 2013-06-04 10:44:21 +0200 | [diff] [blame] | 5737 | |
Peter Zijlstra | b69cf53 | 2014-03-14 10:50:33 +0100 | [diff] [blame] | 5738 | ring_buffer_attach(event, NULL); |
Peter Zijlstra | 9bb5d40 | 2013-06-04 10:44:21 +0200 | [diff] [blame] | 5739 | mutex_unlock(&event->mmap_mutex); |
| 5740 | |
| 5741 | /* If there's still other mmap()s of this buffer, we're done. */ |
Peter Zijlstra | b69cf53 | 2014-03-14 10:50:33 +0100 | [diff] [blame] | 5742 | if (atomic_read(&rb->mmap_count)) |
| 5743 | goto out_put; |
Peter Zijlstra | 9bb5d40 | 2013-06-04 10:44:21 +0200 | [diff] [blame] | 5744 | |
| 5745 | /* |
| 5746 | * No other mmap()s, detach from all other events that might redirect |
| 5747 | * into the now unreachable buffer. Somewhat complicated by the |
| 5748 | * fact that rb::event_lock otherwise nests inside mmap_mutex. |
| 5749 | */ |
| 5750 | again: |
| 5751 | rcu_read_lock(); |
| 5752 | list_for_each_entry_rcu(event, &rb->event_list, rb_entry) { |
| 5753 | if (!atomic_long_inc_not_zero(&event->refcount)) { |
| 5754 | /* |
| 5755 | * This event is en-route to free_event() which will |
| 5756 | * detach it and remove it from the list. |
| 5757 | */ |
| 5758 | continue; |
| 5759 | } |
| 5760 | rcu_read_unlock(); |
| 5761 | |
| 5762 | mutex_lock(&event->mmap_mutex); |
| 5763 | /* |
| 5764 | * Check we didn't race with perf_event_set_output() which can |
| 5765 | * swizzle the rb from under us while we were waiting to |
| 5766 | * acquire mmap_mutex. |
| 5767 | * |
| 5768 | * If we find a different rb, ignore this event; the next |
| 5769 | * iteration will no longer find it on the list. We still have |
| 5770 | * to restart the iteration to make sure we're not now |
| 5771 | * iterating the wrong list. |
| 5772 | */ |
Peter Zijlstra | b69cf53 | 2014-03-14 10:50:33 +0100 | [diff] [blame] | 5773 | if (event->rb == rb) |
| 5774 | ring_buffer_attach(event, NULL); |
| 5775 | |
Peter Zijlstra | 9bb5d40 | 2013-06-04 10:44:21 +0200 | [diff] [blame] | 5776 | mutex_unlock(&event->mmap_mutex); |
| 5777 | put_event(event); |
| 5778 | |
| 5779 | /* |
| 5780 | * Restart the iteration; either we're on the wrong list or |
| 5781 | * we destroyed its integrity by doing a deletion. |
| 5782 | */ |
| 5783 | goto again; |
| 5784 | } |
| 5785 | rcu_read_unlock(); |
| 5786 | |
| 5787 | /* |
| 5788 | * It could be that there are still a few 0-ref events on the list; they'll |
| 5789 | * get cleaned up by free_event() -- they'll also still have their |
| 5790 | * ref on the rb and will free it whenever they are done with it. |
| 5791 | * |
| 5792 | * Aside from that, this buffer is 'fully' detached and unmapped, |
| 5793 | * undo the VM accounting. |
| 5794 | */ |
| 5795 | |
Song Liu | d44248a | 2019-09-04 14:46:18 -0700 | [diff] [blame] | 5796 | atomic_long_sub((size >> PAGE_SHIFT) + 1 - mmap_locked, |
| 5797 | &mmap_user->locked_vm); |
Davidlohr Bueso | 70f8a3c | 2019-02-06 09:59:15 -0800 | [diff] [blame] | 5798 | atomic64_sub(mmap_locked, &vma->vm_mm->pinned_vm); |
Peter Zijlstra | 9bb5d40 | 2013-06-04 10:44:21 +0200 | [diff] [blame] | 5799 | free_uid(mmap_user); |
| 5800 | |
Peter Zijlstra | b69cf53 | 2014-03-14 10:50:33 +0100 | [diff] [blame] | 5801 | out_put: |
Peter Zijlstra | 9bb5d40 | 2013-06-04 10:44:21 +0200 | [diff] [blame] | 5802 | ring_buffer_put(rb); /* could be last */ |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5803 | } |
| 5804 | |
Alexey Dobriyan | f0f37e2f | 2009-09-27 22:29:37 +0400 | [diff] [blame] | 5805 | static const struct vm_operations_struct perf_mmap_vmops = { |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5806 | .open = perf_mmap_open, |
Ingo Molnar | fca0c11 | 2018-12-03 10:52:21 +0100 | [diff] [blame] | 5807 | .close = perf_mmap_close, /* non mergeable */ |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5808 | .fault = perf_mmap_fault, |
| 5809 | .page_mkwrite = perf_mmap_fault, |
| 5810 | }; |
| 5811 | |
| 5812 | static int perf_mmap(struct file *file, struct vm_area_struct *vma) |
| 5813 | { |
| 5814 | struct perf_event *event = file->private_data; |
| 5815 | unsigned long user_locked, user_lock_limit; |
| 5816 | struct user_struct *user = current_user(); |
Steven Rostedt (VMware) | 56de4e8 | 2019-12-13 13:21:30 -0500 | [diff] [blame] | 5817 | struct perf_buffer *rb = NULL; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5818 | unsigned long locked, lock_limit; |
| 5819 | unsigned long vma_size; |
| 5820 | unsigned long nr_pages; |
Peter Zijlstra | 45bfb2e | 2015-01-14 14:18:11 +0200 | [diff] [blame] | 5821 | long user_extra = 0, extra = 0; |
Peter Zijlstra | d57e34f | 2010-05-28 19:41:35 +0200 | [diff] [blame] | 5822 | int ret = 0, flags = 0; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5823 | |
Peter Zijlstra | c792061 | 2010-05-18 10:33:24 +0200 | [diff] [blame] | 5824 | /* |
| 5825 | * Don't allow mmap() of inherited per-task counters. This would |
| 5826 | * create a performance issue due to all children writing to the |
Frederic Weisbecker | 7636913 | 2011-05-19 19:55:04 +0200 | [diff] [blame] | 5827 | * same rb. |
Peter Zijlstra | c792061 | 2010-05-18 10:33:24 +0200 | [diff] [blame] | 5828 | */ |
| 5829 | if (event->cpu == -1 && event->attr.inherit) |
| 5830 | return -EINVAL; |
| 5831 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5832 | if (!(vma->vm_flags & VM_SHARED)) |
| 5833 | return -EINVAL; |
| 5834 | |
Joel Fernandes (Google) | da97e18 | 2019-10-14 13:03:08 -0400 | [diff] [blame] | 5835 | ret = security_perf_event_read(event); |
| 5836 | if (ret) |
| 5837 | return ret; |
| 5838 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5839 | vma_size = vma->vm_end - vma->vm_start; |
Peter Zijlstra | 45bfb2e | 2015-01-14 14:18:11 +0200 | [diff] [blame] | 5840 | |
| 5841 | if (vma->vm_pgoff == 0) { |
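 | | /* pgoff 0: the normal buffer; one user control page followed by nr_pages of data. */ |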
| 5842 | nr_pages = (vma_size / PAGE_SIZE) - 1; |
| 5843 | } else { |
| 5844 | /* |
 | 5845 | * AUX area mapping: if rb->aux_nr_pages != 0, it's already |
 | 5846 | * mapped; all subsequent mappings must have the same size |
 | 5847 | * and offset, and must lie above the normal perf buffer. |
| 5848 | */ |
| 5849 | u64 aux_offset, aux_size; |
| 5850 | |
| 5851 | if (!event->rb) |
| 5852 | return -EINVAL; |
| 5853 | |
| 5854 | nr_pages = vma_size / PAGE_SIZE; |
| 5855 | |
| 5856 | mutex_lock(&event->mmap_mutex); |
| 5857 | ret = -EINVAL; |
| 5858 | |
| 5859 | rb = event->rb; |
| 5860 | if (!rb) |
| 5861 | goto aux_unlock; |
| 5862 | |
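 | | /* |
 | |  * Fetch the AUX geometry published in the user page; the checks |
 | |  * below validate the requested mapping against it. |
 | |  */ |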
Mark Rutland | 6aa7de0 | 2017-10-23 14:07:29 -0700 | [diff] [blame] | 5863 | aux_offset = READ_ONCE(rb->user_page->aux_offset); |
| 5864 | aux_size = READ_ONCE(rb->user_page->aux_size); |
Peter Zijlstra | 45bfb2e | 2015-01-14 14:18:11 +0200 | [diff] [blame] | 5865 | |
| 5866 | if (aux_offset < perf_data_size(rb) + PAGE_SIZE) |
| 5867 | goto aux_unlock; |
| 5868 | |
| 5869 | if (aux_offset != vma->vm_pgoff << PAGE_SHIFT) |
| 5870 | goto aux_unlock; |
| 5871 | |
| 5872 | /* already mapped with a different offset */ |
| 5873 | if (rb_has_aux(rb) && rb->aux_pgoff != vma->vm_pgoff) |
| 5874 | goto aux_unlock; |
| 5875 | |
| 5876 | if (aux_size != vma_size || aux_size != nr_pages * PAGE_SIZE) |
| 5877 | goto aux_unlock; |
| 5878 | |
| 5879 | /* already mapped with a different size */ |
| 5880 | if (rb_has_aux(rb) && rb->aux_nr_pages != nr_pages) |
| 5881 | goto aux_unlock; |
| 5882 | |
| 5883 | if (!is_power_of_2(nr_pages)) |
| 5884 | goto aux_unlock; |
| 5885 | |
| 5886 | if (!atomic_inc_not_zero(&rb->mmap_count)) |
| 5887 | goto aux_unlock; |
| 5888 | |
| 5889 | if (rb_has_aux(rb)) { |
| 5890 | atomic_inc(&rb->aux_mmap_count); |
| 5891 | ret = 0; |
| 5892 | goto unlock; |
| 5893 | } |
| 5894 | |
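 | | /* First AUX mapping: all AUX pages are charged against the mlock budget. */ |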
| 5895 | atomic_set(&rb->aux_mmap_count, 1); |
| 5896 | user_extra = nr_pages; |
| 5897 | |
| 5898 | goto accounting; |
| 5899 | } |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5900 | |
| 5901 | /* |
Frederic Weisbecker | 7636913 | 2011-05-19 19:55:04 +0200 | [diff] [blame] | 5902 | * If we have rb pages, ensure they're a power-of-two number so we |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5903 | * can use bitmasks instead of modulo. |
| 5904 | */ |
Kan Liang | 2ed1131 | 2015-03-02 02:14:26 -0500 | [diff] [blame] | 5905 | if (nr_pages != 0 && !is_power_of_2(nr_pages)) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5906 | return -EINVAL; |
| 5907 | |
| 5908 | if (vma_size != PAGE_SIZE * (1 + nr_pages)) |
| 5909 | return -EINVAL; |
| 5910 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5911 | WARN_ON_ONCE(event->ctx->parent_ctx); |
Peter Zijlstra | 9bb5d40 | 2013-06-04 10:44:21 +0200 | [diff] [blame] | 5912 | again: |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5913 | mutex_lock(&event->mmap_mutex); |
Frederic Weisbecker | 7636913 | 2011-05-19 19:55:04 +0200 | [diff] [blame] | 5914 | if (event->rb) { |
Peter Zijlstra | 9bb5d40 | 2013-06-04 10:44:21 +0200 | [diff] [blame] | 5915 | if (event->rb->nr_pages != nr_pages) { |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5916 | ret = -EINVAL; |
Peter Zijlstra | 9bb5d40 | 2013-06-04 10:44:21 +0200 | [diff] [blame] | 5917 | goto unlock; |
| 5918 | } |
| 5919 | |
| 5920 | if (!atomic_inc_not_zero(&event->rb->mmap_count)) { |
| 5921 | /* |
| 5922 | * Raced against perf_mmap_close() through |
| 5923 | * perf_event_set_output(). Try again, hope for better |
| 5924 | * luck. |
| 5925 | */ |
| 5926 | mutex_unlock(&event->mmap_mutex); |
| 5927 | goto again; |
| 5928 | } |
| 5929 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5930 | goto unlock; |
| 5931 | } |
| 5932 | |
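 | | /* Account for the data pages plus one user control page. */ |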
| 5933 | user_extra = nr_pages + 1; |
Peter Zijlstra | 45bfb2e | 2015-01-14 14:18:11 +0200 | [diff] [blame] | 5934 | |
| 5935 | accounting: |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5936 | user_lock_limit = sysctl_perf_event_mlock >> (PAGE_SHIFT - 10); |
| 5937 | |
| 5938 | /* |
| 5939 | * Increase the limit linearly with more CPUs: |
| 5940 | */ |
| 5941 | user_lock_limit *= num_online_cpus(); |
| 5942 | |
Song Liu | 0034615 | 2020-01-23 10:11:46 -0800 | [diff] [blame] | 5943 | user_locked = atomic_long_read(&user->locked_vm); |
| 5944 | |
| 5945 | /* |
| 5946 | * sysctl_perf_event_mlock may have changed, so that |
| 5947 | * user->locked_vm > user_lock_limit |
| 5948 | */ |
| 5949 | if (user_locked > user_lock_limit) |
| 5950 | user_locked = user_lock_limit; |
| 5951 | user_locked += user_extra; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5952 | |
Alexander Shishkin | c4b7547 | 2019-11-20 19:06:40 +0200 | [diff] [blame] | 5953 | if (user_locked > user_lock_limit) { |
Song Liu | d44248a | 2019-09-04 14:46:18 -0700 | [diff] [blame] | 5954 | /* |
| 5955 | * charge locked_vm until it hits user_lock_limit; |
| 5956 | * charge the rest from pinned_vm |
| 5957 | */ |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5958 | extra = user_locked - user_lock_limit; |
Song Liu | d44248a | 2019-09-04 14:46:18 -0700 | [diff] [blame] | 5959 | user_extra -= extra; |
| 5960 | } |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5961 | |
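 | | /* RLIMIT_MEMLOCK is in bytes; convert it to pages before comparing with pinned_vm. */ |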
Jiri Slaby | 78d7d40 | 2010-03-05 13:42:54 -0800 | [diff] [blame] | 5962 | lock_limit = rlimit(RLIMIT_MEMLOCK); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5963 | lock_limit >>= PAGE_SHIFT; |
Davidlohr Bueso | 70f8a3c | 2019-02-06 09:59:15 -0800 | [diff] [blame] | 5964 | locked = atomic64_read(&vma->vm_mm->pinned_vm) + extra; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5965 | |
Joel Fernandes (Google) | da97e18 | 2019-10-14 13:03:08 -0400 | [diff] [blame] | 5966 | if ((locked > lock_limit) && perf_is_paranoid() && |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5967 | !capable(CAP_IPC_LOCK)) { |
| 5968 | ret = -EPERM; |
| 5969 | goto unlock; |
| 5970 | } |
| 5971 | |
Peter Zijlstra | 45bfb2e | 2015-01-14 14:18:11 +0200 | [diff] [blame] | 5972 | WARN_ON(!rb && event->rb); |
Peter Zijlstra | 906010b | 2009-09-21 16:08:49 +0200 | [diff] [blame] | 5973 | |
Peter Zijlstra | d57e34f | 2010-05-28 19:41:35 +0200 | [diff] [blame] | 5974 | if (vma->vm_flags & VM_WRITE) |
Frederic Weisbecker | 7636913 | 2011-05-19 19:55:04 +0200 | [diff] [blame] | 5975 | flags |= RING_BUFFER_WRITABLE; |
Peter Zijlstra | d57e34f | 2010-05-28 19:41:35 +0200 | [diff] [blame] | 5976 | |
Frederic Weisbecker | 7636913 | 2011-05-19 19:55:04 +0200 | [diff] [blame] | 5977 | if (!rb) { |
Peter Zijlstra | 45bfb2e | 2015-01-14 14:18:11 +0200 | [diff] [blame] | 5978 | rb = rb_alloc(nr_pages, |
| 5979 | event->attr.watermark ? event->attr.wakeup_watermark : 0, |
| 5980 | event->cpu, flags); |
| 5981 | |
| 5982 | if (!rb) { |
| 5983 | ret = -ENOMEM; |
| 5984 | goto unlock; |
| 5985 | } |
| 5986 | |
| 5987 | atomic_set(&rb->mmap_count, 1); |
| 5988 | rb->mmap_user = get_current_user(); |
| 5989 | rb->mmap_locked = extra; |
| 5990 | |
| 5991 | ring_buffer_attach(event, rb); |
| 5992 | |
| 5993 | perf_event_init_userpage(event); |
| 5994 | perf_event_update_userpage(event); |
| 5995 | } else { |
Alexander Shishkin | 1a59413 | 2015-01-14 14:18:18 +0200 | [diff] [blame] | 5996 | ret = rb_alloc_aux(rb, event, vma->vm_pgoff, nr_pages, |
| 5997 | event->attr.aux_watermark, flags); |
Peter Zijlstra | 45bfb2e | 2015-01-14 14:18:11 +0200 | [diff] [blame] | 5998 | if (!ret) |
| 5999 | rb->aux_mmap_locked = extra; |
Peter Zijlstra | ac9721f | 2010-05-27 12:54:41 +0200 | [diff] [blame] | 6000 | } |
Peter Zijlstra | 26cb63a | 2013-05-28 10:55:48 +0200 | [diff] [blame] | 6001 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6002 | unlock: |
Peter Zijlstra | 45bfb2e | 2015-01-14 14:18:11 +0200 | [diff] [blame] | 6003 | if (!ret) { |
| 6004 | atomic_long_add(user_extra, &user->locked_vm); |
Davidlohr Bueso | 70f8a3c | 2019-02-06 09:59:15 -0800 | [diff] [blame] | 6005 | atomic64_add(extra, &vma->vm_mm->pinned_vm); |
Peter Zijlstra | 45bfb2e | 2015-01-14 14:18:11 +0200 | [diff] [blame] | 6006 | |
Peter Zijlstra | ac9721f | 2010-05-27 12:54:41 +0200 | [diff] [blame] | 6007 | atomic_inc(&event->mmap_count); |
Peter Zijlstra | 45bfb2e | 2015-01-14 14:18:11 +0200 | [diff] [blame] | 6008 | } else if (rb) { |
| 6009 | atomic_dec(&rb->mmap_count); |
| 6010 | } |
| 6011 | aux_unlock: |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6012 | mutex_unlock(&event->mmap_mutex); |
| 6013 | |
Peter Zijlstra | 9bb5d40 | 2013-06-04 10:44:21 +0200 | [diff] [blame] | 6014 | /* |
| 6015 | * Since pinned accounting is per vm we cannot allow fork() to copy our |
| 6016 | * vma. |
| 6017 | */ |
Peter Zijlstra | 26cb63a | 2013-05-28 10:55:48 +0200 | [diff] [blame] | 6018 | vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6019 | vma->vm_ops = &perf_mmap_vmops; |
| 6020 | |
Andy Lutomirski | 1e0fb9e | 2014-10-24 15:58:10 -0700 | [diff] [blame] | 6021 | if (event->pmu->event_mapped) |
Peter Zijlstra | bfe33492 | 2017-08-02 19:39:30 +0200 | [diff] [blame] | 6022 | event->pmu->event_mapped(event, vma->vm_mm); |
Andy Lutomirski | 1e0fb9e | 2014-10-24 15:58:10 -0700 | [diff] [blame] | 6023 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6024 | return ret; |
| 6025 | } |
| 6026 | |
| 6027 | static int perf_fasync(int fd, struct file *filp, int on) |
| 6028 | { |
Al Viro | 496ad9a | 2013-01-23 17:07:38 -0500 | [diff] [blame] | 6029 | struct inode *inode = file_inode(filp); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6030 | struct perf_event *event = filp->private_data; |
| 6031 | int retval; |
| 6032 | |
Al Viro | 5955102 | 2016-01-22 15:40:57 -0500 | [diff] [blame] | 6033 | inode_lock(inode); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6034 | retval = fasync_helper(fd, filp, on, &event->fasync); |
Al Viro | 5955102 | 2016-01-22 15:40:57 -0500 | [diff] [blame] | 6035 | inode_unlock(inode); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6036 | |
| 6037 | if (retval < 0) |
| 6038 | return retval; |
| 6039 | |
| 6040 | return 0; |
| 6041 | } |
| 6042 | |
| 6043 | static const struct file_operations perf_fops = { |
Arnd Bergmann | 3326c1c | 2010-03-23 19:09:33 +0100 | [diff] [blame] | 6044 | .llseek = no_llseek, |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6045 | .release = perf_release, |
| 6046 | .read = perf_read, |
| 6047 | .poll = perf_poll, |
| 6048 | .unlocked_ioctl = perf_ioctl, |
Pawel Moll | b3f2078 | 2014-06-13 16:03:32 +0100 | [diff] [blame] | 6049 | .compat_ioctl = perf_compat_ioctl, |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6050 | .mmap = perf_mmap, |
| 6051 | .fasync = perf_fasync, |
| 6052 | }; |
| 6053 | |
| 6054 | /* |
| 6055 | * Perf event wakeup |
| 6056 | * |
| 6057 | * If there's data, ensure we set the poll() state and publish everything |
| 6058 | * to user-space before waking everybody up. |
| 6059 | */ |
| 6060 | |
Peter Zijlstra | fed66e2cd | 2015-06-11 10:32:01 +0200 | [diff] [blame] | 6061 | static inline struct fasync_struct **perf_event_fasync(struct perf_event *event) |
| 6062 | { |
| 6063 | /* only the parent has fasync state */ |
| 6064 | if (event->parent) |
| 6065 | event = event->parent; |
| 6066 | return &event->fasync; |
| 6067 | } |
| 6068 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6069 | void perf_event_wakeup(struct perf_event *event) |
| 6070 | { |
Peter Zijlstra | 10c6db1 | 2011-11-26 02:47:31 +0100 | [diff] [blame] | 6071 | ring_buffer_wakeup(event); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6072 | |
| 6073 | if (event->pending_kill) { |
Peter Zijlstra | fed66e2cd | 2015-06-11 10:32:01 +0200 | [diff] [blame] | 6074 | kill_fasync(perf_event_fasync(event), SIGIO, event->pending_kill); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6075 | event->pending_kill = 0; |
| 6076 | } |
| 6077 | } |
| 6078 | |
Peter Zijlstra | 1d54ad9 | 2019-04-04 15:03:00 +0200 | [diff] [blame] | 6079 | static void perf_pending_event_disable(struct perf_event *event) |
| 6080 | { |
| 6081 | int cpu = READ_ONCE(event->pending_disable); |
| 6082 | |
| 6083 | if (cpu < 0) |
| 6084 | return; |
| 6085 | |
| 6086 | if (cpu == smp_processor_id()) { |
| 6087 | WRITE_ONCE(event->pending_disable, -1); |
| 6088 | perf_event_disable_local(event); |
| 6089 | return; |
| 6090 | } |
| 6091 | |
| 6092 | /* |
| 6093 | * CPU-A CPU-B |
| 6094 | * |
| 6095 | * perf_event_disable_inatomic() |
| 6096 | * @pending_disable = CPU-A; |
| 6097 | * irq_work_queue(); |
| 6098 | * |
| 6099 | * sched-out |
| 6100 | * @pending_disable = -1; |
| 6101 | * |
| 6102 | * sched-in |
| 6103 | * perf_event_disable_inatomic() |
| 6104 | * @pending_disable = CPU-B; |
| 6105 | * irq_work_queue(); // FAILS |
| 6106 | * |
| 6107 | * irq_work_run() |
| 6108 | * perf_pending_event() |
| 6109 | * |
| 6110 | * But the event runs on CPU-B and wants disabling there. |
| 6111 | */ |
| 6112 | irq_work_queue_on(&event->pending, cpu); |
| 6113 | } |
| 6114 | |
Peter Zijlstra | e360adb | 2010-10-14 14:01:34 +0800 | [diff] [blame] | 6115 | static void perf_pending_event(struct irq_work *entry) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6116 | { |
Peter Zijlstra | 1d54ad9 | 2019-04-04 15:03:00 +0200 | [diff] [blame] | 6117 | struct perf_event *event = container_of(entry, struct perf_event, pending); |
Peter Zijlstra | d525211 | 2015-02-19 18:03:11 +0100 | [diff] [blame] | 6118 | int rctx; |
| 6119 | |
| 6120 | rctx = perf_swevent_get_recursion_context(); |
| 6121 | /* |
 | 6122 | * If we 'fail' here, that's OK; it means recursion is already disabled |
 | 6123 | * and we won't recurse 'further'. |
| 6124 | */ |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6125 | |
Peter Zijlstra | 1d54ad9 | 2019-04-04 15:03:00 +0200 | [diff] [blame] | 6126 | perf_pending_event_disable(event); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6127 | |
| 6128 | if (event->pending_wakeup) { |
| 6129 | event->pending_wakeup = 0; |
| 6130 | perf_event_wakeup(event); |
| 6131 | } |
Peter Zijlstra | d525211 | 2015-02-19 18:03:11 +0100 | [diff] [blame] | 6132 | |
| 6133 | if (rctx >= 0) |
| 6134 | perf_swevent_put_recursion_context(rctx); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6135 | } |
| 6136 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6137 | /* |
Zhang, Yanmin | 39447b3 | 2010-04-19 13:32:41 +0800 | [diff] [blame] | 6138 |  * We assume KVM is the only one supporting these callbacks. |
 | 6139 |  * Later on, we might change this to a list if there is |
 | 6140 |  * another virtualization implementation supporting the callbacks. |
| 6141 | */ |
| 6142 | struct perf_guest_info_callbacks *perf_guest_cbs; |
| 6143 | |
| 6144 | int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *cbs) |
| 6145 | { |
| 6146 | perf_guest_cbs = cbs; |
| 6147 | return 0; |
| 6148 | } |
| 6149 | EXPORT_SYMBOL_GPL(perf_register_guest_info_callbacks); |
| 6150 | |
| 6151 | int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *cbs) |
| 6152 | { |
| 6153 | perf_guest_cbs = NULL; |
| 6154 | return 0; |
| 6155 | } |
| 6156 | EXPORT_SYMBOL_GPL(perf_unregister_guest_info_callbacks); |
| 6157 | |
Jiri Olsa | 4018994 | 2012-08-07 15:20:37 +0200 | [diff] [blame] | 6158 | static void |
| 6159 | perf_output_sample_regs(struct perf_output_handle *handle, |
| 6160 | struct pt_regs *regs, u64 mask) |
| 6161 | { |
| 6162 | int bit; |
Madhavan Srinivasan | 29dd328 | 2016-08-17 15:06:08 +0530 | [diff] [blame] | 6163 | DECLARE_BITMAP(_mask, 64); |
Jiri Olsa | 4018994 | 2012-08-07 15:20:37 +0200 | [diff] [blame] | 6164 | |
Madhavan Srinivasan | 29dd328 | 2016-08-17 15:06:08 +0530 | [diff] [blame] | 6165 | bitmap_from_u64(_mask, mask); |
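 | | /* Emit one u64 register value for each bit set in @mask, in ascending bit order. */ |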
| 6166 | for_each_set_bit(bit, _mask, sizeof(mask) * BITS_PER_BYTE) { |
Jiri Olsa | 4018994 | 2012-08-07 15:20:37 +0200 | [diff] [blame] | 6167 | u64 val; |
| 6168 | |
| 6169 | val = perf_reg_value(regs, bit); |
| 6170 | perf_output_put(handle, val); |
| 6171 | } |
| 6172 | } |
| 6173 | |
Stephane Eranian | 60e2364 | 2014-09-24 13:48:37 +0200 | [diff] [blame] | 6174 | static void perf_sample_regs_user(struct perf_regs *regs_user, |
Andy Lutomirski | 88a7c26 | 2015-01-04 10:36:19 -0800 | [diff] [blame] | 6175 | struct pt_regs *regs, |
| 6176 | struct pt_regs *regs_user_copy) |
Jiri Olsa | 4018994 | 2012-08-07 15:20:37 +0200 | [diff] [blame] | 6177 | { |
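 | | /* |
 | |  * Samples taken in user mode can use the interrupt regs directly; |
 | |  * kernel-mode hits on a user task fall back to the saved user regs, |
 | |  * and kernel threads have no user regs at all. |
 | |  */ |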
Andy Lutomirski | 88a7c26 | 2015-01-04 10:36:19 -0800 | [diff] [blame] | 6178 | if (user_mode(regs)) { |
| 6179 | regs_user->abi = perf_reg_abi(current); |
Peter Zijlstra | 2565711 | 2014-09-24 13:48:42 +0200 | [diff] [blame] | 6180 | regs_user->regs = regs; |
Peter Zijlstra | 085ebfe | 2019-05-29 14:37:24 +0200 | [diff] [blame] | 6181 | } else if (!(current->flags & PF_KTHREAD)) { |
Andy Lutomirski | 88a7c26 | 2015-01-04 10:36:19 -0800 | [diff] [blame] | 6182 | perf_get_regs_user(regs_user, regs, regs_user_copy); |
Peter Zijlstra | 2565711 | 2014-09-24 13:48:42 +0200 | [diff] [blame] | 6183 | } else { |
| 6184 | regs_user->abi = PERF_SAMPLE_REGS_ABI_NONE; |
| 6185 | regs_user->regs = NULL; |
Jiri Olsa | 4018994 | 2012-08-07 15:20:37 +0200 | [diff] [blame] | 6186 | } |
| 6187 | } |
| 6188 | |
Stephane Eranian | 60e2364 | 2014-09-24 13:48:37 +0200 | [diff] [blame] | 6189 | static void perf_sample_regs_intr(struct perf_regs *regs_intr, |
| 6190 | struct pt_regs *regs) |
| 6191 | { |
| 6192 | regs_intr->regs = regs; |
| 6193 | regs_intr->abi = perf_reg_abi(current); |
| 6194 | } |
| 6195 | |
| 6196 | |
Jiri Olsa | c5ebced | 2012-08-07 15:20:40 +0200 | [diff] [blame] | 6197 | /* |
| 6198 | * Get remaining task size from user stack pointer. |
| 6199 | * |
 | 6200 | * It'd be better to take the stack vma map and limit this more |
Roy Ben Shlomo | 9f014e3 | 2019-09-20 20:12:53 +0300 | [diff] [blame] | 6201 | * precisely, but there's no way to get it safely under interrupt, |
Jiri Olsa | c5ebced | 2012-08-07 15:20:40 +0200 | [diff] [blame] | 6202 | * so we use TASK_SIZE as the limit. |
| 6203 | */ |
| 6204 | static u64 perf_ustack_task_size(struct pt_regs *regs) |
| 6205 | { |
| 6206 | unsigned long addr = perf_user_stack_pointer(regs); |
| 6207 | |
| 6208 | if (!addr || addr >= TASK_SIZE) |
| 6209 | return 0; |
| 6210 | |
| 6211 | return TASK_SIZE - addr; |
| 6212 | } |
| 6213 | |
| 6214 | static u16 |
| 6215 | perf_sample_ustack_size(u16 stack_size, u16 header_size, |
| 6216 | struct pt_regs *regs) |
| 6217 | { |
| 6218 | u64 task_size; |
| 6219 | |
| 6220 | /* No regs, no stack pointer, no dump. */ |
| 6221 | if (!regs) |
| 6222 | return 0; |
| 6223 | |
| 6224 | /* |
 | 6225 | * Check whether the requested stack size fits into: |
 | 6226 | * - TASK_SIZE |
 | 6227 | *   If it doesn't, we limit the size to TASK_SIZE. |
 | 6228 | * |
 | 6229 | * - remaining sample size |
 | 6230 | *   If it doesn't, we shrink the stack size to fit |
 | 6231 | *   into the remaining sample size. |
| 6232 | */ |
| 6233 | |
| 6234 | task_size = min((u64) USHRT_MAX, perf_ustack_task_size(regs)); |
| 6235 | stack_size = min(stack_size, (u16) task_size); |
| 6236 | |
| 6237 | /* Current header size plus static size and dynamic size. */ |
| 6238 | header_size += 2 * sizeof(u64); |
| 6239 | |
| 6240 | /* Do we fit in with the current stack dump size? */ |
| 6241 | if ((u16) (header_size + stack_size) < header_size) { |
| 6242 | /* |
| 6243 | * If we overflow the maximum size for the sample, |
| 6244 | * we customize the stack dump size to fit in. |
| 6245 | */ |
| 6246 | stack_size = USHRT_MAX - header_size - sizeof(u64); |
| 6247 | stack_size = round_up(stack_size, sizeof(u64)); |
| 6248 | } |
| 6249 | |
| 6250 | return stack_size; |
| 6251 | } |
| 6252 | |
| 6253 | static void |
| 6254 | perf_output_sample_ustack(struct perf_output_handle *handle, u64 dump_size, |
| 6255 | struct pt_regs *regs) |
| 6256 | { |
| 6257 | /* Case of a kernel thread, nothing to dump */ |
| 6258 | if (!regs) { |
| 6259 | u64 size = 0; |
| 6260 | perf_output_put(handle, size); |
| 6261 | } else { |
| 6262 | unsigned long sp; |
| 6263 | unsigned int rem; |
| 6264 | u64 dyn_size; |
Yabin Cui | 02e1844 | 2018-08-23 15:59:35 -0700 | [diff] [blame] | 6265 | mm_segment_t fs; |
Jiri Olsa | c5ebced | 2012-08-07 15:20:40 +0200 | [diff] [blame] | 6266 | |
| 6267 | /* |
| 6268 | * We dump: |
| 6269 | * static size |
 | 6270 | *   - the size requested by the user or the best one we can fit |
 | 6271 | *     into the sample max size |
| 6272 | * data |
| 6273 | * - user stack dump data |
| 6274 | * dynamic size |
| 6275 | * - the actual dumped size |
| 6276 | */ |
| 6277 | |
| 6278 | /* Static size. */ |
| 6279 | perf_output_put(handle, dump_size); |
| 6280 | |
| 6281 | /* Data. */ |
| 6282 | sp = perf_user_stack_pointer(regs); |
Yabin Cui | 02e1844 | 2018-08-23 15:59:35 -0700 | [diff] [blame] | 6283 | fs = get_fs(); |
| 6284 | set_fs(USER_DS); |
Jiri Olsa | c5ebced | 2012-08-07 15:20:40 +0200 | [diff] [blame] | 6285 | rem = __output_copy_user(handle, (void *) sp, dump_size); |
Yabin Cui | 02e1844 | 2018-08-23 15:59:35 -0700 | [diff] [blame] | 6286 | set_fs(fs); |
Jiri Olsa | c5ebced | 2012-08-07 15:20:40 +0200 | [diff] [blame] | 6287 | dyn_size = dump_size - rem; |
| 6288 | |
| 6289 | perf_output_skip(handle, rem); |
| 6290 | |
| 6291 | /* Dynamic size. */ |
| 6292 | perf_output_put(handle, dyn_size); |
| 6293 | } |
| 6294 | } |
| 6295 | |
Alexander Shishkin | a4faf00 | 2019-10-25 17:08:33 +0300 | [diff] [blame] | 6296 | static unsigned long perf_prepare_sample_aux(struct perf_event *event, |
| 6297 | struct perf_sample_data *data, |
| 6298 | size_t size) |
| 6299 | { |
| 6300 | struct perf_event *sampler = event->aux_event; |
Steven Rostedt (VMware) | 56de4e8 | 2019-12-13 13:21:30 -0500 | [diff] [blame] | 6301 | struct perf_buffer *rb; |
Alexander Shishkin | a4faf00 | 2019-10-25 17:08:33 +0300 | [diff] [blame] | 6302 | |
| 6303 | data->aux_size = 0; |
| 6304 | |
| 6305 | if (!sampler) |
| 6306 | goto out; |
| 6307 | |
| 6308 | if (WARN_ON_ONCE(READ_ONCE(sampler->state) != PERF_EVENT_STATE_ACTIVE)) |
| 6309 | goto out; |
| 6310 | |
| 6311 | if (WARN_ON_ONCE(READ_ONCE(sampler->oncpu) != smp_processor_id())) |
| 6312 | goto out; |
| 6313 | |
| 6314 | rb = ring_buffer_get(sampler->parent ? sampler->parent : sampler); |
| 6315 | if (!rb) |
| 6316 | goto out; |
| 6317 | |
| 6318 | /* |
| 6319 | * If this is an NMI hit inside sampling code, don't take |
| 6320 | * the sample. See also perf_aux_sample_output(). |
| 6321 | */ |
| 6322 | if (READ_ONCE(rb->aux_in_sampling)) { |
| 6323 | data->aux_size = 0; |
| 6324 | } else { |
| 6325 | size = min_t(size_t, size, perf_aux_size(rb)); |
| 6326 | data->aux_size = ALIGN(size, sizeof(u64)); |
| 6327 | } |
| 6328 | ring_buffer_put(rb); |
| 6329 | |
| 6330 | out: |
| 6331 | return data->aux_size; |
| 6332 | } |
| 6333 | |
Steven Rostedt (VMware) | 56de4e8 | 2019-12-13 13:21:30 -0500 | [diff] [blame] | 6334 | long perf_pmu_snapshot_aux(struct perf_buffer *rb, |
Alexander Shishkin | a4faf00 | 2019-10-25 17:08:33 +0300 | [diff] [blame] | 6335 | struct perf_event *event, |
| 6336 | struct perf_output_handle *handle, |
| 6337 | unsigned long size) |
| 6338 | { |
| 6339 | unsigned long flags; |
| 6340 | long ret; |
| 6341 | |
| 6342 | /* |
| 6343 | * Normal ->start()/->stop() callbacks run in IRQ mode in scheduler |
| 6344 | * paths. If we start calling them in NMI context, they may race with |
| 6345 | * the IRQ ones, that is, for example, re-starting an event that's just |
| 6346 | * been stopped, which is why we're using a separate callback that |
| 6347 | * doesn't change the event state. |
| 6348 | * |
| 6349 | * IRQs need to be disabled to prevent IPIs from racing with us. |
| 6350 | */ |
| 6351 | local_irq_save(flags); |
| 6352 | /* |
| 6353 | * Guard against NMI hits inside the critical section; |
| 6354 | * see also perf_prepare_sample_aux(). |
| 6355 | */ |
| 6356 | WRITE_ONCE(rb->aux_in_sampling, 1); |
| 6357 | barrier(); |
| 6358 | |
| 6359 | ret = event->pmu->snapshot_aux(event, handle, size); |
| 6360 | |
| 6361 | barrier(); |
| 6362 | WRITE_ONCE(rb->aux_in_sampling, 0); |
| 6363 | local_irq_restore(flags); |
| 6364 | |
| 6365 | return ret; |
| 6366 | } |
| 6367 | |
| 6368 | static void perf_aux_sample_output(struct perf_event *event, |
| 6369 | struct perf_output_handle *handle, |
| 6370 | struct perf_sample_data *data) |
| 6371 | { |
| 6372 | struct perf_event *sampler = event->aux_event; |
Steven Rostedt (VMware) | 56de4e8 | 2019-12-13 13:21:30 -0500 | [diff] [blame] | 6373 | struct perf_buffer *rb; |
Alexander Shishkin | a4faf00 | 2019-10-25 17:08:33 +0300 | [diff] [blame] | 6374 | unsigned long pad; |
Alexander Shishkin | a4faf00 | 2019-10-25 17:08:33 +0300 | [diff] [blame] | 6375 | long size; |
| 6376 | |
| 6377 | if (WARN_ON_ONCE(!sampler || !data->aux_size)) |
| 6378 | return; |
| 6379 | |
| 6380 | rb = ring_buffer_get(sampler->parent ? sampler->parent : sampler); |
| 6381 | if (!rb) |
| 6382 | return; |
| 6383 | |
| 6384 | size = perf_pmu_snapshot_aux(rb, sampler, handle, data->aux_size); |
| 6385 | |
| 6386 | /* |
| 6387 | * An error here means that perf_output_copy() failed (returned a |
| 6388 | * non-zero surplus that it didn't copy), which in its current |
| 6389 | * enlightened implementation is not possible. If that changes, we'd |
| 6390 | * like to know. |
| 6391 | */ |
| 6392 | if (WARN_ON_ONCE(size < 0)) |
| 6393 | goto out_put; |
| 6394 | |
| 6395 | /* |
| 6396 | * The pad comes from ALIGN()ing data->aux_size up to u64 in |
| 6397 | * perf_prepare_sample_aux(), so should not be more than that. |
| 6398 | */ |
| 6399 | pad = data->aux_size - size; |
| 6400 | if (WARN_ON_ONCE(pad >= sizeof(u64))) |
| 6401 | pad = 8; |
| 6402 | |
| 6403 | if (pad) { |
| 6404 | u64 zero = 0; |
| 6405 | perf_output_copy(handle, &zero, pad); |
| 6406 | } |
| 6407 | |
| 6408 | out_put: |
| 6409 | ring_buffer_put(rb); |
| 6410 | } |
| 6411 | |
Arnaldo Carvalho de Melo | c980d10 | 2010-12-04 23:02:20 -0200 | [diff] [blame] | 6412 | static void __perf_event_header__init_id(struct perf_event_header *header, |
| 6413 | struct perf_sample_data *data, |
| 6414 | struct perf_event *event) |
Arnaldo Carvalho de Melo | 6844c09 | 2010-12-03 16:36:35 -0200 | [diff] [blame] | 6415 | { |
| 6416 | u64 sample_type = event->attr.sample_type; |
| 6417 | |
| 6418 | data->type = sample_type; |
| 6419 | header->size += event->id_header_size; |
| 6420 | |
| 6421 | if (sample_type & PERF_SAMPLE_TID) { |
| 6422 | /* namespace issues */ |
| 6423 | data->tid_entry.pid = perf_event_pid(event, current); |
| 6424 | data->tid_entry.tid = perf_event_tid(event, current); |
| 6425 | } |
| 6426 | |
| 6427 | if (sample_type & PERF_SAMPLE_TIME) |
Peter Zijlstra | 34f4392 | 2015-02-20 14:05:38 +0100 | [diff] [blame] | 6428 | data->time = perf_event_clock(event); |
Arnaldo Carvalho de Melo | 6844c09 | 2010-12-03 16:36:35 -0200 | [diff] [blame] | 6429 | |
Adrian Hunter | ff3d527 | 2013-08-27 11:23:07 +0300 | [diff] [blame] | 6430 | if (sample_type & (PERF_SAMPLE_ID | PERF_SAMPLE_IDENTIFIER)) |
Arnaldo Carvalho de Melo | 6844c09 | 2010-12-03 16:36:35 -0200 | [diff] [blame] | 6431 | data->id = primary_event_id(event); |
| 6432 | |
| 6433 | if (sample_type & PERF_SAMPLE_STREAM_ID) |
| 6434 | data->stream_id = event->id; |
| 6435 | |
| 6436 | if (sample_type & PERF_SAMPLE_CPU) { |
| 6437 | data->cpu_entry.cpu = raw_smp_processor_id(); |
| 6438 | data->cpu_entry.reserved = 0; |
| 6439 | } |
| 6440 | } |
| 6441 | |
Frederic Weisbecker | 7636913 | 2011-05-19 19:55:04 +0200 | [diff] [blame] | 6442 | void perf_event_header__init_id(struct perf_event_header *header, |
| 6443 | struct perf_sample_data *data, |
| 6444 | struct perf_event *event) |
Arnaldo Carvalho de Melo | c980d10 | 2010-12-04 23:02:20 -0200 | [diff] [blame] | 6445 | { |
| 6446 | if (event->attr.sample_id_all) |
| 6447 | __perf_event_header__init_id(header, data, event); |
| 6448 | } |
| 6449 | |
| 6450 | static void __perf_event__output_id_sample(struct perf_output_handle *handle, |
| 6451 | struct perf_sample_data *data) |
| 6452 | { |
| 6453 | u64 sample_type = data->type; |
| 6454 | |
| 6455 | if (sample_type & PERF_SAMPLE_TID) |
| 6456 | perf_output_put(handle, data->tid_entry); |
| 6457 | |
| 6458 | if (sample_type & PERF_SAMPLE_TIME) |
| 6459 | perf_output_put(handle, data->time); |
| 6460 | |
| 6461 | if (sample_type & PERF_SAMPLE_ID) |
| 6462 | perf_output_put(handle, data->id); |
| 6463 | |
| 6464 | if (sample_type & PERF_SAMPLE_STREAM_ID) |
| 6465 | perf_output_put(handle, data->stream_id); |
| 6466 | |
| 6467 | if (sample_type & PERF_SAMPLE_CPU) |
| 6468 | perf_output_put(handle, data->cpu_entry); |
Adrian Hunter | ff3d527 | 2013-08-27 11:23:07 +0300 | [diff] [blame] | 6469 | |
| 6470 | if (sample_type & PERF_SAMPLE_IDENTIFIER) |
| 6471 | perf_output_put(handle, data->id); |
Arnaldo Carvalho de Melo | c980d10 | 2010-12-04 23:02:20 -0200 | [diff] [blame] | 6472 | } |
| 6473 | |
Frederic Weisbecker | 7636913 | 2011-05-19 19:55:04 +0200 | [diff] [blame] | 6474 | void perf_event__output_id_sample(struct perf_event *event, |
| 6475 | struct perf_output_handle *handle, |
| 6476 | struct perf_sample_data *sample) |
Arnaldo Carvalho de Melo | c980d10 | 2010-12-04 23:02:20 -0200 | [diff] [blame] | 6477 | { |
| 6478 | if (event->attr.sample_id_all) |
| 6479 | __perf_event__output_id_sample(handle, sample); |
| 6480 | } |
| 6481 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6482 | static void perf_output_read_one(struct perf_output_handle *handle, |
Stephane Eranian | eed0152 | 2010-10-26 16:08:01 +0200 | [diff] [blame] | 6483 | struct perf_event *event, |
| 6484 | u64 enabled, u64 running) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6485 | { |
| 6486 | u64 read_format = event->attr.read_format; |
| 6487 | u64 values[4]; |
| 6488 | int n = 0; |
| 6489 | |
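 | | /* Emit the counter value, then the optional enabled/running times and id, as selected by read_format. */ |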
Peter Zijlstra | b5e5879 | 2010-05-21 14:43:12 +0200 | [diff] [blame] | 6490 | values[n++] = perf_event_count(event); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6491 | if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) { |
Stephane Eranian | eed0152 | 2010-10-26 16:08:01 +0200 | [diff] [blame] | 6492 | values[n++] = enabled + |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6493 | atomic64_read(&event->child_total_time_enabled); |
| 6494 | } |
| 6495 | if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) { |
Stephane Eranian | eed0152 | 2010-10-26 16:08:01 +0200 | [diff] [blame] | 6496 | values[n++] = running + |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6497 | atomic64_read(&event->child_total_time_running); |
| 6498 | } |
| 6499 | if (read_format & PERF_FORMAT_ID) |
| 6500 | values[n++] = primary_event_id(event); |
| 6501 | |
Frederic Weisbecker | 7636913 | 2011-05-19 19:55:04 +0200 | [diff] [blame] | 6502 | __output_copy(handle, values, n * sizeof(u64)); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6503 | } |
| 6504 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6505 | static void perf_output_read_group(struct perf_output_handle *handle, |
Stephane Eranian | eed0152 | 2010-10-26 16:08:01 +0200 | [diff] [blame] | 6506 | struct perf_event *event, |
| 6507 | u64 enabled, u64 running) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6508 | { |
| 6509 | struct perf_event *leader = event->group_leader, *sub; |
| 6510 | u64 read_format = event->attr.read_format; |
| 6511 | u64 values[5]; |
| 6512 | int n = 0; |
| 6513 | |
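 | | /* nr: the group leader plus all of its siblings. */ |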
| 6514 | values[n++] = 1 + leader->nr_siblings; |
| 6515 | |
| 6516 | if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) |
Stephane Eranian | eed0152 | 2010-10-26 16:08:01 +0200 | [diff] [blame] | 6517 | values[n++] = enabled; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6518 | |
| 6519 | if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) |
Stephane Eranian | eed0152 | 2010-10-26 16:08:01 +0200 | [diff] [blame] | 6520 | values[n++] = running; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6521 | |
Peter Zijlstra | 9e5b127 | 2018-03-09 12:52:04 +0100 | [diff] [blame] | 6522 | if ((leader != event) && |
| 6523 | (leader->state == PERF_EVENT_STATE_ACTIVE)) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6524 | leader->pmu->read(leader); |
| 6525 | |
Peter Zijlstra | b5e5879 | 2010-05-21 14:43:12 +0200 | [diff] [blame] | 6526 | values[n++] = perf_event_count(leader); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6527 | if (read_format & PERF_FORMAT_ID) |
| 6528 | values[n++] = primary_event_id(leader); |
| 6529 | |
Frederic Weisbecker | 7636913 | 2011-05-19 19:55:04 +0200 | [diff] [blame] | 6530 | __output_copy(handle, values, n * sizeof(u64)); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6531 | |
Peter Zijlstra | edb3959 | 2018-03-15 17:36:56 +0100 | [diff] [blame] | 6532 | for_each_sibling_event(sub, leader) { |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6533 | n = 0; |
| 6534 | |
Jiri Olsa | 6f5ab00 | 2012-10-15 20:13:45 +0200 | [diff] [blame] | 6535 | if ((sub != event) && |
| 6536 | (sub->state == PERF_EVENT_STATE_ACTIVE)) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6537 | sub->pmu->read(sub); |
| 6538 | |
Peter Zijlstra | b5e5879 | 2010-05-21 14:43:12 +0200 | [diff] [blame] | 6539 | values[n++] = perf_event_count(sub); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6540 | if (read_format & PERF_FORMAT_ID) |
| 6541 | values[n++] = primary_event_id(sub); |
| 6542 | |
Frederic Weisbecker | 7636913 | 2011-05-19 19:55:04 +0200 | [diff] [blame] | 6543 | __output_copy(handle, values, n * sizeof(u64)); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6544 | } |
| 6545 | } |
| 6546 | |
Stephane Eranian | eed0152 | 2010-10-26 16:08:01 +0200 | [diff] [blame] | 6547 | #define PERF_FORMAT_TOTAL_TIMES (PERF_FORMAT_TOTAL_TIME_ENABLED|\ |
| 6548 | PERF_FORMAT_TOTAL_TIME_RUNNING) |
| 6549 | |
Peter Zijlstra | ba5213a | 2017-05-30 11:45:12 +0200 | [diff] [blame] | 6550 | /* |
| 6551 | * XXX PERF_SAMPLE_READ vs inherited events seems difficult. |
| 6552 | * |
| 6553 | * The problem is that its both hard and excessively expensive to iterate the |
| 6554 | * child list, not to mention that its impossible to IPI the children running |
| 6555 | * on another CPU, from interrupt/NMI context. |
| 6556 | */ |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6557 | static void perf_output_read(struct perf_output_handle *handle, |
| 6558 | struct perf_event *event) |
| 6559 | { |
Peter Zijlstra | e3f3541 | 2011-11-21 11:43:53 +0100 | [diff] [blame] | 6560 | u64 enabled = 0, running = 0, now; |
Stephane Eranian | eed0152 | 2010-10-26 16:08:01 +0200 | [diff] [blame] | 6561 | u64 read_format = event->attr.read_format; |
| 6562 | |
| 6563 | /* |
| 6564 | * compute total_time_enabled, total_time_running |
| 6565 | * based on snapshot values taken when the event |
| 6566 | * was last scheduled in. |
| 6567 | * |
 | 6568 | * we cannot simply call update_context_time() |
 | 6569 | * because of locking issues, as we are called in |
 | 6570 | * NMI context |
| 6571 | */ |
Eric B Munson | c479429 | 2011-06-23 16:34:38 -0400 | [diff] [blame] | 6572 | if (read_format & PERF_FORMAT_TOTAL_TIMES) |
Peter Zijlstra | e3f3541 | 2011-11-21 11:43:53 +0100 | [diff] [blame] | 6573 | calc_timer_values(event, &now, &enabled, &running); |
Stephane Eranian | eed0152 | 2010-10-26 16:08:01 +0200 | [diff] [blame] | 6574 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6575 | if (event->attr.read_format & PERF_FORMAT_GROUP) |
Stephane Eranian | eed0152 | 2010-10-26 16:08:01 +0200 | [diff] [blame] | 6576 | perf_output_read_group(handle, event, enabled, running); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6577 | else |
Stephane Eranian | eed0152 | 2010-10-26 16:08:01 +0200 | [diff] [blame] | 6578 | perf_output_read_one(handle, event, enabled, running); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6579 | } |
| 6580 | |
Kan Liang | bbfd5e4 | 2020-01-27 08:53:54 -0800 | [diff] [blame] | 6581 | static inline bool perf_sample_save_hw_index(struct perf_event *event) |
| 6582 | { |
| 6583 | return event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_HW_INDEX; |
| 6584 | } |
| 6585 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6586 | void perf_output_sample(struct perf_output_handle *handle, |
| 6587 | struct perf_event_header *header, |
| 6588 | struct perf_sample_data *data, |
| 6589 | struct perf_event *event) |
| 6590 | { |
| 6591 | u64 sample_type = data->type; |
| 6592 | |
| 6593 | perf_output_put(handle, *header); |
| 6594 | |
Adrian Hunter | ff3d527 | 2013-08-27 11:23:07 +0300 | [diff] [blame] | 6595 | if (sample_type & PERF_SAMPLE_IDENTIFIER) |
| 6596 | perf_output_put(handle, data->id); |
| 6597 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6598 | if (sample_type & PERF_SAMPLE_IP) |
| 6599 | perf_output_put(handle, data->ip); |
| 6600 | |
| 6601 | if (sample_type & PERF_SAMPLE_TID) |
| 6602 | perf_output_put(handle, data->tid_entry); |
| 6603 | |
| 6604 | if (sample_type & PERF_SAMPLE_TIME) |
| 6605 | perf_output_put(handle, data->time); |
| 6606 | |
| 6607 | if (sample_type & PERF_SAMPLE_ADDR) |
| 6608 | perf_output_put(handle, data->addr); |
| 6609 | |
| 6610 | if (sample_type & PERF_SAMPLE_ID) |
| 6611 | perf_output_put(handle, data->id); |
| 6612 | |
| 6613 | if (sample_type & PERF_SAMPLE_STREAM_ID) |
| 6614 | perf_output_put(handle, data->stream_id); |
| 6615 | |
| 6616 | if (sample_type & PERF_SAMPLE_CPU) |
| 6617 | perf_output_put(handle, data->cpu_entry); |
| 6618 | |
| 6619 | if (sample_type & PERF_SAMPLE_PERIOD) |
| 6620 | perf_output_put(handle, data->period); |
| 6621 | |
| 6622 | if (sample_type & PERF_SAMPLE_READ) |
| 6623 | perf_output_read(handle, event); |
| 6624 | |
| 6625 | if (sample_type & PERF_SAMPLE_CALLCHAIN) { |
Jiri Olsa | 99e818c | 2018-01-07 17:03:50 +0100 | [diff] [blame] | 6626 | int size = 1; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6627 | |
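 | | /* One u64 for nr plus one u64 per callchain entry. */ |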
Jiri Olsa | 99e818c | 2018-01-07 17:03:50 +0100 | [diff] [blame] | 6628 | size += data->callchain->nr; |
| 6629 | size *= sizeof(u64); |
| 6630 | __output_copy(handle, data->callchain, size); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6631 | } |
| 6632 | |
| 6633 | if (sample_type & PERF_SAMPLE_RAW) { |
Daniel Borkmann | 7e3f977 | 2016-07-14 18:08:03 +0200 | [diff] [blame] | 6634 | struct perf_raw_record *raw = data->raw; |
Alexei Starovoitov | fa128e6 | 2015-10-20 20:02:33 -0700 | [diff] [blame] | 6635 | |
Daniel Borkmann | 7e3f977 | 2016-07-14 18:08:03 +0200 | [diff] [blame] | 6636 | if (raw) { |
| 6637 | struct perf_raw_frag *frag = &raw->frag; |
| 6638 | |
| 6639 | perf_output_put(handle, raw->size); |
| 6640 | do { |
| 6641 | if (frag->copy) { |
| 6642 | __output_custom(handle, frag->copy, |
| 6643 | frag->data, frag->size); |
| 6644 | } else { |
| 6645 | __output_copy(handle, frag->data, |
| 6646 | frag->size); |
| 6647 | } |
| 6648 | if (perf_raw_frag_last(frag)) |
| 6649 | break; |
| 6650 | frag = frag->next; |
| 6651 | } while (1); |
| 6652 | if (frag->pad) |
| 6653 | __output_skip(handle, NULL, frag->pad); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6654 | } else { |
| 6655 | struct { |
| 6656 | u32 size; |
| 6657 | u32 data; |
| 6658 | } raw = { |
| 6659 | .size = sizeof(u32), |
| 6660 | .data = 0, |
| 6661 | }; |
| 6662 | perf_output_put(handle, raw); |
| 6663 | } |
| 6664 | } |
Peter Zijlstra | a7ac67e | 2011-06-27 16:47:16 +0200 | [diff] [blame] | 6665 | |
Stephane Eranian | bce38cd | 2012-02-09 23:20:51 +0100 | [diff] [blame] | 6666 | if (sample_type & PERF_SAMPLE_BRANCH_STACK) { |
| 6667 | if (data->br_stack) { |
| 6668 | size_t size; |
| 6669 | |
| 6670 | size = data->br_stack->nr |
| 6671 | * sizeof(struct perf_branch_entry); |
| 6672 | |
| 6673 | perf_output_put(handle, data->br_stack->nr); |
Kan Liang | bbfd5e4 | 2020-01-27 08:53:54 -0800 | [diff] [blame] | 6674 | if (perf_sample_save_hw_index(event)) |
| 6675 | perf_output_put(handle, data->br_stack->hw_idx); |
Stephane Eranian | bce38cd | 2012-02-09 23:20:51 +0100 | [diff] [blame] | 6676 | perf_output_copy(handle, data->br_stack->entries, size); |
| 6677 | } else { |
| 6678 | /* |
| 6679 | * we always store at least the value of nr |
| 6680 | */ |
| 6681 | u64 nr = 0; |
| 6682 | perf_output_put(handle, nr); |
| 6683 | } |
| 6684 | } |
Jiri Olsa | 4018994 | 2012-08-07 15:20:37 +0200 | [diff] [blame] | 6685 | |
| 6686 | if (sample_type & PERF_SAMPLE_REGS_USER) { |
| 6687 | u64 abi = data->regs_user.abi; |
| 6688 | |
| 6689 | /* |
| 6690 | * If there are no regs to dump, notice it through |
| 6691 | * first u64 being zero (PERF_SAMPLE_REGS_ABI_NONE). |
| 6692 | */ |
| 6693 | perf_output_put(handle, abi); |
| 6694 | |
| 6695 | if (abi) { |
| 6696 | u64 mask = event->attr.sample_regs_user; |
| 6697 | perf_output_sample_regs(handle, |
| 6698 | data->regs_user.regs, |
| 6699 | mask); |
| 6700 | } |
| 6701 | } |
Jiri Olsa | c5ebced | 2012-08-07 15:20:40 +0200 | [diff] [blame] | 6702 | |
Peter Zijlstra | a5cdd40 | 2013-07-16 17:09:07 +0200 | [diff] [blame] | 6703 | if (sample_type & PERF_SAMPLE_STACK_USER) { |
Jiri Olsa | c5ebced | 2012-08-07 15:20:40 +0200 | [diff] [blame] | 6704 | perf_output_sample_ustack(handle, |
| 6705 | data->stack_user_size, |
| 6706 | data->regs_user.regs); |
Peter Zijlstra | a5cdd40 | 2013-07-16 17:09:07 +0200 | [diff] [blame] | 6707 | } |
Andi Kleen | c3feedf | 2013-01-24 16:10:28 +0100 | [diff] [blame] | 6708 | |
| 6709 | if (sample_type & PERF_SAMPLE_WEIGHT) |
| 6710 | perf_output_put(handle, data->weight); |
Stephane Eranian | d6be9ad | 2013-01-24 16:10:31 +0100 | [diff] [blame] | 6711 | |
| 6712 | if (sample_type & PERF_SAMPLE_DATA_SRC) |
| 6713 | perf_output_put(handle, data->data_src.val); |
Peter Zijlstra | a5cdd40 | 2013-07-16 17:09:07 +0200 | [diff] [blame] | 6714 | |
Andi Kleen | fdfbbd0 | 2013-09-20 07:40:39 -0700 | [diff] [blame] | 6715 | if (sample_type & PERF_SAMPLE_TRANSACTION) |
| 6716 | perf_output_put(handle, data->txn); |
| 6717 | |
Stephane Eranian | 60e2364 | 2014-09-24 13:48:37 +0200 | [diff] [blame] | 6718 | if (sample_type & PERF_SAMPLE_REGS_INTR) { |
| 6719 | u64 abi = data->regs_intr.abi; |
| 6720 | /* |
| 6721 | * If there are no regs to dump, notice it through |
| 6722 | * first u64 being zero (PERF_SAMPLE_REGS_ABI_NONE). |
| 6723 | */ |
| 6724 | perf_output_put(handle, abi); |
| 6725 | |
| 6726 | if (abi) { |
| 6727 | u64 mask = event->attr.sample_regs_intr; |
| 6728 | |
| 6729 | perf_output_sample_regs(handle, |
| 6730 | data->regs_intr.regs, |
| 6731 | mask); |
| 6732 | } |
| 6733 | } |
| 6734 | |
Kan Liang | fc7ce9c | 2017-08-28 20:52:49 -0400 | [diff] [blame] | 6735 | if (sample_type & PERF_SAMPLE_PHYS_ADDR) |
| 6736 | perf_output_put(handle, data->phys_addr); |
| 6737 | |
Alexander Shishkin | a4faf00 | 2019-10-25 17:08:33 +0300 | [diff] [blame] | 6738 | if (sample_type & PERF_SAMPLE_AUX) { |
| 6739 | perf_output_put(handle, data->aux_size); |
| 6740 | |
| 6741 | if (data->aux_size) |
| 6742 | perf_aux_sample_output(event, handle, data); |
| 6743 | } |
| 6744 | |
Peter Zijlstra | a5cdd40 | 2013-07-16 17:09:07 +0200 | [diff] [blame] | 6745 | if (!event->attr.watermark) { |
| 6746 | int wakeup_events = event->attr.wakeup_events; |
| 6747 | |
| 6748 | if (wakeup_events) { |
Steven Rostedt (VMware) | 56de4e8 | 2019-12-13 13:21:30 -0500 | [diff] [blame] | 6749 | struct perf_buffer *rb = handle->rb; |
Peter Zijlstra | a5cdd40 | 2013-07-16 17:09:07 +0200 | [diff] [blame] | 6750 | int events = local_inc_return(&rb->events); |
| 6751 | |
| 6752 | if (events >= wakeup_events) { |
| 6753 | local_sub(wakeup_events, &rb->events); |
| 6754 | local_inc(&rb->wakeup); |
| 6755 | } |
| 6756 | } |
| 6757 | } |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6758 | } |
| 6759 | |
Kan Liang | fc7ce9c | 2017-08-28 20:52:49 -0400 | [diff] [blame] | 6760 | static u64 perf_virt_to_phys(u64 virt) |
| 6761 | { |
| 6762 | u64 phys_addr = 0; |
| 6763 | struct page *p = NULL; |
| 6764 | |
| 6765 | if (!virt) |
| 6766 | return 0; |
| 6767 | |
| 6768 | if (virt >= TASK_SIZE) { |
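 | | /* Kernel address: virt_to_phys() is only valid for the linear mapping. */ |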
| 6769 | /* If it's vmalloc()d memory, leave phys_addr as 0 */ |
| 6770 | if (virt_addr_valid((void *)(uintptr_t)virt) && |
| 6771 | !(virt >= VMALLOC_START && virt < VMALLOC_END)) |
| 6772 | phys_addr = (u64)virt_to_phys((void *)(uintptr_t)virt); |
| 6773 | } else { |
| 6774 | /* |
 | 6775 | * Walk the page tables for a user address. |
 | 6776 | * Interrupts are disabled, which prevents any teardown |
 | 6777 | * of the page tables. |
 | 6778 | * Try the IRQ-safe __get_user_pages_fast() first. |
 | 6779 | * If that fails, leave phys_addr as 0. |
| 6780 | */ |
| 6781 | if ((current->mm != NULL) && |
| 6782 | (__get_user_pages_fast(virt, 1, 0, &p) == 1)) |
| 6783 | phys_addr = page_to_phys(p) + virt % PAGE_SIZE; |
| 6784 | |
| 6785 | if (p) |
| 6786 | put_page(p); |
| 6787 | } |
| 6788 | |
| 6789 | return phys_addr; |
| 6790 | } |
| 6791 | |
Jiri Olsa | 99e818c | 2018-01-07 17:03:50 +0100 | [diff] [blame] | 6792 | static struct perf_callchain_entry __empty_callchain = { .nr = 0, }; |
| 6793 | |
Peter Zijlstra | 6cbc304 | 2018-05-10 15:48:41 +0200 | [diff] [blame] | 6794 | struct perf_callchain_entry * |
Jiri Olsa | 8cf7e0e | 2018-01-07 17:03:49 +0100 | [diff] [blame] | 6795 | perf_callchain(struct perf_event *event, struct pt_regs *regs) |
| 6796 | { |
| 6797 | bool kernel = !event->attr.exclude_callchain_kernel; |
| 6798 | bool user = !event->attr.exclude_callchain_user; |
| 6799 | /* Disallow cross-task user callchains. */ |
| 6800 | bool crosstask = event->ctx->task && event->ctx->task != current; |
| 6801 | const u32 max_stack = event->attr.sample_max_stack; |
Jiri Olsa | 99e818c | 2018-01-07 17:03:50 +0100 | [diff] [blame] | 6802 | struct perf_callchain_entry *callchain; |
Jiri Olsa | 8cf7e0e | 2018-01-07 17:03:49 +0100 | [diff] [blame] | 6803 | |
| 6804 | if (!kernel && !user) |
Jiri Olsa | 99e818c | 2018-01-07 17:03:50 +0100 | [diff] [blame] | 6805 | return &__empty_callchain; |
Jiri Olsa | 8cf7e0e | 2018-01-07 17:03:49 +0100 | [diff] [blame] | 6806 | |
Jiri Olsa | 99e818c | 2018-01-07 17:03:50 +0100 | [diff] [blame] | 6807 | callchain = get_perf_callchain(regs, 0, kernel, user, |
| 6808 | max_stack, crosstask, true); |
| 6809 | return callchain ?: &__empty_callchain; |
Jiri Olsa | 8cf7e0e | 2018-01-07 17:03:49 +0100 | [diff] [blame] | 6810 | } |
| 6811 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6812 | void perf_prepare_sample(struct perf_event_header *header, |
| 6813 | struct perf_sample_data *data, |
| 6814 | struct perf_event *event, |
| 6815 | struct pt_regs *regs) |
| 6816 | { |
| 6817 | u64 sample_type = event->attr.sample_type; |
| 6818 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6819 | header->type = PERF_RECORD_SAMPLE; |
Arnaldo Carvalho de Melo | c320c7b | 2010-10-20 12:50:11 -0200 | [diff] [blame] | 6820 | header->size = sizeof(*header) + event->header_size; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6821 | |
| 6822 | header->misc = 0; |
| 6823 | header->misc |= perf_misc_flags(regs); |
| 6824 | |
Arnaldo Carvalho de Melo | c980d10 | 2010-12-04 23:02:20 -0200 | [diff] [blame] | 6825 | __perf_event_header__init_id(header, data, event); |
Arnaldo Carvalho de Melo | 6844c09 | 2010-12-03 16:36:35 -0200 | [diff] [blame] | 6826 | |
Arnaldo Carvalho de Melo | c320c7b | 2010-10-20 12:50:11 -0200 | [diff] [blame] | 6827 | if (sample_type & PERF_SAMPLE_IP) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6828 | data->ip = perf_instruction_pointer(regs); |
| 6829 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6830 | if (sample_type & PERF_SAMPLE_CALLCHAIN) { |
| 6831 | int size = 1; |
| 6832 | |
Peter Zijlstra | 6cbc304 | 2018-05-10 15:48:41 +0200 | [diff] [blame] | 6833 | if (!(sample_type & __PERF_SAMPLE_CALLCHAIN_EARLY)) |
| 6834 | data->callchain = perf_callchain(event, regs); |
| 6835 | |
Jiri Olsa | 99e818c | 2018-01-07 17:03:50 +0100 | [diff] [blame] | 6836 | size += data->callchain->nr; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6837 | |
| 6838 | header->size += size * sizeof(u64); |
| 6839 | } |
| 6840 | |
| 6841 | if (sample_type & PERF_SAMPLE_RAW) { |
Daniel Borkmann | 7e3f977 | 2016-07-14 18:08:03 +0200 | [diff] [blame] | 6842 | struct perf_raw_record *raw = data->raw; |
| 6843 | int size; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6844 | |
Daniel Borkmann | 7e3f977 | 2016-07-14 18:08:03 +0200 | [diff] [blame] | 6845 | if (raw) { |
| 6846 | struct perf_raw_frag *frag = &raw->frag; |
| 6847 | u32 sum = 0; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6848 | |
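 | | /* Sum the fragment sizes, then pad the total (plus the u32 size field) up to a u64 boundary. */ |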
Daniel Borkmann | 7e3f977 | 2016-07-14 18:08:03 +0200 | [diff] [blame] | 6849 | do { |
| 6850 | sum += frag->size; |
| 6851 | if (perf_raw_frag_last(frag)) |
| 6852 | break; |
| 6853 | frag = frag->next; |
| 6854 | } while (1); |
| 6855 | |
| 6856 | size = round_up(sum + sizeof(u32), sizeof(u64)); |
| 6857 | raw->size = size - sizeof(u32); |
| 6858 | frag->pad = raw->size - sum; |
| 6859 | } else { |
| 6860 | size = sizeof(u64); |
| 6861 | } |
| 6862 | |
| 6863 | header->size += size; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6864 | } |
Stephane Eranian | bce38cd | 2012-02-09 23:20:51 +0100 | [diff] [blame] | 6865 | |
| 6866 | if (sample_type & PERF_SAMPLE_BRANCH_STACK) { |
| 6867 | int size = sizeof(u64); /* nr */ |
| 6868 | if (data->br_stack) { |
Kan Liang | bbfd5e4 | 2020-01-27 08:53:54 -0800 | [diff] [blame] | 6869 | if (perf_sample_save_hw_index(event)) |
| 6870 | size += sizeof(u64); |
| 6871 | |
Stephane Eranian | bce38cd | 2012-02-09 23:20:51 +0100 | [diff] [blame] | 6872 | size += data->br_stack->nr |
| 6873 | * sizeof(struct perf_branch_entry); |
| 6874 | } |
| 6875 | header->size += size; |
| 6876 | } |
Jiri Olsa | 4018994 | 2012-08-07 15:20:37 +0200 | [diff] [blame] | 6877 | |
Peter Zijlstra | 2565711 | 2014-09-24 13:48:42 +0200 | [diff] [blame] | 6878 | if (sample_type & (PERF_SAMPLE_REGS_USER | PERF_SAMPLE_STACK_USER)) |
Andy Lutomirski | 88a7c26 | 2015-01-04 10:36:19 -0800 | [diff] [blame] | 6879 | perf_sample_regs_user(&data->regs_user, regs, |
| 6880 | &data->regs_user_copy); |
Peter Zijlstra | 2565711 | 2014-09-24 13:48:42 +0200 | [diff] [blame] | 6881 | |
Jiri Olsa | 4018994 | 2012-08-07 15:20:37 +0200 | [diff] [blame] | 6882 | if (sample_type & PERF_SAMPLE_REGS_USER) { |
| 6883 | /* regs dump ABI info */ |
| 6884 | int size = sizeof(u64); |
| 6885 | |
Jiri Olsa | 4018994 | 2012-08-07 15:20:37 +0200 | [diff] [blame] | 6886 | if (data->regs_user.regs) { |
| 6887 | u64 mask = event->attr.sample_regs_user; |
| 6888 | size += hweight64(mask) * sizeof(u64); |
| 6889 | } |
| 6890 | |
| 6891 | header->size += size; |
| 6892 | } |
Jiri Olsa | c5ebced | 2012-08-07 15:20:40 +0200 | [diff] [blame] | 6893 | |
| 6894 | if (sample_type & PERF_SAMPLE_STACK_USER) { |
| 6895 | /* |
Roy Ben Shlomo | 9f014e3 | 2019-09-20 20:12:53 +0300 | [diff] [blame] | 6896 | * Either the PERF_SAMPLE_STACK_USER bit needs to always be |
Jiri Olsa | c5ebced | 2012-08-07 15:20:40 +0200 | [diff] [blame] | 6897 | * processed last, or an additional check must be added in |
 | 6898 | * case a new sample type is added, because we could eat |
 | 6899 | * up the rest of the sample size. |
| 6900 | */ |
Jiri Olsa | c5ebced | 2012-08-07 15:20:40 +0200 | [diff] [blame] | 6901 | u16 stack_size = event->attr.sample_stack_user; |
| 6902 | u16 size = sizeof(u64); |
| 6903 | |
Jiri Olsa | c5ebced | 2012-08-07 15:20:40 +0200 | [diff] [blame] | 6904 | stack_size = perf_sample_ustack_size(stack_size, header->size, |
Peter Zijlstra | 2565711 | 2014-09-24 13:48:42 +0200 | [diff] [blame] | 6905 | data->regs_user.regs); |
Jiri Olsa | c5ebced | 2012-08-07 15:20:40 +0200 | [diff] [blame] | 6906 | |
| 6907 | /* |
| 6908 | * If there is something to dump, add space for the dump |
| 6909 | * itself and for the field that tells the dynamic size, |
| 6910 | * which is how many have been actually dumped. |
| 6911 | */ |
| 6912 | if (stack_size) |
| 6913 | size += sizeof(u64) + stack_size; |
| 6914 | |
| 6915 | data->stack_user_size = stack_size; |
| 6916 | header->size += size; |
| 6917 | } |
Stephane Eranian | 60e2364 | 2014-09-24 13:48:37 +0200 | [diff] [blame] | 6918 | |
| 6919 | if (sample_type & PERF_SAMPLE_REGS_INTR) { |
| 6920 | /* regs dump ABI info */ |
| 6921 | int size = sizeof(u64); |
| 6922 | |
| 6923 | perf_sample_regs_intr(&data->regs_intr, regs); |
| 6924 | |
| 6925 | if (data->regs_intr.regs) { |
| 6926 | u64 mask = event->attr.sample_regs_intr; |
| 6927 | |
| 6928 | size += hweight64(mask) * sizeof(u64); |
| 6929 | } |
| 6930 | |
| 6931 | header->size += size; |
| 6932 | } |
Kan Liang | fc7ce9c | 2017-08-28 20:52:49 -0400 | [diff] [blame] | 6933 | |
| 6934 | if (sample_type & PERF_SAMPLE_PHYS_ADDR) |
| 6935 | data->phys_addr = perf_virt_to_phys(data->addr); |
Alexander Shishkin | a4faf00 | 2019-10-25 17:08:33 +0300 | [diff] [blame] | 6936 | |
| 6937 | if (sample_type & PERF_SAMPLE_AUX) { |
| 6938 | u64 size; |
| 6939 | |
| 6940 | header->size += sizeof(u64); /* size */ |
| 6941 | |
| 6942 | /* |
 | 6943 | * Given the 16-bit nature of header::size, an AUX sample can |
 | 6944 | * easily overflow it, what with all the preceding sample bits. |
 | 6945 | * Make sure this doesn't happen by using up to U16_MAX bytes |
 | 6946 | * per sample in total (rounded down to an 8-byte boundary). |
| 6947 | */ |
| 6948 | size = min_t(size_t, U16_MAX - header->size, |
| 6949 | event->attr.aux_sample_size); |
| 6950 | size = rounddown(size, 8); |
| 6951 | size = perf_prepare_sample_aux(event, data, size); |
| 6952 | |
| 6953 | WARN_ON_ONCE(size + header->size > U16_MAX); |
| 6954 | header->size += size; |
| 6955 | } |
| 6956 | /* |
| 6957 | * If you're adding more sample types here, you likely need to do |
| 6958 | * something about the overflowing header::size, like repurpose the |
| 6959 |  * lowest 3 bits of size, which should always be zero at the moment.
| 6960 |  * This raises a more important question: do we really need 512k-sized
| 6961 |  * samples, and why? Good argumentation is in order for whatever you
| 6962 |  * do here next.
| 6963 | */ |
| 6964 | WARN_ON_ONCE(header->size & 7); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6965 | } |
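/*
 * Note: perf_prepare_sample() only computes sizes and captures the
 * register/stack state needed later; the actual copy into the ring
 * buffer is done by perf_output_sample() once space has been reserved
 * via output_begin() in __perf_event_output() below.
 */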
| 6966 | |
Arnaldo Carvalho de Melo | 5620196 | 2019-01-11 13:20:20 -0300 | [diff] [blame] | 6967 | static __always_inline int |
Wang Nan | 9ecda41 | 2016-04-05 14:11:18 +0000 | [diff] [blame] | 6968 | __perf_event_output(struct perf_event *event, |
| 6969 | struct perf_sample_data *data, |
| 6970 | struct pt_regs *regs, |
| 6971 | int (*output_begin)(struct perf_output_handle *, |
| 6972 | struct perf_event *, |
| 6973 | unsigned int)) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6974 | { |
| 6975 | struct perf_output_handle handle; |
| 6976 | struct perf_event_header header; |
Arnaldo Carvalho de Melo | 5620196 | 2019-01-11 13:20:20 -0300 | [diff] [blame] | 6977 | int err; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6978 | |
Frederic Weisbecker | 927c7a9 | 2010-07-01 16:20:36 +0200 | [diff] [blame] | 6979 | /* protect the callchain buffers */ |
| 6980 | rcu_read_lock(); |
| 6981 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6982 | perf_prepare_sample(&header, data, event, regs); |
| 6983 | |
Arnaldo Carvalho de Melo | 5620196 | 2019-01-11 13:20:20 -0300 | [diff] [blame] | 6984 | err = output_begin(&handle, event, header.size); |
| 6985 | if (err) |
Frederic Weisbecker | 927c7a9 | 2010-07-01 16:20:36 +0200 | [diff] [blame] | 6986 | goto exit; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6987 | |
| 6988 | perf_output_sample(&handle, &header, data, event); |
| 6989 | |
| 6990 | perf_output_end(&handle); |
Frederic Weisbecker | 927c7a9 | 2010-07-01 16:20:36 +0200 | [diff] [blame] | 6991 | |
| 6992 | exit: |
| 6993 | rcu_read_unlock(); |
Arnaldo Carvalho de Melo | 5620196 | 2019-01-11 13:20:20 -0300 | [diff] [blame] | 6994 | return err; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6995 | } |
| 6996 | |
Wang Nan | 9ecda41 | 2016-04-05 14:11:18 +0000 | [diff] [blame] | 6997 | void |
| 6998 | perf_event_output_forward(struct perf_event *event, |
| 6999 | struct perf_sample_data *data, |
| 7000 | struct pt_regs *regs) |
| 7001 | { |
| 7002 | __perf_event_output(event, data, regs, perf_output_begin_forward); |
| 7003 | } |
| 7004 | |
| 7005 | void |
| 7006 | perf_event_output_backward(struct perf_event *event, |
| 7007 | struct perf_sample_data *data, |
| 7008 | struct pt_regs *regs) |
| 7009 | { |
| 7010 | __perf_event_output(event, data, regs, perf_output_begin_backward); |
| 7011 | } |
| 7012 | |
Arnaldo Carvalho de Melo | 5620196 | 2019-01-11 13:20:20 -0300 | [diff] [blame] | 7013 | int |
Wang Nan | 9ecda41 | 2016-04-05 14:11:18 +0000 | [diff] [blame] | 7014 | perf_event_output(struct perf_event *event, |
| 7015 | struct perf_sample_data *data, |
| 7016 | struct pt_regs *regs) |
| 7017 | { |
Arnaldo Carvalho de Melo | 5620196 | 2019-01-11 13:20:20 -0300 | [diff] [blame] | 7018 | return __perf_event_output(event, data, regs, perf_output_begin); |
Wang Nan | 9ecda41 | 2016-04-05 14:11:18 +0000 | [diff] [blame] | 7019 | } |
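/*
 * The three entry points above differ only in the output_begin callback
 * they pass: forward, backward (for overwrite-mode buffers), or the
 * generic perf_output_begin() that handles either direction based on the
 * event's configuration.  Since __perf_event_output() is __always_inline
 * and the callback is a compile-time constant at each call site, the
 * compiler can typically resolve the indirect call.
 */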
| 7020 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7021 | /* |
| 7022 | * read event_id |
| 7023 | */ |
| 7024 | |
| 7025 | struct perf_read_event { |
| 7026 | struct perf_event_header header; |
| 7027 | |
| 7028 | u32 pid; |
| 7029 | u32 tid; |
| 7030 | }; |
| 7031 | |
| 7032 | static void |
| 7033 | perf_event_read_event(struct perf_event *event, |
| 7034 | struct task_struct *task) |
| 7035 | { |
| 7036 | struct perf_output_handle handle; |
Arnaldo Carvalho de Melo | c980d10 | 2010-12-04 23:02:20 -0200 | [diff] [blame] | 7037 | struct perf_sample_data sample; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7038 | struct perf_read_event read_event = { |
| 7039 | .header = { |
| 7040 | .type = PERF_RECORD_READ, |
| 7041 | .misc = 0, |
Arnaldo Carvalho de Melo | c320c7b | 2010-10-20 12:50:11 -0200 | [diff] [blame] | 7042 | .size = sizeof(read_event) + event->read_size, |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7043 | }, |
| 7044 | .pid = perf_event_pid(event, task), |
| 7045 | .tid = perf_event_tid(event, task), |
| 7046 | }; |
| 7047 | int ret; |
| 7048 | |
Arnaldo Carvalho de Melo | c980d10 | 2010-12-04 23:02:20 -0200 | [diff] [blame] | 7049 | perf_event_header__init_id(&read_event.header, &sample, event); |
Peter Zijlstra | a7ac67e | 2011-06-27 16:47:16 +0200 | [diff] [blame] | 7050 | ret = perf_output_begin(&handle, event, read_event.header.size); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7051 | if (ret) |
| 7052 | return; |
| 7053 | |
| 7054 | perf_output_put(&handle, read_event); |
| 7055 | perf_output_read(&handle, event); |
Arnaldo Carvalho de Melo | c980d10 | 2010-12-04 23:02:20 -0200 | [diff] [blame] | 7056 | perf_event__output_id_sample(event, &handle, &sample); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7057 | |
| 7058 | perf_output_end(&handle); |
| 7059 | } |
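/*
 * A PERF_RECORD_READ record thus consists of the header, the pid/tid of
 * the measured task, the counter value(s) formatted according to
 * attr.read_format (emitted by perf_output_read()), and the optional
 * sample_id trailer appended by perf_event__output_id_sample().
 */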
| 7060 | |
Peter Zijlstra | aab5b71 | 2016-05-12 17:26:46 +0200 | [diff] [blame] | 7061 | typedef void (perf_iterate_f)(struct perf_event *event, void *data); |
Jiri Olsa | 52d857a | 2013-05-06 18:27:18 +0200 | [diff] [blame] | 7062 | |
| 7063 | static void |
Peter Zijlstra | aab5b71 | 2016-05-12 17:26:46 +0200 | [diff] [blame] | 7064 | perf_iterate_ctx(struct perf_event_context *ctx, |
| 7065 | perf_iterate_f output, |
Alexander Shishkin | b73e4fe | 2016-04-27 18:44:45 +0300 | [diff] [blame] | 7066 | void *data, bool all) |
Jiri Olsa | 52d857a | 2013-05-06 18:27:18 +0200 | [diff] [blame] | 7067 | { |
| 7068 | struct perf_event *event; |
| 7069 | |
| 7070 | list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { |
Alexander Shishkin | b73e4fe | 2016-04-27 18:44:45 +0300 | [diff] [blame] | 7071 | if (!all) { |
| 7072 | if (event->state < PERF_EVENT_STATE_INACTIVE) |
| 7073 | continue; |
| 7074 | if (!event_filter_match(event)) |
| 7075 | continue; |
| 7076 | } |
| 7077 | |
Jiri Olsa | 6751684 | 2013-07-09 18:56:31 +0200 | [diff] [blame] | 7078 | output(event, data); |
Jiri Olsa | 52d857a | 2013-05-06 18:27:18 +0200 | [diff] [blame] | 7079 | } |
| 7080 | } |
| 7081 | |
Peter Zijlstra | aab5b71 | 2016-05-12 17:26:46 +0200 | [diff] [blame] | 7082 | static void perf_iterate_sb_cpu(perf_iterate_f output, void *data) |
Kan Liang | f2fb6be | 2016-03-23 11:24:37 -0700 | [diff] [blame] | 7083 | { |
| 7084 | struct pmu_event_list *pel = this_cpu_ptr(&pmu_sb_events); |
| 7085 | struct perf_event *event; |
| 7086 | |
| 7087 | list_for_each_entry_rcu(event, &pel->list, sb_list) { |
Peter Zijlstra | 0b8f1e2 | 2016-08-04 14:37:24 +0200 | [diff] [blame] | 7088 | /* |
| 7089 | * Skip events that are not fully formed yet; ensure that |
| 7090 | * if we observe event->ctx, both event and ctx will be |
| 7091 | * complete enough. See perf_install_in_context(). |
| 7092 | */ |
| 7093 | if (!smp_load_acquire(&event->ctx)) |
| 7094 | continue; |
| 7095 | |
Kan Liang | f2fb6be | 2016-03-23 11:24:37 -0700 | [diff] [blame] | 7096 | if (event->state < PERF_EVENT_STATE_INACTIVE) |
| 7097 | continue; |
| 7098 | if (!event_filter_match(event)) |
| 7099 | continue; |
| 7100 | output(event, data); |
| 7101 | } |
| 7102 | } |
| 7103 | |
Peter Zijlstra | aab5b71 | 2016-05-12 17:26:46 +0200 | [diff] [blame] | 7104 | /* |
| 7105 | * Iterate all events that need to receive side-band events. |
| 7106 | * |
| 7107 |  * For new callers: ensure that account_pmu_sb_event() includes
| 7108 |  * your event; otherwise it might not get delivered.
| 7109 | */ |
Jiri Olsa | 4e93ad6 | 2015-11-04 16:00:05 +0100 | [diff] [blame] | 7110 | static void |
Peter Zijlstra | aab5b71 | 2016-05-12 17:26:46 +0200 | [diff] [blame] | 7111 | perf_iterate_sb(perf_iterate_f output, void *data, |
Jiri Olsa | 52d857a | 2013-05-06 18:27:18 +0200 | [diff] [blame] | 7112 | struct perf_event_context *task_ctx) |
| 7113 | { |
Jiri Olsa | 52d857a | 2013-05-06 18:27:18 +0200 | [diff] [blame] | 7114 | struct perf_event_context *ctx; |
Jiri Olsa | 52d857a | 2013-05-06 18:27:18 +0200 | [diff] [blame] | 7115 | int ctxn; |
| 7116 | |
Peter Zijlstra | aab5b71 | 2016-05-12 17:26:46 +0200 | [diff] [blame] | 7117 | rcu_read_lock(); |
| 7118 | preempt_disable(); |
| 7119 | |
Jiri Olsa | 4e93ad6 | 2015-11-04 16:00:05 +0100 | [diff] [blame] | 7120 | /* |
Peter Zijlstra | aab5b71 | 2016-05-12 17:26:46 +0200 | [diff] [blame] | 7121 | * If we have task_ctx != NULL we only notify the task context itself. |
| 7122 | * The task_ctx is set only for EXIT events before releasing task |
Jiri Olsa | 4e93ad6 | 2015-11-04 16:00:05 +0100 | [diff] [blame] | 7123 | * context. |
| 7124 | */ |
| 7125 | if (task_ctx) { |
Peter Zijlstra | aab5b71 | 2016-05-12 17:26:46 +0200 | [diff] [blame] | 7126 | perf_iterate_ctx(task_ctx, output, data, false); |
| 7127 | goto done; |
Jiri Olsa | 4e93ad6 | 2015-11-04 16:00:05 +0100 | [diff] [blame] | 7128 | } |
| 7129 | |
Peter Zijlstra | aab5b71 | 2016-05-12 17:26:46 +0200 | [diff] [blame] | 7130 | perf_iterate_sb_cpu(output, data); |
Kan Liang | f2fb6be | 2016-03-23 11:24:37 -0700 | [diff] [blame] | 7131 | |
| 7132 | for_each_task_context_nr(ctxn) { |
Jiri Olsa | 52d857a | 2013-05-06 18:27:18 +0200 | [diff] [blame] | 7133 | ctx = rcu_dereference(current->perf_event_ctxp[ctxn]); |
| 7134 | if (ctx) |
Peter Zijlstra | aab5b71 | 2016-05-12 17:26:46 +0200 | [diff] [blame] | 7135 | perf_iterate_ctx(ctx, output, data, false); |
Jiri Olsa | 52d857a | 2013-05-06 18:27:18 +0200 | [diff] [blame] | 7136 | } |
Peter Zijlstra | aab5b71 | 2016-05-12 17:26:46 +0200 | [diff] [blame] | 7137 | done: |
Kan Liang | f2fb6be | 2016-03-23 11:24:37 -0700 | [diff] [blame] | 7138 | preempt_enable(); |
Jiri Olsa | 52d857a | 2013-05-06 18:27:18 +0200 | [diff] [blame] | 7139 | rcu_read_unlock(); |
| 7140 | } |
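/*
 * Iteration order above: with a task_ctx only that context is walked;
 * otherwise the per-CPU side-band list is walked first, followed by the
 * current task's contexts.  Everything runs under rcu_read_lock() with
 * preemption disabled, so 'output' callbacks must not sleep.
 */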
| 7141 | |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 7142 | /* |
| 7143 |  * Clear all file-based filters at exec; they'll have to be
| 7144 |  * reinstated when/if these objects are mmapped again.
| 7145 | */ |
| 7146 | static void perf_event_addr_filters_exec(struct perf_event *event, void *data) |
| 7147 | { |
| 7148 | struct perf_addr_filters_head *ifh = perf_event_addr_filters(event); |
| 7149 | struct perf_addr_filter *filter; |
| 7150 | unsigned int restart = 0, count = 0; |
| 7151 | unsigned long flags; |
| 7152 | |
| 7153 | if (!has_addr_filter(event)) |
| 7154 | return; |
| 7155 | |
| 7156 | raw_spin_lock_irqsave(&ifh->lock, flags); |
| 7157 | list_for_each_entry(filter, &ifh->list, entry) { |
Song Liu | 9511bce | 2018-04-17 23:29:07 -0700 | [diff] [blame] | 7158 | if (filter->path.dentry) { |
Alexander Shishkin | c60f83b | 2019-02-15 13:56:55 +0200 | [diff] [blame] | 7159 | event->addr_filter_ranges[count].start = 0; |
| 7160 | event->addr_filter_ranges[count].size = 0; |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 7161 | restart++; |
| 7162 | } |
| 7163 | |
| 7164 | count++; |
| 7165 | } |
| 7166 | |
| 7167 | if (restart) |
| 7168 | event->addr_filters_gen++; |
| 7169 | raw_spin_unlock_irqrestore(&ifh->lock, flags); |
| 7170 | |
| 7171 | if (restart) |
Alexander Shishkin | 767ae08 | 2016-09-06 16:23:49 +0300 | [diff] [blame] | 7172 | perf_event_stop(event, 1); |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 7173 | } |
| 7174 | |
| 7175 | void perf_event_exec(void) |
| 7176 | { |
| 7177 | struct perf_event_context *ctx; |
| 7178 | int ctxn; |
| 7179 | |
| 7180 | rcu_read_lock(); |
| 7181 | for_each_task_context_nr(ctxn) { |
| 7182 | ctx = current->perf_event_ctxp[ctxn]; |
| 7183 | if (!ctx) |
| 7184 | continue; |
| 7185 | |
| 7186 | perf_event_enable_on_exec(ctxn); |
| 7187 | |
Peter Zijlstra | aab5b71 | 2016-05-12 17:26:46 +0200 | [diff] [blame] | 7188 | perf_iterate_ctx(ctx, perf_event_addr_filters_exec, NULL, |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 7189 | true); |
| 7190 | } |
| 7191 | rcu_read_unlock(); |
| 7192 | } |
| 7193 | |
Alexander Shishkin | 95ff4ca | 2015-12-02 18:41:11 +0200 | [diff] [blame] | 7194 | struct remote_output { |
Steven Rostedt (VMware) | 56de4e8 | 2019-12-13 13:21:30 -0500 | [diff] [blame] | 7195 | struct perf_buffer *rb; |
Alexander Shishkin | 95ff4ca | 2015-12-02 18:41:11 +0200 | [diff] [blame] | 7196 | int err; |
| 7197 | }; |
| 7198 | |
| 7199 | static void __perf_event_output_stop(struct perf_event *event, void *data) |
| 7200 | { |
| 7201 | struct perf_event *parent = event->parent; |
| 7202 | struct remote_output *ro = data; |
Steven Rostedt (VMware) | 56de4e8 | 2019-12-13 13:21:30 -0500 | [diff] [blame] | 7203 | struct perf_buffer *rb = ro->rb; |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 7204 | struct stop_event_data sd = { |
| 7205 | .event = event, |
| 7206 | }; |
Alexander Shishkin | 95ff4ca | 2015-12-02 18:41:11 +0200 | [diff] [blame] | 7207 | |
| 7208 | if (!has_aux(event)) |
| 7209 | return; |
| 7210 | |
| 7211 | if (!parent) |
| 7212 | parent = event; |
| 7213 | |
| 7214 | /* |
| 7215 | * In case of inheritance, it will be the parent that links to the |
Alexander Shishkin | 767ae08 | 2016-09-06 16:23:49 +0300 | [diff] [blame] | 7216 | * ring-buffer, but it will be the child that's actually using it. |
| 7217 | * |
| 7218 |  * We are using event::rb to determine if the event should be stopped;
| 7219 |  * however, this may race with ring_buffer_attach() (through set_output),
| 7220 | * which will make us skip the event that actually needs to be stopped. |
| 7221 | * So ring_buffer_attach() has to stop an aux event before re-assigning |
| 7222 | * its rb pointer. |
Alexander Shishkin | 95ff4ca | 2015-12-02 18:41:11 +0200 | [diff] [blame] | 7223 | */ |
| 7224 | if (rcu_dereference(parent->rb) == rb) |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 7225 | ro->err = __perf_event_stop(&sd); |
Alexander Shishkin | 95ff4ca | 2015-12-02 18:41:11 +0200 | [diff] [blame] | 7226 | } |
| 7227 | |
| 7228 | static int __perf_pmu_output_stop(void *info) |
| 7229 | { |
| 7230 | struct perf_event *event = info; |
Alexander Shishkin | f3a519e | 2019-10-22 10:39:40 +0300 | [diff] [blame] | 7231 | struct pmu *pmu = event->ctx->pmu; |
Will Deacon | 8b6a3fe | 2016-08-24 10:07:14 +0100 | [diff] [blame] | 7232 | struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context); |
Alexander Shishkin | 95ff4ca | 2015-12-02 18:41:11 +0200 | [diff] [blame] | 7233 | struct remote_output ro = { |
| 7234 | .rb = event->rb, |
| 7235 | }; |
| 7236 | |
| 7237 | rcu_read_lock(); |
Peter Zijlstra | aab5b71 | 2016-05-12 17:26:46 +0200 | [diff] [blame] | 7238 | perf_iterate_ctx(&cpuctx->ctx, __perf_event_output_stop, &ro, false); |
Alexander Shishkin | 95ff4ca | 2015-12-02 18:41:11 +0200 | [diff] [blame] | 7239 | if (cpuctx->task_ctx) |
Peter Zijlstra | aab5b71 | 2016-05-12 17:26:46 +0200 | [diff] [blame] | 7240 | perf_iterate_ctx(cpuctx->task_ctx, __perf_event_output_stop, |
Alexander Shishkin | b73e4fe | 2016-04-27 18:44:45 +0300 | [diff] [blame] | 7241 | &ro, false); |
Alexander Shishkin | 95ff4ca | 2015-12-02 18:41:11 +0200 | [diff] [blame] | 7242 | rcu_read_unlock(); |
| 7243 | |
| 7244 | return ro.err; |
| 7245 | } |
| 7246 | |
| 7247 | static void perf_pmu_output_stop(struct perf_event *event) |
| 7248 | { |
| 7249 | struct perf_event *iter; |
| 7250 | int err, cpu; |
| 7251 | |
| 7252 | restart: |
| 7253 | rcu_read_lock(); |
| 7254 | list_for_each_entry_rcu(iter, &event->rb->event_list, rb_entry) { |
| 7255 | /* |
| 7256 | * For per-CPU events, we need to make sure that neither they |
| 7257 | * nor their children are running; for cpu==-1 events it's |
| 7258 | * sufficient to stop the event itself if it's active, since |
| 7259 | * it can't have children. |
| 7260 | */ |
| 7261 | cpu = iter->cpu; |
| 7262 | if (cpu == -1) |
| 7263 | cpu = READ_ONCE(iter->oncpu); |
| 7264 | |
| 7265 | if (cpu == -1) |
| 7266 | continue; |
| 7267 | |
| 7268 | err = cpu_function_call(cpu, __perf_pmu_output_stop, event); |
| 7269 | if (err == -EAGAIN) { |
| 7270 | rcu_read_unlock(); |
| 7271 | goto restart; |
| 7272 | } |
| 7273 | } |
| 7274 | rcu_read_unlock(); |
| 7275 | } |
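/*
 * An -EAGAIN here means the event turned out to be running elsewhere by
 * the time the cross-CPU stop callback executed (see __perf_event_stop()),
 * so the whole rb->event_list walk is simply restarted.
 */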
| 7276 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7277 | /* |
| 7278 | * task tracking -- fork/exit |
| 7279 | * |
Stephane Eranian | 13d7a24 | 2013-08-21 12:10:24 +0200 | [diff] [blame] | 7280 | * enabled by: attr.comm | attr.mmap | attr.mmap2 | attr.mmap_data | attr.task |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7281 | */ |
| 7282 | |
| 7283 | struct perf_task_event { |
| 7284 | struct task_struct *task; |
| 7285 | struct perf_event_context *task_ctx; |
| 7286 | |
| 7287 | struct { |
| 7288 | struct perf_event_header header; |
| 7289 | |
| 7290 | u32 pid; |
| 7291 | u32 ppid; |
| 7292 | u32 tid; |
| 7293 | u32 ptid; |
| 7294 | u64 time; |
| 7295 | } event_id; |
| 7296 | }; |
| 7297 | |
Jiri Olsa | 6751684 | 2013-07-09 18:56:31 +0200 | [diff] [blame] | 7298 | static int perf_event_task_match(struct perf_event *event) |
| 7299 | { |
Stephane Eranian | 13d7a24 | 2013-08-21 12:10:24 +0200 | [diff] [blame] | 7300 | return event->attr.comm || event->attr.mmap || |
| 7301 | event->attr.mmap2 || event->attr.mmap_data || |
| 7302 | event->attr.task; |
Jiri Olsa | 6751684 | 2013-07-09 18:56:31 +0200 | [diff] [blame] | 7303 | } |
| 7304 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7305 | static void perf_event_task_output(struct perf_event *event, |
Jiri Olsa | 52d857a | 2013-05-06 18:27:18 +0200 | [diff] [blame] | 7306 | void *data) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7307 | { |
Jiri Olsa | 52d857a | 2013-05-06 18:27:18 +0200 | [diff] [blame] | 7308 | struct perf_task_event *task_event = data; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7309 | struct perf_output_handle handle; |
Arnaldo Carvalho de Melo | c980d10 | 2010-12-04 23:02:20 -0200 | [diff] [blame] | 7310 | struct perf_sample_data sample; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7311 | struct task_struct *task = task_event->task; |
Arnaldo Carvalho de Melo | c980d10 | 2010-12-04 23:02:20 -0200 | [diff] [blame] | 7312 | int ret, size = task_event->event_id.header.size; |
Mike Galbraith | 8bb39f9 | 2010-03-26 11:11:33 +0100 | [diff] [blame] | 7313 | |
Jiri Olsa | 6751684 | 2013-07-09 18:56:31 +0200 | [diff] [blame] | 7314 | if (!perf_event_task_match(event)) |
| 7315 | return; |
| 7316 | |
Arnaldo Carvalho de Melo | c980d10 | 2010-12-04 23:02:20 -0200 | [diff] [blame] | 7317 | perf_event_header__init_id(&task_event->event_id.header, &sample, event); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7318 | |
Arnaldo Carvalho de Melo | c980d10 | 2010-12-04 23:02:20 -0200 | [diff] [blame] | 7319 | ret = perf_output_begin(&handle, event, |
Peter Zijlstra | a7ac67e | 2011-06-27 16:47:16 +0200 | [diff] [blame] | 7320 | task_event->event_id.header.size); |
Peter Zijlstra | ef60777 | 2010-05-18 10:50:41 +0200 | [diff] [blame] | 7321 | if (ret) |
Arnaldo Carvalho de Melo | c980d10 | 2010-12-04 23:02:20 -0200 | [diff] [blame] | 7322 | goto out; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7323 | |
| 7324 | task_event->event_id.pid = perf_event_pid(event, task); |
| 7325 | task_event->event_id.ppid = perf_event_pid(event, current); |
| 7326 | |
| 7327 | task_event->event_id.tid = perf_event_tid(event, task); |
| 7328 | task_event->event_id.ptid = perf_event_tid(event, current); |
| 7329 | |
Peter Zijlstra | 34f4392 | 2015-02-20 14:05:38 +0100 | [diff] [blame] | 7330 | task_event->event_id.time = perf_event_clock(event); |
| 7331 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7332 | perf_output_put(&handle, task_event->event_id); |
| 7333 | |
Arnaldo Carvalho de Melo | c980d10 | 2010-12-04 23:02:20 -0200 | [diff] [blame] | 7334 | perf_event__output_id_sample(event, &handle, &sample); |
| 7335 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7336 | perf_output_end(&handle); |
Arnaldo Carvalho de Melo | c980d10 | 2010-12-04 23:02:20 -0200 | [diff] [blame] | 7337 | out: |
| 7338 | task_event->event_id.header.size = size; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7339 | } |
| 7340 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7341 | static void perf_event_task(struct task_struct *task, |
| 7342 | struct perf_event_context *task_ctx, |
| 7343 | int new) |
| 7344 | { |
| 7345 | struct perf_task_event task_event; |
| 7346 | |
| 7347 | if (!atomic_read(&nr_comm_events) && |
| 7348 | !atomic_read(&nr_mmap_events) && |
| 7349 | !atomic_read(&nr_task_events)) |
| 7350 | return; |
| 7351 | |
| 7352 | task_event = (struct perf_task_event){ |
| 7353 | .task = task, |
| 7354 | .task_ctx = task_ctx, |
| 7355 | .event_id = { |
| 7356 | .header = { |
| 7357 | .type = new ? PERF_RECORD_FORK : PERF_RECORD_EXIT, |
| 7358 | .misc = 0, |
| 7359 | .size = sizeof(task_event.event_id), |
| 7360 | }, |
| 7361 | /* .pid */ |
| 7362 | /* .ppid */ |
| 7363 | /* .tid */ |
| 7364 | /* .ptid */ |
Peter Zijlstra | 34f4392 | 2015-02-20 14:05:38 +0100 | [diff] [blame] | 7365 | /* .time */ |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7366 | }, |
| 7367 | }; |
| 7368 | |
Peter Zijlstra | aab5b71 | 2016-05-12 17:26:46 +0200 | [diff] [blame] | 7369 | perf_iterate_sb(perf_event_task_output, |
Jiri Olsa | 52d857a | 2013-05-06 18:27:18 +0200 | [diff] [blame] | 7370 | &task_event, |
| 7371 | task_ctx); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7372 | } |
| 7373 | |
| 7374 | void perf_event_fork(struct task_struct *task) |
| 7375 | { |
| 7376 | perf_event_task(task, NULL, 1); |
Hari Bathini | e422267 | 2017-03-08 02:11:36 +0530 | [diff] [blame] | 7377 | perf_event_namespaces(task); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7378 | } |
| 7379 | |
| 7380 | /* |
| 7381 | * comm tracking |
| 7382 | */ |
| 7383 | |
| 7384 | struct perf_comm_event { |
| 7385 | struct task_struct *task; |
| 7386 | char *comm; |
| 7387 | int comm_size; |
| 7388 | |
| 7389 | struct { |
| 7390 | struct perf_event_header header; |
| 7391 | |
| 7392 | u32 pid; |
| 7393 | u32 tid; |
| 7394 | } event_id; |
| 7395 | }; |
| 7396 | |
Jiri Olsa | 6751684 | 2013-07-09 18:56:31 +0200 | [diff] [blame] | 7397 | static int perf_event_comm_match(struct perf_event *event) |
| 7398 | { |
| 7399 | return event->attr.comm; |
| 7400 | } |
| 7401 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7402 | static void perf_event_comm_output(struct perf_event *event, |
Jiri Olsa | 52d857a | 2013-05-06 18:27:18 +0200 | [diff] [blame] | 7403 | void *data) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7404 | { |
Jiri Olsa | 52d857a | 2013-05-06 18:27:18 +0200 | [diff] [blame] | 7405 | struct perf_comm_event *comm_event = data; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7406 | struct perf_output_handle handle; |
Arnaldo Carvalho de Melo | c980d10 | 2010-12-04 23:02:20 -0200 | [diff] [blame] | 7407 | struct perf_sample_data sample; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7408 | int size = comm_event->event_id.header.size; |
Arnaldo Carvalho de Melo | c980d10 | 2010-12-04 23:02:20 -0200 | [diff] [blame] | 7409 | int ret; |
| 7410 | |
Jiri Olsa | 6751684 | 2013-07-09 18:56:31 +0200 | [diff] [blame] | 7411 | if (!perf_event_comm_match(event)) |
| 7412 | return; |
| 7413 | |
Arnaldo Carvalho de Melo | c980d10 | 2010-12-04 23:02:20 -0200 | [diff] [blame] | 7414 | perf_event_header__init_id(&comm_event->event_id.header, &sample, event); |
| 7415 | ret = perf_output_begin(&handle, event, |
Peter Zijlstra | a7ac67e | 2011-06-27 16:47:16 +0200 | [diff] [blame] | 7416 | comm_event->event_id.header.size); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7417 | |
| 7418 | if (ret) |
Arnaldo Carvalho de Melo | c980d10 | 2010-12-04 23:02:20 -0200 | [diff] [blame] | 7419 | goto out; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7420 | |
| 7421 | comm_event->event_id.pid = perf_event_pid(event, comm_event->task); |
| 7422 | comm_event->event_id.tid = perf_event_tid(event, comm_event->task); |
| 7423 | |
| 7424 | perf_output_put(&handle, comm_event->event_id); |
Frederic Weisbecker | 7636913 | 2011-05-19 19:55:04 +0200 | [diff] [blame] | 7425 | __output_copy(&handle, comm_event->comm, |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7426 | comm_event->comm_size); |
Arnaldo Carvalho de Melo | c980d10 | 2010-12-04 23:02:20 -0200 | [diff] [blame] | 7427 | |
| 7428 | perf_event__output_id_sample(event, &handle, &sample); |
| 7429 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7430 | perf_output_end(&handle); |
Arnaldo Carvalho de Melo | c980d10 | 2010-12-04 23:02:20 -0200 | [diff] [blame] | 7431 | out: |
| 7432 | comm_event->event_id.header.size = size; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7433 | } |
| 7434 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7435 | static void perf_event_comm_event(struct perf_comm_event *comm_event) |
| 7436 | { |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7437 | char comm[TASK_COMM_LEN]; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7438 | unsigned int size; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7439 | |
| 7440 | memset(comm, 0, sizeof(comm)); |
Márton Németh | 96b02d7 | 2009-11-21 23:10:15 +0100 | [diff] [blame] | 7441 | strlcpy(comm, comm_event->task->comm, sizeof(comm)); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7442 | size = ALIGN(strlen(comm)+1, sizeof(u64)); |
| 7443 | |
| 7444 | comm_event->comm = comm; |
| 7445 | comm_event->comm_size = size; |
| 7446 | |
| 7447 | comm_event->event_id.header.size = sizeof(comm_event->event_id) + size; |
Peter Zijlstra | 8dc85d547 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 7448 | |
Peter Zijlstra | aab5b71 | 2016-05-12 17:26:46 +0200 | [diff] [blame] | 7449 | perf_iterate_sb(perf_event_comm_output, |
Jiri Olsa | 52d857a | 2013-05-06 18:27:18 +0200 | [diff] [blame] | 7450 | comm_event, |
| 7451 | NULL); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7452 | } |
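/*
 * The comm string is copied into a fixed TASK_COMM_LEN scratch buffer
 * and its length is rounded up to a u64 multiple, so the record stays
 * 8-byte aligned and never leaks uninitialized bytes (the buffer is
 * zeroed first).
 */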
| 7453 | |
Adrian Hunter | 82b8977 | 2014-05-28 11:45:04 +0300 | [diff] [blame] | 7454 | void perf_event_comm(struct task_struct *task, bool exec) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7455 | { |
| 7456 | struct perf_comm_event comm_event; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7457 | |
| 7458 | if (!atomic_read(&nr_comm_events)) |
| 7459 | return; |
| 7460 | |
| 7461 | comm_event = (struct perf_comm_event){ |
| 7462 | .task = task, |
| 7463 | /* .comm */ |
| 7464 | /* .comm_size */ |
| 7465 | .event_id = { |
| 7466 | .header = { |
| 7467 | .type = PERF_RECORD_COMM, |
Adrian Hunter | 82b8977 | 2014-05-28 11:45:04 +0300 | [diff] [blame] | 7468 | .misc = exec ? PERF_RECORD_MISC_COMM_EXEC : 0, |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7469 | /* .size */ |
| 7470 | }, |
| 7471 | /* .pid */ |
| 7472 | /* .tid */ |
| 7473 | }, |
| 7474 | }; |
| 7475 | |
| 7476 | perf_event_comm_event(&comm_event); |
| 7477 | } |
| 7478 | |
| 7479 | /* |
Hari Bathini | e422267 | 2017-03-08 02:11:36 +0530 | [diff] [blame] | 7480 | * namespaces tracking |
| 7481 | */ |
| 7482 | |
| 7483 | struct perf_namespaces_event { |
| 7484 | struct task_struct *task; |
| 7485 | |
| 7486 | struct { |
| 7487 | struct perf_event_header header; |
| 7488 | |
| 7489 | u32 pid; |
| 7490 | u32 tid; |
| 7491 | u64 nr_namespaces; |
| 7492 | struct perf_ns_link_info link_info[NR_NAMESPACES]; |
| 7493 | } event_id; |
| 7494 | }; |
| 7495 | |
| 7496 | static int perf_event_namespaces_match(struct perf_event *event) |
| 7497 | { |
| 7498 | return event->attr.namespaces; |
| 7499 | } |
| 7500 | |
| 7501 | static void perf_event_namespaces_output(struct perf_event *event, |
| 7502 | void *data) |
| 7503 | { |
| 7504 | struct perf_namespaces_event *namespaces_event = data; |
| 7505 | struct perf_output_handle handle; |
| 7506 | struct perf_sample_data sample; |
Jiri Olsa | 34900ec | 2017-08-09 18:14:06 +0200 | [diff] [blame] | 7507 | u16 header_size = namespaces_event->event_id.header.size; |
Hari Bathini | e422267 | 2017-03-08 02:11:36 +0530 | [diff] [blame] | 7508 | int ret; |
| 7509 | |
| 7510 | if (!perf_event_namespaces_match(event)) |
| 7511 | return; |
| 7512 | |
| 7513 | perf_event_header__init_id(&namespaces_event->event_id.header, |
| 7514 | &sample, event); |
| 7515 | ret = perf_output_begin(&handle, event, |
| 7516 | namespaces_event->event_id.header.size); |
| 7517 | if (ret) |
Jiri Olsa | 34900ec | 2017-08-09 18:14:06 +0200 | [diff] [blame] | 7518 | goto out; |
Hari Bathini | e422267 | 2017-03-08 02:11:36 +0530 | [diff] [blame] | 7519 | |
| 7520 | namespaces_event->event_id.pid = perf_event_pid(event, |
| 7521 | namespaces_event->task); |
| 7522 | namespaces_event->event_id.tid = perf_event_tid(event, |
| 7523 | namespaces_event->task); |
| 7524 | |
| 7525 | perf_output_put(&handle, namespaces_event->event_id); |
| 7526 | |
| 7527 | perf_event__output_id_sample(event, &handle, &sample); |
| 7528 | |
| 7529 | perf_output_end(&handle); |
Jiri Olsa | 34900ec | 2017-08-09 18:14:06 +0200 | [diff] [blame] | 7530 | out: |
| 7531 | namespaces_event->event_id.header.size = header_size; |
Hari Bathini | e422267 | 2017-03-08 02:11:36 +0530 | [diff] [blame] | 7532 | } |
| 7533 | |
| 7534 | static void perf_fill_ns_link_info(struct perf_ns_link_info *ns_link_info, |
| 7535 | struct task_struct *task, |
| 7536 | const struct proc_ns_operations *ns_ops) |
| 7537 | { |
| 7538 | struct path ns_path; |
| 7539 | struct inode *ns_inode; |
Aleksa Sarai | ce623f8 | 2019-12-07 01:13:27 +1100 | [diff] [blame] | 7540 | int error; |
Hari Bathini | e422267 | 2017-03-08 02:11:36 +0530 | [diff] [blame] | 7541 | |
| 7542 | error = ns_get_path(&ns_path, task, ns_ops); |
| 7543 | if (!error) { |
| 7544 | ns_inode = ns_path.dentry->d_inode; |
| 7545 | ns_link_info->dev = new_encode_dev(ns_inode->i_sb->s_dev); |
| 7546 | ns_link_info->ino = ns_inode->i_ino; |
Vasily Averin | 0e18dd1 | 2017-11-15 08:47:02 +0300 | [diff] [blame] | 7547 | path_put(&ns_path); |
Hari Bathini | e422267 | 2017-03-08 02:11:36 +0530 | [diff] [blame] | 7548 | } |
| 7549 | } |
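/*
 * If ns_get_path() fails, the corresponding link_info slot is left
 * untouched; the caller zero-initializes the whole event, so an
 * unavailable namespace is reported as dev == 0, ino == 0.
 */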
| 7550 | |
| 7551 | void perf_event_namespaces(struct task_struct *task) |
| 7552 | { |
| 7553 | struct perf_namespaces_event namespaces_event; |
| 7554 | struct perf_ns_link_info *ns_link_info; |
| 7555 | |
| 7556 | if (!atomic_read(&nr_namespaces_events)) |
| 7557 | return; |
| 7558 | |
| 7559 | namespaces_event = (struct perf_namespaces_event){ |
| 7560 | .task = task, |
| 7561 | .event_id = { |
| 7562 | .header = { |
| 7563 | .type = PERF_RECORD_NAMESPACES, |
| 7564 | .misc = 0, |
| 7565 | .size = sizeof(namespaces_event.event_id), |
| 7566 | }, |
| 7567 | /* .pid */ |
| 7568 | /* .tid */ |
| 7569 | .nr_namespaces = NR_NAMESPACES, |
| 7570 | /* .link_info[NR_NAMESPACES] */ |
| 7571 | }, |
| 7572 | }; |
| 7573 | |
| 7574 | ns_link_info = namespaces_event.event_id.link_info; |
| 7575 | |
| 7576 | perf_fill_ns_link_info(&ns_link_info[MNT_NS_INDEX], |
| 7577 | task, &mntns_operations); |
| 7578 | |
| 7579 | #ifdef CONFIG_USER_NS |
| 7580 | perf_fill_ns_link_info(&ns_link_info[USER_NS_INDEX], |
| 7581 | task, &userns_operations); |
| 7582 | #endif |
| 7583 | #ifdef CONFIG_NET_NS |
| 7584 | perf_fill_ns_link_info(&ns_link_info[NET_NS_INDEX], |
| 7585 | task, &netns_operations); |
| 7586 | #endif |
| 7587 | #ifdef CONFIG_UTS_NS |
| 7588 | perf_fill_ns_link_info(&ns_link_info[UTS_NS_INDEX], |
| 7589 | task, &utsns_operations); |
| 7590 | #endif |
| 7591 | #ifdef CONFIG_IPC_NS |
| 7592 | perf_fill_ns_link_info(&ns_link_info[IPC_NS_INDEX], |
| 7593 | task, &ipcns_operations); |
| 7594 | #endif |
| 7595 | #ifdef CONFIG_PID_NS |
| 7596 | perf_fill_ns_link_info(&ns_link_info[PID_NS_INDEX], |
| 7597 | task, &pidns_operations); |
| 7598 | #endif |
| 7599 | #ifdef CONFIG_CGROUPS |
| 7600 | perf_fill_ns_link_info(&ns_link_info[CGROUP_NS_INDEX], |
| 7601 | task, &cgroupns_operations); |
| 7602 | #endif |
| 7603 | |
| 7604 | perf_iterate_sb(perf_event_namespaces_output, |
| 7605 | &namespaces_event, |
| 7606 | NULL); |
| 7607 | } |
| 7608 | |
| 7609 | /* |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7610 | * mmap tracking |
| 7611 | */ |
| 7612 | |
| 7613 | struct perf_mmap_event { |
| 7614 | struct vm_area_struct *vma; |
| 7615 | |
| 7616 | const char *file_name; |
| 7617 | int file_size; |
Stephane Eranian | 13d7a24 | 2013-08-21 12:10:24 +0200 | [diff] [blame] | 7618 | int maj, min; |
| 7619 | u64 ino; |
| 7620 | u64 ino_generation; |
Peter Zijlstra | f972eb6 | 2014-05-19 15:13:47 -0400 | [diff] [blame] | 7621 | u32 prot, flags; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7622 | |
| 7623 | struct { |
| 7624 | struct perf_event_header header; |
| 7625 | |
| 7626 | u32 pid; |
| 7627 | u32 tid; |
| 7628 | u64 start; |
| 7629 | u64 len; |
| 7630 | u64 pgoff; |
| 7631 | } event_id; |
| 7632 | }; |
| 7633 | |
Jiri Olsa | 6751684 | 2013-07-09 18:56:31 +0200 | [diff] [blame] | 7634 | static int perf_event_mmap_match(struct perf_event *event, |
| 7635 | void *data) |
| 7636 | { |
| 7637 | struct perf_mmap_event *mmap_event = data; |
| 7638 | struct vm_area_struct *vma = mmap_event->vma; |
| 7639 | int executable = vma->vm_flags & VM_EXEC; |
| 7640 | |
| 7641 | return (!executable && event->attr.mmap_data) || |
Stephane Eranian | 13d7a24 | 2013-08-21 12:10:24 +0200 | [diff] [blame] | 7642 | (executable && (event->attr.mmap || event->attr.mmap2)); |
Jiri Olsa | 6751684 | 2013-07-09 18:56:31 +0200 | [diff] [blame] | 7643 | } |
| 7644 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7645 | static void perf_event_mmap_output(struct perf_event *event, |
Jiri Olsa | 52d857a | 2013-05-06 18:27:18 +0200 | [diff] [blame] | 7646 | void *data) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7647 | { |
Jiri Olsa | 52d857a | 2013-05-06 18:27:18 +0200 | [diff] [blame] | 7648 | struct perf_mmap_event *mmap_event = data; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7649 | struct perf_output_handle handle; |
Arnaldo Carvalho de Melo | c980d10 | 2010-12-04 23:02:20 -0200 | [diff] [blame] | 7650 | struct perf_sample_data sample; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7651 | int size = mmap_event->event_id.header.size; |
Stephane Eranian | d9c1bb2 | 2019-03-07 10:52:33 -0800 | [diff] [blame] | 7652 | u32 type = mmap_event->event_id.header.type; |
Arnaldo Carvalho de Melo | c980d10 | 2010-12-04 23:02:20 -0200 | [diff] [blame] | 7653 | int ret; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7654 | |
Jiri Olsa | 6751684 | 2013-07-09 18:56:31 +0200 | [diff] [blame] | 7655 | if (!perf_event_mmap_match(event, data)) |
| 7656 | return; |
| 7657 | |
Stephane Eranian | 13d7a24 | 2013-08-21 12:10:24 +0200 | [diff] [blame] | 7658 | if (event->attr.mmap2) { |
| 7659 | mmap_event->event_id.header.type = PERF_RECORD_MMAP2; |
| 7660 | mmap_event->event_id.header.size += sizeof(mmap_event->maj); |
| 7661 | mmap_event->event_id.header.size += sizeof(mmap_event->min); |
| 7662 | mmap_event->event_id.header.size += sizeof(mmap_event->ino); |
Arnaldo Carvalho de Melo | d008d52 | 2013-09-10 10:24:05 -0300 | [diff] [blame] | 7663 | mmap_event->event_id.header.size += sizeof(mmap_event->ino_generation); |
Peter Zijlstra | f972eb6 | 2014-05-19 15:13:47 -0400 | [diff] [blame] | 7664 | mmap_event->event_id.header.size += sizeof(mmap_event->prot); |
| 7665 | mmap_event->event_id.header.size += sizeof(mmap_event->flags); |
Stephane Eranian | 13d7a24 | 2013-08-21 12:10:24 +0200 | [diff] [blame] | 7666 | } |
| 7667 | |
Arnaldo Carvalho de Melo | c980d10 | 2010-12-04 23:02:20 -0200 | [diff] [blame] | 7668 | perf_event_header__init_id(&mmap_event->event_id.header, &sample, event); |
| 7669 | ret = perf_output_begin(&handle, event, |
Peter Zijlstra | a7ac67e | 2011-06-27 16:47:16 +0200 | [diff] [blame] | 7670 | mmap_event->event_id.header.size); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7671 | if (ret) |
Arnaldo Carvalho de Melo | c980d10 | 2010-12-04 23:02:20 -0200 | [diff] [blame] | 7672 | goto out; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7673 | |
| 7674 | mmap_event->event_id.pid = perf_event_pid(event, current); |
| 7675 | mmap_event->event_id.tid = perf_event_tid(event, current); |
| 7676 | |
| 7677 | perf_output_put(&handle, mmap_event->event_id); |
Stephane Eranian | 13d7a24 | 2013-08-21 12:10:24 +0200 | [diff] [blame] | 7678 | |
| 7679 | if (event->attr.mmap2) { |
| 7680 | perf_output_put(&handle, mmap_event->maj); |
| 7681 | perf_output_put(&handle, mmap_event->min); |
| 7682 | perf_output_put(&handle, mmap_event->ino); |
| 7683 | perf_output_put(&handle, mmap_event->ino_generation); |
Peter Zijlstra | f972eb6 | 2014-05-19 15:13:47 -0400 | [diff] [blame] | 7684 | perf_output_put(&handle, mmap_event->prot); |
| 7685 | perf_output_put(&handle, mmap_event->flags); |
Stephane Eranian | 13d7a24 | 2013-08-21 12:10:24 +0200 | [diff] [blame] | 7686 | } |
| 7687 | |
Frederic Weisbecker | 7636913 | 2011-05-19 19:55:04 +0200 | [diff] [blame] | 7688 | __output_copy(&handle, mmap_event->file_name, |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7689 | mmap_event->file_size); |
Arnaldo Carvalho de Melo | c980d10 | 2010-12-04 23:02:20 -0200 | [diff] [blame] | 7690 | |
| 7691 | perf_event__output_id_sample(event, &handle, &sample); |
| 7692 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7693 | perf_output_end(&handle); |
Arnaldo Carvalho de Melo | c980d10 | 2010-12-04 23:02:20 -0200 | [diff] [blame] | 7694 | out: |
| 7695 | mmap_event->event_id.header.size = size; |
Stephane Eranian | d9c1bb2 | 2019-03-07 10:52:33 -0800 | [diff] [blame] | 7696 | mmap_event->event_id.header.type = type; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7697 | } |
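/*
 * Note that the header size and type are restored on the way out: the
 * same mmap_event is reused for every matching event, and a mix of
 * attr.mmap and attr.mmap2 consumers must each see a correctly sized
 * PERF_RECORD_MMAP or PERF_RECORD_MMAP2 record.
 */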
| 7698 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7699 | static void perf_event_mmap_event(struct perf_mmap_event *mmap_event) |
| 7700 | { |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7701 | struct vm_area_struct *vma = mmap_event->vma; |
| 7702 | struct file *file = vma->vm_file; |
Stephane Eranian | 13d7a24 | 2013-08-21 12:10:24 +0200 | [diff] [blame] | 7703 | int maj = 0, min = 0; |
| 7704 | u64 ino = 0, gen = 0; |
Peter Zijlstra | f972eb6 | 2014-05-19 15:13:47 -0400 | [diff] [blame] | 7705 | u32 prot = 0, flags = 0; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7706 | unsigned int size; |
| 7707 | char tmp[16]; |
| 7708 | char *buf = NULL; |
Peter Zijlstra | 2c42cfbf | 2013-10-17 00:06:46 +0200 | [diff] [blame] | 7709 | char *name; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7710 | |
Peter Zijlstra | 0b3589b | 2017-01-26 23:15:08 +0100 | [diff] [blame] | 7711 | if (vma->vm_flags & VM_READ) |
| 7712 | prot |= PROT_READ; |
| 7713 | if (vma->vm_flags & VM_WRITE) |
| 7714 | prot |= PROT_WRITE; |
| 7715 | if (vma->vm_flags & VM_EXEC) |
| 7716 | prot |= PROT_EXEC; |
| 7717 | |
| 7718 | if (vma->vm_flags & VM_MAYSHARE) |
| 7719 | flags = MAP_SHARED; |
| 7720 | else |
| 7721 | flags = MAP_PRIVATE; |
| 7722 | |
| 7723 | if (vma->vm_flags & VM_DENYWRITE) |
| 7724 | flags |= MAP_DENYWRITE; |
| 7725 | if (vma->vm_flags & VM_MAYEXEC) |
| 7726 | flags |= MAP_EXECUTABLE; |
| 7727 | if (vma->vm_flags & VM_LOCKED) |
| 7728 | flags |= MAP_LOCKED; |
| 7729 | if (vma->vm_flags & VM_HUGETLB) |
| 7730 | flags |= MAP_HUGETLB; |
| 7731 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7732 | if (file) { |
Stephane Eranian | 13d7a24 | 2013-08-21 12:10:24 +0200 | [diff] [blame] | 7733 | struct inode *inode; |
| 7734 | dev_t dev; |
Oleg Nesterov | 3ea2f2b | 2013-10-16 22:10:04 +0200 | [diff] [blame] | 7735 | |
Peter Zijlstra | 2c42cfbf | 2013-10-17 00:06:46 +0200 | [diff] [blame] | 7736 | buf = kmalloc(PATH_MAX, GFP_KERNEL); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7737 | if (!buf) { |
Oleg Nesterov | c7e548b | 2013-10-17 20:24:17 +0200 | [diff] [blame] | 7738 | name = "//enomem"; |
| 7739 | goto cpy_name; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7740 | } |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7741 | /* |
Oleg Nesterov | 3ea2f2b | 2013-10-16 22:10:04 +0200 | [diff] [blame] | 7742 |          * file_path() fills the buffer from the end backwards, so we
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7743 | * need to add enough zero bytes after the string to handle |
| 7744 | * the 64bit alignment we do later. |
| 7745 | */ |
Miklos Szeredi | 9bf39ab | 2015-06-19 10:29:13 +0200 | [diff] [blame] | 7746 | name = file_path(file, buf, PATH_MAX - sizeof(u64)); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7747 | if (IS_ERR(name)) { |
Oleg Nesterov | c7e548b | 2013-10-17 20:24:17 +0200 | [diff] [blame] | 7748 | name = "//toolong"; |
| 7749 | goto cpy_name; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7750 | } |
Stephane Eranian | 13d7a24 | 2013-08-21 12:10:24 +0200 | [diff] [blame] | 7751 | inode = file_inode(vma->vm_file); |
| 7752 | dev = inode->i_sb->s_dev; |
| 7753 | ino = inode->i_ino; |
| 7754 | gen = inode->i_generation; |
| 7755 | maj = MAJOR(dev); |
| 7756 | min = MINOR(dev); |
Peter Zijlstra | f972eb6 | 2014-05-19 15:13:47 -0400 | [diff] [blame] | 7757 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7758 | goto got_name; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7759 | } else { |
Jiri Olsa | fbe26ab | 2014-07-14 17:57:19 +0200 | [diff] [blame] | 7760 | if (vma->vm_ops && vma->vm_ops->name) { |
| 7761 | name = (char *) vma->vm_ops->name(vma); |
| 7762 | if (name) |
| 7763 | goto cpy_name; |
| 7764 | } |
| 7765 | |
Peter Zijlstra | 2c42cfbf | 2013-10-17 00:06:46 +0200 | [diff] [blame] | 7766 | name = (char *)arch_vma_name(vma); |
Oleg Nesterov | c7e548b | 2013-10-17 20:24:17 +0200 | [diff] [blame] | 7767 | if (name) |
| 7768 | goto cpy_name; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7769 | |
Oleg Nesterov | 32c5fb7 | 2013-10-16 22:09:45 +0200 | [diff] [blame] | 7770 | if (vma->vm_start <= vma->vm_mm->start_brk && |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7771 | vma->vm_end >= vma->vm_mm->brk) { |
Oleg Nesterov | c7e548b | 2013-10-17 20:24:17 +0200 | [diff] [blame] | 7772 | name = "[heap]"; |
| 7773 | goto cpy_name; |
Oleg Nesterov | 32c5fb7 | 2013-10-16 22:09:45 +0200 | [diff] [blame] | 7774 | } |
| 7775 | if (vma->vm_start <= vma->vm_mm->start_stack && |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7776 | vma->vm_end >= vma->vm_mm->start_stack) { |
Oleg Nesterov | c7e548b | 2013-10-17 20:24:17 +0200 | [diff] [blame] | 7777 | name = "[stack]"; |
| 7778 | goto cpy_name; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7779 | } |
| 7780 | |
Oleg Nesterov | c7e548b | 2013-10-17 20:24:17 +0200 | [diff] [blame] | 7781 | name = "//anon"; |
| 7782 | goto cpy_name; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7783 | } |
| 7784 | |
Oleg Nesterov | c7e548b | 2013-10-17 20:24:17 +0200 | [diff] [blame] | 7785 | cpy_name: |
| 7786 | strlcpy(tmp, name, sizeof(tmp)); |
| 7787 | name = tmp; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7788 | got_name: |
Peter Zijlstra | 2c42cfbf | 2013-10-17 00:06:46 +0200 | [diff] [blame] | 7789 | /* |
| 7790 | * Since our buffer works in 8 byte units we need to align our string |
| 7791 | * size to a multiple of 8. However, we must guarantee the tail end is |
| 7792 |  * zeroed out to avoid leaking random bits to userspace.
| 7793 | */ |
| 7794 | size = strlen(name)+1; |
| 7795 | while (!IS_ALIGNED(size, sizeof(u64))) |
| 7796 | name[size++] = '\0'; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7797 | |
| 7798 | mmap_event->file_name = name; |
| 7799 | mmap_event->file_size = size; |
Stephane Eranian | 13d7a24 | 2013-08-21 12:10:24 +0200 | [diff] [blame] | 7800 | mmap_event->maj = maj; |
| 7801 | mmap_event->min = min; |
| 7802 | mmap_event->ino = ino; |
| 7803 | mmap_event->ino_generation = gen; |
Peter Zijlstra | f972eb6 | 2014-05-19 15:13:47 -0400 | [diff] [blame] | 7804 | mmap_event->prot = prot; |
| 7805 | mmap_event->flags = flags; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7806 | |
Stephane Eranian | 2fe8542 | 2013-01-24 16:10:39 +0100 | [diff] [blame] | 7807 | if (!(vma->vm_flags & VM_EXEC)) |
| 7808 | mmap_event->event_id.header.misc |= PERF_RECORD_MISC_MMAP_DATA; |
| 7809 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7810 | mmap_event->event_id.header.size = sizeof(mmap_event->event_id) + size; |
| 7811 | |
Peter Zijlstra | aab5b71 | 2016-05-12 17:26:46 +0200 | [diff] [blame] | 7812 | perf_iterate_sb(perf_event_mmap_output, |
Jiri Olsa | 52d857a | 2013-05-06 18:27:18 +0200 | [diff] [blame] | 7813 | mmap_event, |
| 7814 | NULL); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7815 | |
| 7816 | kfree(buf); |
| 7817 | } |
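/*
 * Name resolution above, in order of preference: the file path via
 * file_path(), then vma->vm_ops->name(), then arch_vma_name(), then the
 * synthetic "[heap]"/"[stack]" markers, and finally "//anon".  Whatever
 * is chosen gets NUL-padded to a u64 multiple before being emitted.
 */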
| 7818 | |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 7819 | /* |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 7820 | * Check whether inode and address range match filter criteria. |
| 7821 | */ |
| 7822 | static bool perf_addr_filter_match(struct perf_addr_filter *filter, |
| 7823 | struct file *file, unsigned long offset, |
| 7824 | unsigned long size) |
| 7825 | { |
Mathieu Poirier | 7f635ff | 2018-07-16 17:13:51 -0600 | [diff] [blame] | 7826 | /* d_inode(NULL) won't be equal to any mapped user-space file */ |
| 7827 | if (!filter->path.dentry) |
| 7828 | return false; |
| 7829 | |
Song Liu | 9511bce | 2018-04-17 23:29:07 -0700 | [diff] [blame] | 7830 | if (d_inode(filter->path.dentry) != file_inode(file)) |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 7831 | return false; |
| 7832 | |
| 7833 | if (filter->offset > offset + size) |
| 7834 | return false; |
| 7835 | |
| 7836 | if (filter->offset + filter->size < offset) |
| 7837 | return false; |
| 7838 | |
| 7839 | return true; |
| 7840 | } |
| 7841 | |
Alexander Shishkin | c60f83b | 2019-02-15 13:56:55 +0200 | [diff] [blame] | 7842 | static bool perf_addr_filter_vma_adjust(struct perf_addr_filter *filter, |
| 7843 | struct vm_area_struct *vma, |
| 7844 | struct perf_addr_filter_range *fr) |
| 7845 | { |
| 7846 | unsigned long vma_size = vma->vm_end - vma->vm_start; |
| 7847 | unsigned long off = vma->vm_pgoff << PAGE_SHIFT; |
| 7848 | struct file *file = vma->vm_file; |
| 7849 | |
| 7850 | if (!perf_addr_filter_match(filter, file, off, vma_size)) |
| 7851 | return false; |
| 7852 | |
| 7853 | if (filter->offset < off) { |
| 7854 | fr->start = vma->vm_start; |
| 7855 | fr->size = min(vma_size, filter->size - (off - filter->offset)); |
| 7856 | } else { |
| 7857 | fr->start = vma->vm_start + filter->offset - off; |
| 7858 | fr->size = min(vma->vm_end - fr->start, filter->size); |
| 7859 | } |
| 7860 | |
| 7861 | return true; |
| 7862 | } |
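/*
 * Illustrative example (hypothetical numbers): a filter with offset
 * 0x1800 and size 0x100, matched against a vma mapping file offset
 * 0x1000 at [0x400000, 0x401000), yields fr->start = 0x400800 and
 * fr->size = 0x100; i.e. file offsets are translated into a virtual
 * address range and clipped to the vma boundaries.
 */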
| 7863 | |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 7864 | static void __perf_addr_filters_adjust(struct perf_event *event, void *data) |
| 7865 | { |
| 7866 | struct perf_addr_filters_head *ifh = perf_event_addr_filters(event); |
| 7867 | struct vm_area_struct *vma = data; |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 7868 | struct perf_addr_filter *filter; |
| 7869 | unsigned int restart = 0, count = 0; |
Alexander Shishkin | c60f83b | 2019-02-15 13:56:55 +0200 | [diff] [blame] | 7870 | unsigned long flags; |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 7871 | |
| 7872 | if (!has_addr_filter(event)) |
| 7873 | return; |
| 7874 | |
Alexander Shishkin | c60f83b | 2019-02-15 13:56:55 +0200 | [diff] [blame] | 7875 | if (!vma->vm_file) |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 7876 | return; |
| 7877 | |
| 7878 | raw_spin_lock_irqsave(&ifh->lock, flags); |
| 7879 | list_for_each_entry(filter, &ifh->list, entry) { |
Alexander Shishkin | c60f83b | 2019-02-15 13:56:55 +0200 | [diff] [blame] | 7880 | if (perf_addr_filter_vma_adjust(filter, vma, |
| 7881 | &event->addr_filter_ranges[count])) |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 7882 | restart++; |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 7883 | |
| 7884 | count++; |
| 7885 | } |
| 7886 | |
| 7887 | if (restart) |
| 7888 | event->addr_filters_gen++; |
| 7889 | raw_spin_unlock_irqrestore(&ifh->lock, flags); |
| 7890 | |
| 7891 | if (restart) |
Alexander Shishkin | 767ae08 | 2016-09-06 16:23:49 +0300 | [diff] [blame] | 7892 | perf_event_stop(event, 1); |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 7893 | } |
| 7894 | |
| 7895 | /* |
| 7896 | * Adjust all task's events' filters to the new vma |
| 7897 | */ |
| 7898 | static void perf_addr_filters_adjust(struct vm_area_struct *vma) |
| 7899 | { |
| 7900 | struct perf_event_context *ctx; |
| 7901 | int ctxn; |
| 7902 | |
Mathieu Poirier | 12b40a2 | 2016-07-18 10:43:06 -0600 | [diff] [blame] | 7903 | /* |
| 7904 |  * Data tracing isn't supported yet, so there is no need
| 7905 |  * to keep track of anything that isn't related to executable code:
| 7906 | */ |
| 7907 | if (!(vma->vm_flags & VM_EXEC)) |
| 7908 | return; |
| 7909 | |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 7910 | rcu_read_lock(); |
| 7911 | for_each_task_context_nr(ctxn) { |
| 7912 | ctx = rcu_dereference(current->perf_event_ctxp[ctxn]); |
| 7913 | if (!ctx) |
| 7914 | continue; |
| 7915 | |
Peter Zijlstra | aab5b71 | 2016-05-12 17:26:46 +0200 | [diff] [blame] | 7916 | perf_iterate_ctx(ctx, __perf_addr_filters_adjust, vma, true); |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 7917 | } |
| 7918 | rcu_read_unlock(); |
| 7919 | } |
| 7920 | |
Eric B Munson | 3af9e85 | 2010-05-18 15:30:49 +0100 | [diff] [blame] | 7921 | void perf_event_mmap(struct vm_area_struct *vma) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7922 | { |
| 7923 | struct perf_mmap_event mmap_event; |
| 7924 | |
| 7925 | if (!atomic_read(&nr_mmap_events)) |
| 7926 | return; |
| 7927 | |
| 7928 | mmap_event = (struct perf_mmap_event){ |
| 7929 | .vma = vma, |
| 7930 | /* .file_name */ |
| 7931 | /* .file_size */ |
| 7932 | .event_id = { |
| 7933 | .header = { |
| 7934 | .type = PERF_RECORD_MMAP, |
Zhang, Yanmin | 39447b3 | 2010-04-19 13:32:41 +0800 | [diff] [blame] | 7935 | .misc = PERF_RECORD_MISC_USER, |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7936 | /* .size */ |
| 7937 | }, |
| 7938 | /* .pid */ |
| 7939 | /* .tid */ |
| 7940 | .start = vma->vm_start, |
| 7941 | .len = vma->vm_end - vma->vm_start, |
Peter Zijlstra | 3a0304e | 2010-02-26 10:33:41 +0100 | [diff] [blame] | 7942 | .pgoff = (u64)vma->vm_pgoff << PAGE_SHIFT, |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7943 | }, |
Stephane Eranian | 13d7a24 | 2013-08-21 12:10:24 +0200 | [diff] [blame] | 7944 | /* .maj (attr_mmap2 only) */ |
| 7945 | /* .min (attr_mmap2 only) */ |
| 7946 | /* .ino (attr_mmap2 only) */ |
| 7947 | /* .ino_generation (attr_mmap2 only) */ |
Peter Zijlstra | f972eb6 | 2014-05-19 15:13:47 -0400 | [diff] [blame] | 7948 | /* .prot (attr_mmap2 only) */ |
| 7949 | /* .flags (attr_mmap2 only) */ |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7950 | }; |
| 7951 | |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 7952 | perf_addr_filters_adjust(vma); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7953 | perf_event_mmap_event(&mmap_event); |
| 7954 | } |
| 7955 | |
Alexander Shishkin | 68db7e9 | 2015-01-14 14:18:15 +0200 | [diff] [blame] | 7956 | void perf_event_aux_event(struct perf_event *event, unsigned long head, |
| 7957 | unsigned long size, u64 flags) |
| 7958 | { |
| 7959 | struct perf_output_handle handle; |
| 7960 | struct perf_sample_data sample; |
| 7961 | struct perf_aux_event { |
| 7962 | struct perf_event_header header; |
| 7963 | u64 offset; |
| 7964 | u64 size; |
| 7965 | u64 flags; |
| 7966 | } rec = { |
| 7967 | .header = { |
| 7968 | .type = PERF_RECORD_AUX, |
| 7969 | .misc = 0, |
| 7970 | .size = sizeof(rec), |
| 7971 | }, |
| 7972 | .offset = head, |
| 7973 | .size = size, |
| 7974 | .flags = flags, |
| 7975 | }; |
| 7976 | int ret; |
| 7977 | |
| 7978 | perf_event_header__init_id(&rec.header, &sample, event); |
| 7979 | ret = perf_output_begin(&handle, event, rec.header.size); |
| 7980 | |
| 7981 | if (ret) |
| 7982 | return; |
| 7983 | |
| 7984 | perf_output_put(&handle, rec); |
| 7985 | perf_event__output_id_sample(event, &handle, &sample); |
| 7986 | |
| 7987 | perf_output_end(&handle); |
| 7988 | } |
| 7989 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7990 | /* |
Kan Liang | f38b0db | 2015-05-10 15:13:14 -0400 | [diff] [blame] | 7991 | * Lost/dropped samples logging |
| 7992 | */ |
| 7993 | void perf_log_lost_samples(struct perf_event *event, u64 lost) |
| 7994 | { |
| 7995 | struct perf_output_handle handle; |
| 7996 | struct perf_sample_data sample; |
| 7997 | int ret; |
| 7998 | |
| 7999 | struct { |
| 8000 | struct perf_event_header header; |
| 8001 | u64 lost; |
| 8002 | } lost_samples_event = { |
| 8003 | .header = { |
| 8004 | .type = PERF_RECORD_LOST_SAMPLES, |
| 8005 | .misc = 0, |
| 8006 | .size = sizeof(lost_samples_event), |
| 8007 | }, |
| 8008 | .lost = lost, |
| 8009 | }; |
| 8010 | |
| 8011 | perf_event_header__init_id(&lost_samples_event.header, &sample, event); |
| 8012 | |
| 8013 | ret = perf_output_begin(&handle, event, |
| 8014 | lost_samples_event.header.size); |
| 8015 | if (ret) |
| 8016 | return; |
| 8017 | |
| 8018 | perf_output_put(&handle, lost_samples_event); |
| 8019 | perf_event__output_id_sample(event, &handle, &sample); |
| 8020 | perf_output_end(&handle); |
| 8021 | } |
| 8022 | |
| 8023 | /* |
Adrian Hunter | 45ac140 | 2015-07-21 12:44:02 +0300 | [diff] [blame] | 8024 | * context_switch tracking |
| 8025 | */ |
| 8026 | |
| 8027 | struct perf_switch_event { |
| 8028 | struct task_struct *task; |
| 8029 | struct task_struct *next_prev; |
| 8030 | |
| 8031 | struct { |
| 8032 | struct perf_event_header header; |
| 8033 | u32 next_prev_pid; |
| 8034 | u32 next_prev_tid; |
| 8035 | } event_id; |
| 8036 | }; |
| 8037 | |
| 8038 | static int perf_event_switch_match(struct perf_event *event) |
| 8039 | { |
| 8040 | return event->attr.context_switch; |
| 8041 | } |
| 8042 | |
| 8043 | static void perf_event_switch_output(struct perf_event *event, void *data) |
| 8044 | { |
| 8045 | struct perf_switch_event *se = data; |
| 8046 | struct perf_output_handle handle; |
| 8047 | struct perf_sample_data sample; |
| 8048 | int ret; |
| 8049 | |
| 8050 | if (!perf_event_switch_match(event)) |
| 8051 | return; |
| 8052 | |
| 8053 | /* Only CPU-wide events are allowed to see next/prev pid/tid */ |
| 8054 | if (event->ctx->task) { |
| 8055 | se->event_id.header.type = PERF_RECORD_SWITCH; |
| 8056 | se->event_id.header.size = sizeof(se->event_id.header); |
| 8057 | } else { |
| 8058 | se->event_id.header.type = PERF_RECORD_SWITCH_CPU_WIDE; |
| 8059 | se->event_id.header.size = sizeof(se->event_id); |
| 8060 | se->event_id.next_prev_pid = |
| 8061 | perf_event_pid(event, se->next_prev); |
| 8062 | se->event_id.next_prev_tid = |
| 8063 | perf_event_tid(event, se->next_prev); |
| 8064 | } |
| 8065 | |
| 8066 | perf_event_header__init_id(&se->event_id.header, &sample, event); |
| 8067 | |
| 8068 | ret = perf_output_begin(&handle, event, se->event_id.header.size); |
| 8069 | if (ret) |
| 8070 | return; |
| 8071 | |
| 8072 | if (event->ctx->task) |
| 8073 | perf_output_put(&handle, se->event_id.header); |
| 8074 | else |
| 8075 | perf_output_put(&handle, se->event_id); |
| 8076 | |
| 8077 | perf_event__output_id_sample(event, &handle, &sample); |
| 8078 | |
| 8079 | perf_output_end(&handle); |
| 8080 | } |
| 8081 | |
| 8082 | static void perf_event_switch(struct task_struct *task, |
| 8083 | struct task_struct *next_prev, bool sched_in) |
| 8084 | { |
| 8085 | struct perf_switch_event switch_event; |
| 8086 | |
| 8087 | /* N.B. caller checks nr_switch_events != 0 */ |
| 8088 | |
| 8089 | switch_event = (struct perf_switch_event){ |
| 8090 | .task = task, |
| 8091 | .next_prev = next_prev, |
| 8092 | .event_id = { |
| 8093 | .header = { |
| 8094 | /* .type */ |
| 8095 | .misc = sched_in ? 0 : PERF_RECORD_MISC_SWITCH_OUT, |
| 8096 | /* .size */ |
| 8097 | }, |
| 8098 | /* .next_prev_pid */ |
| 8099 | /* .next_prev_tid */ |
| 8100 | }, |
| 8101 | }; |
| 8102 | |
Alexey Budankov | 101592b | 2018-04-09 10:25:32 +0300 | [diff] [blame] | 8103 | if (!sched_in && task->state == TASK_RUNNING) |
| 8104 | switch_event.event_id.header.misc |= |
| 8105 | PERF_RECORD_MISC_SWITCH_OUT_PREEMPT; |
| 8106 | |
Peter Zijlstra | aab5b71 | 2016-05-12 17:26:46 +0200 | [diff] [blame] | 8107 | perf_iterate_sb(perf_event_switch_output, |
Adrian Hunter | 45ac140 | 2015-07-21 12:44:02 +0300 | [diff] [blame] | 8108 | &switch_event, |
| 8109 | NULL); |
| 8110 | } |
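
/*
 * Usage sketch (illustrative only, not part of this file; function name is
 * made up): userspace opts in to the records above by setting
 * attr.context_switch on any event; a software "dummy" event is the usual
 * carrier when only sideband data is wanted. Opened per-CPU (pid == -1,
 * cpu >= 0) it receives PERF_RECORD_SWITCH_CPU_WIDE including
 * next_prev_pid/tid; opened per-task it receives the bare PERF_RECORD_SWITCH
 * header. The fd must still be mmap()ed to read the ring buffer.
 */
#include <linux/perf_event.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int open_switch_events(int cpu)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_DUMMY;	/* sideband records only */
	attr.context_switch = 1;		/* request PERF_RECORD_SWITCH* */
	attr.sample_id_all = 1;

	/* pid == -1, cpu >= 0: CPU-wide, so the CPU_WIDE record variant */
	return syscall(__NR_perf_event_open, &attr, -1, cpu, -1, 0);
}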
| 8111 | |
| 8112 | /* |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 8113 | * IRQ throttle logging |
| 8114 | */ |
| 8115 | |
| 8116 | static void perf_log_throttle(struct perf_event *event, int enable) |
| 8117 | { |
| 8118 | struct perf_output_handle handle; |
Arnaldo Carvalho de Melo | c980d10 | 2010-12-04 23:02:20 -0200 | [diff] [blame] | 8119 | struct perf_sample_data sample; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 8120 | int ret; |
| 8121 | |
| 8122 | struct { |
| 8123 | struct perf_event_header header; |
| 8124 | u64 time; |
| 8125 | u64 id; |
| 8126 | u64 stream_id; |
| 8127 | } throttle_event = { |
| 8128 | .header = { |
| 8129 | .type = PERF_RECORD_THROTTLE, |
| 8130 | .misc = 0, |
| 8131 | .size = sizeof(throttle_event), |
| 8132 | }, |
Peter Zijlstra | 34f4392 | 2015-02-20 14:05:38 +0100 | [diff] [blame] | 8133 | .time = perf_event_clock(event), |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 8134 | .id = primary_event_id(event), |
| 8135 | .stream_id = event->id, |
| 8136 | }; |
| 8137 | |
| 8138 | if (enable) |
| 8139 | throttle_event.header.type = PERF_RECORD_UNTHROTTLE; |
| 8140 | |
Arnaldo Carvalho de Melo | c980d10 | 2010-12-04 23:02:20 -0200 | [diff] [blame] | 8141 | perf_event_header__init_id(&throttle_event.header, &sample, event); |
| 8142 | |
| 8143 | ret = perf_output_begin(&handle, event, |
Peter Zijlstra | a7ac67e | 2011-06-27 16:47:16 +0200 | [diff] [blame] | 8144 | throttle_event.header.size); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 8145 | if (ret) |
| 8146 | return; |
| 8147 | |
| 8148 | perf_output_put(&handle, throttle_event); |
Arnaldo Carvalho de Melo | c980d10 | 2010-12-04 23:02:20 -0200 | [diff] [blame] | 8149 | perf_event__output_id_sample(event, &handle, &sample); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 8150 | perf_output_end(&handle); |
| 8151 | } |
| 8152 | |
Song Liu | 76193a9 | 2019-01-17 08:15:13 -0800 | [diff] [blame] | 8153 | /* |
| 8154 | * ksymbol register/unregister tracking |
| 8155 | */ |
| 8156 | |
| 8157 | struct perf_ksymbol_event { |
| 8158 | const char *name; |
| 8159 | int name_len; |
| 8160 | struct { |
| 8161 | struct perf_event_header header; |
| 8162 | u64 addr; |
| 8163 | u32 len; |
| 8164 | u16 ksym_type; |
| 8165 | u16 flags; |
| 8166 | } event_id; |
| 8167 | }; |
| 8168 | |
| 8169 | static int perf_event_ksymbol_match(struct perf_event *event) |
| 8170 | { |
| 8171 | return event->attr.ksymbol; |
| 8172 | } |
| 8173 | |
| 8174 | static void perf_event_ksymbol_output(struct perf_event *event, void *data) |
| 8175 | { |
| 8176 | struct perf_ksymbol_event *ksymbol_event = data; |
| 8177 | struct perf_output_handle handle; |
| 8178 | struct perf_sample_data sample; |
| 8179 | int ret; |
| 8180 | |
| 8181 | if (!perf_event_ksymbol_match(event)) |
| 8182 | return; |
| 8183 | |
| 8184 | perf_event_header__init_id(&ksymbol_event->event_id.header, |
| 8185 | &sample, event); |
| 8186 | ret = perf_output_begin(&handle, event, |
| 8187 | ksymbol_event->event_id.header.size); |
| 8188 | if (ret) |
| 8189 | return; |
| 8190 | |
| 8191 | perf_output_put(&handle, ksymbol_event->event_id); |
| 8192 | __output_copy(&handle, ksymbol_event->name, ksymbol_event->name_len); |
| 8193 | perf_event__output_id_sample(event, &handle, &sample); |
| 8194 | |
| 8195 | perf_output_end(&handle); |
| 8196 | } |
| 8197 | |
| 8198 | void perf_event_ksymbol(u16 ksym_type, u64 addr, u32 len, bool unregister, |
| 8199 | const char *sym) |
| 8200 | { |
| 8201 | struct perf_ksymbol_event ksymbol_event; |
| 8202 | char name[KSYM_NAME_LEN]; |
| 8203 | u16 flags = 0; |
| 8204 | int name_len; |
| 8205 | |
| 8206 | if (!atomic_read(&nr_ksymbol_events)) |
| 8207 | return; |
| 8208 | |
| 8209 | if (ksym_type >= PERF_RECORD_KSYMBOL_TYPE_MAX || |
| 8210 | ksym_type == PERF_RECORD_KSYMBOL_TYPE_UNKNOWN) |
| 8211 | goto err; |
| 8212 | |
| 8213 | strlcpy(name, sym, KSYM_NAME_LEN); |
| 8214 | name_len = strlen(name) + 1; |
| 8215 | while (!IS_ALIGNED(name_len, sizeof(u64))) |
| 8216 | name[name_len++] = '\0'; |
| 8217 | BUILD_BUG_ON(KSYM_NAME_LEN % sizeof(u64)); |
| 8218 | |
| 8219 | if (unregister) |
| 8220 | flags |= PERF_RECORD_KSYMBOL_FLAGS_UNREGISTER; |
| 8221 | |
| 8222 | ksymbol_event = (struct perf_ksymbol_event){ |
| 8223 | .name = name, |
| 8224 | .name_len = name_len, |
| 8225 | .event_id = { |
| 8226 | .header = { |
| 8227 | .type = PERF_RECORD_KSYMBOL, |
| 8228 | .size = sizeof(ksymbol_event.event_id) + |
| 8229 | name_len, |
| 8230 | }, |
| 8231 | .addr = addr, |
| 8232 | .len = len, |
| 8233 | .ksym_type = ksym_type, |
| 8234 | .flags = flags, |
| 8235 | }, |
| 8236 | }; |
| 8237 | |
| 8238 | perf_iterate_sb(perf_event_ksymbol_output, &ksymbol_event, NULL); |
| 8239 | return; |
| 8240 | err: |
| 8241 | WARN_ONCE(1, "%s: Invalid KSYMBOL type 0x%x\n", __func__, ksym_type); |
| 8242 | } |
| 8243 | |
Song Liu | 6ee52e2 | 2019-01-17 08:15:15 -0800 | [diff] [blame] | 8244 | /* |
| 8245 | * bpf program load/unload tracking |
| 8246 | */ |
| 8247 | |
| 8248 | struct perf_bpf_event { |
| 8249 | struct bpf_prog *prog; |
| 8250 | struct { |
| 8251 | struct perf_event_header header; |
| 8252 | u16 type; |
| 8253 | u16 flags; |
| 8254 | u32 id; |
| 8255 | u8 tag[BPF_TAG_SIZE]; |
| 8256 | } event_id; |
| 8257 | }; |
| 8258 | |
| 8259 | static int perf_event_bpf_match(struct perf_event *event) |
| 8260 | { |
| 8261 | return event->attr.bpf_event; |
| 8262 | } |
| 8263 | |
| 8264 | static void perf_event_bpf_output(struct perf_event *event, void *data) |
| 8265 | { |
| 8266 | struct perf_bpf_event *bpf_event = data; |
| 8267 | struct perf_output_handle handle; |
| 8268 | struct perf_sample_data sample; |
| 8269 | int ret; |
| 8270 | |
| 8271 | if (!perf_event_bpf_match(event)) |
| 8272 | return; |
| 8273 | |
| 8274 | perf_event_header__init_id(&bpf_event->event_id.header, |
| 8275 | &sample, event); |
| 8276 | ret = perf_output_begin(&handle, event, |
| 8277 | bpf_event->event_id.header.size); |
| 8278 | if (ret) |
| 8279 | return; |
| 8280 | |
| 8281 | perf_output_put(&handle, bpf_event->event_id); |
| 8282 | perf_event__output_id_sample(event, &handle, &sample); |
| 8283 | |
| 8284 | perf_output_end(&handle); |
| 8285 | } |
| 8286 | |
| 8287 | static void perf_event_bpf_emit_ksymbols(struct bpf_prog *prog, |
| 8288 | enum perf_bpf_event_type type) |
| 8289 | { |
| 8290 | bool unregister = type == PERF_BPF_EVENT_PROG_UNLOAD; |
| 8291 | char sym[KSYM_NAME_LEN]; |
| 8292 | int i; |
| 8293 | |
| 8294 | if (prog->aux->func_cnt == 0) { |
| 8295 | bpf_get_prog_name(prog, sym); |
| 8296 | perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_BPF, |
| 8297 | (u64)(unsigned long)prog->bpf_func, |
| 8298 | prog->jited_len, unregister, sym); |
| 8299 | } else { |
| 8300 | for (i = 0; i < prog->aux->func_cnt; i++) { |
| 8301 | struct bpf_prog *subprog = prog->aux->func[i]; |
| 8302 | |
| 8303 | bpf_get_prog_name(subprog, sym); |
| 8304 | perf_event_ksymbol( |
| 8305 | PERF_RECORD_KSYMBOL_TYPE_BPF, |
| 8306 | (u64)(unsigned long)subprog->bpf_func, |
| 8307 | subprog->jited_len, unregister, sym); |
| 8308 | } |
| 8309 | } |
| 8310 | } |
| 8311 | |
| 8312 | void perf_event_bpf_event(struct bpf_prog *prog, |
| 8313 | enum perf_bpf_event_type type, |
| 8314 | u16 flags) |
| 8315 | { |
| 8316 | struct perf_bpf_event bpf_event; |
| 8317 | |
| 8318 | if (type <= PERF_BPF_EVENT_UNKNOWN || |
| 8319 | type >= PERF_BPF_EVENT_MAX) |
| 8320 | return; |
| 8321 | |
| 8322 | switch (type) { |
| 8323 | case PERF_BPF_EVENT_PROG_LOAD: |
| 8324 | case PERF_BPF_EVENT_PROG_UNLOAD: |
| 8325 | if (atomic_read(&nr_ksymbol_events)) |
| 8326 | perf_event_bpf_emit_ksymbols(prog, type); |
| 8327 | break; |
| 8328 | default: |
| 8329 | break; |
| 8330 | } |
| 8331 | |
| 8332 | if (!atomic_read(&nr_bpf_events)) |
| 8333 | return; |
| 8334 | |
| 8335 | bpf_event = (struct perf_bpf_event){ |
| 8336 | .prog = prog, |
| 8337 | .event_id = { |
| 8338 | .header = { |
| 8339 | .type = PERF_RECORD_BPF_EVENT, |
| 8340 | .size = sizeof(bpf_event.event_id), |
| 8341 | }, |
| 8342 | .type = type, |
| 8343 | .flags = flags, |
| 8344 | .id = prog->aux->id, |
| 8345 | }, |
| 8346 | }; |
| 8347 | |
| 8348 | BUILD_BUG_ON(BPF_TAG_SIZE % sizeof(u64)); |
| 8349 | |
| 8350 | memcpy(bpf_event.event_id.tag, prog->tag, BPF_TAG_SIZE); |
| 8351 | perf_iterate_sb(perf_event_bpf_output, &bpf_event, NULL); |
| 8352 | } |
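
/*
 * Usage sketch (illustrative, not part of this file): a profiler that wants
 * to symbolize JIT-ed BPF code typically sets both bits below on its sideband
 * event, so it receives a PERF_RECORD_BPF_EVENT for each program load/unload
 * and a PERF_RECORD_KSYMBOL (emitted by perf_event_bpf_emit_ksymbols() above)
 * for every JIT-ed (sub)program address range.
 */
#include <linux/perf_event.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int open_bpf_sideband(int cpu)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_DUMMY;
	attr.ksymbol = 1;	/* PERF_RECORD_KSYMBOL */
	attr.bpf_event = 1;	/* PERF_RECORD_BPF_EVENT */
	attr.sample_id_all = 1;

	return syscall(__NR_perf_event_open, &attr, -1, cpu, -1, 0);
}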
| 8353 | |
Alexander Shishkin | 8d4e6c4 | 2017-03-30 18:39:56 +0300 | [diff] [blame] | 8354 | void perf_event_itrace_started(struct perf_event *event) |
| 8355 | { |
| 8356 | event->attach_state |= PERF_ATTACH_ITRACE; |
| 8357 | } |
| 8358 | |
Alexander Shishkin | ec0d772 | 2015-01-14 14:18:23 +0200 | [diff] [blame] | 8359 | static void perf_log_itrace_start(struct perf_event *event) |
| 8360 | { |
| 8361 | struct perf_output_handle handle; |
| 8362 | struct perf_sample_data sample; |
| 8363 | struct perf_aux_event { |
| 8364 | struct perf_event_header header; |
| 8365 | u32 pid; |
| 8366 | u32 tid; |
| 8367 | } rec; |
| 8368 | int ret; |
| 8369 | |
| 8370 | if (event->parent) |
| 8371 | event = event->parent; |
| 8372 | |
| 8373 | if (!(event->pmu->capabilities & PERF_PMU_CAP_ITRACE) || |
Alexander Shishkin | 8d4e6c4 | 2017-03-30 18:39:56 +0300 | [diff] [blame] | 8374 | event->attach_state & PERF_ATTACH_ITRACE) |
Alexander Shishkin | ec0d772 | 2015-01-14 14:18:23 +0200 | [diff] [blame] | 8375 | return; |
| 8376 | |
Alexander Shishkin | ec0d772 | 2015-01-14 14:18:23 +0200 | [diff] [blame] | 8377 | rec.header.type = PERF_RECORD_ITRACE_START; |
| 8378 | rec.header.misc = 0; |
| 8379 | rec.header.size = sizeof(rec); |
| 8380 | rec.pid = perf_event_pid(event, current); |
| 8381 | rec.tid = perf_event_tid(event, current); |
| 8382 | |
| 8383 | perf_event_header__init_id(&rec.header, &sample, event); |
| 8384 | ret = perf_output_begin(&handle, event, rec.header.size); |
| 8385 | |
| 8386 | if (ret) |
| 8387 | return; |
| 8388 | |
| 8389 | perf_output_put(&handle, rec); |
| 8390 | perf_event__output_id_sample(event, &handle, &sample); |
| 8391 | |
| 8392 | perf_output_end(&handle); |
| 8393 | } |
| 8394 | |
Jiri Olsa | 475113d | 2016-12-28 14:31:03 +0100 | [diff] [blame] | 8395 | static int |
| 8396 | __perf_event_account_interrupt(struct perf_event *event, int throttle) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 8397 | { |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 8398 | struct hw_perf_event *hwc = &event->hw; |
| 8399 | int ret = 0; |
Jiri Olsa | 475113d | 2016-12-28 14:31:03 +0100 | [diff] [blame] | 8400 | u64 seq; |
Peter Zijlstra | 9639882 | 2010-11-24 18:55:29 +0100 | [diff] [blame] | 8401 | |
Stephane Eranian | e050e3f | 2012-01-26 17:03:19 +0100 | [diff] [blame] | 8402 | seq = __this_cpu_read(perf_throttled_seq); |
| 8403 | if (seq != hwc->interrupts_seq) { |
| 8404 | hwc->interrupts_seq = seq; |
| 8405 | hwc->interrupts = 1; |
| 8406 | } else { |
| 8407 | hwc->interrupts++; |
| 8408 | if (unlikely(throttle |
| 8409 | && hwc->interrupts >= max_samples_per_tick)) { |
| 8410 | __this_cpu_inc(perf_throttled_count); |
Frederic Weisbecker | 555e0c1 | 2015-07-16 17:42:29 +0200 | [diff] [blame] | 8411 | tick_dep_set_cpu(smp_processor_id(), TICK_DEP_BIT_PERF_EVENTS); |
Peter Zijlstra | 163ec43 | 2011-02-16 11:22:34 +0100 | [diff] [blame] | 8412 | hwc->interrupts = MAX_INTERRUPTS; |
| 8413 | perf_log_throttle(event, 0); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 8414 | ret = 1; |
| 8415 | } |
Stephane Eranian | e050e3f | 2012-01-26 17:03:19 +0100 | [diff] [blame] | 8416 | } |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 8417 | |
| 8418 | if (event->attr.freq) { |
| 8419 | u64 now = perf_clock(); |
Peter Zijlstra | abd5071 | 2010-01-26 18:50:16 +0100 | [diff] [blame] | 8420 | s64 delta = now - hwc->freq_time_stamp; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 8421 | |
Peter Zijlstra | abd5071 | 2010-01-26 18:50:16 +0100 | [diff] [blame] | 8422 | hwc->freq_time_stamp = now; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 8423 | |
Peter Zijlstra | abd5071 | 2010-01-26 18:50:16 +0100 | [diff] [blame] | 8424 | if (delta > 0 && delta < 2*TICK_NSEC) |
Stephane Eranian | f39d47f | 2012-02-07 14:39:57 +0100 | [diff] [blame] | 8425 | perf_adjust_period(event, delta, hwc->last_period, true); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 8426 | } |
| 8427 | |
Jiri Olsa | 475113d | 2016-12-28 14:31:03 +0100 | [diff] [blame] | 8428 | return ret; |
| 8429 | } |
| 8430 | |
| 8431 | int perf_event_account_interrupt(struct perf_event *event) |
| 8432 | { |
| 8433 | return __perf_event_account_interrupt(event, 1); |
| 8434 | } |
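
/*
 * Worked example (sketch): max_samples_per_tick used above is derived from
 * the kernel.perf_event_max_sample_rate sysctl divided by HZ, so with the
 * default of 100000 samples/sec and HZ == 1000 an event is throttled once it
 * reaches 100 interrupts within a single tick; the throttled window shows up
 * in the ring buffer bracketed by PERF_RECORD_THROTTLE and
 * PERF_RECORD_UNTHROTTLE records.
 */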
| 8435 | |
| 8436 | /* |
| 8437 | * Generic event overflow handling, sampling. |
| 8438 | */ |
| 8439 | |
| 8440 | static int __perf_event_overflow(struct perf_event *event, |
| 8441 | int throttle, struct perf_sample_data *data, |
| 8442 | struct pt_regs *regs) |
| 8443 | { |
| 8444 | int events = atomic_read(&event->event_limit); |
| 8445 | int ret = 0; |
| 8446 | |
| 8447 | /* |
| 8448 | * Non-sampling counters might still use the PMI to fold short |
 | 8449 | 	 * hardware counters; ignore those. 
| 8450 | */ |
| 8451 | if (unlikely(!is_sampling_event(event))) |
| 8452 | return 0; |
| 8453 | |
| 8454 | ret = __perf_event_account_interrupt(event, throttle); |
| 8455 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 8456 | /* |
| 8457 | * XXX event_limit might not quite work as expected on inherited |
| 8458 | * events |
| 8459 | */ |
| 8460 | |
| 8461 | event->pending_kill = POLL_IN; |
| 8462 | if (events && atomic_dec_and_test(&event->event_limit)) { |
| 8463 | ret = 1; |
| 8464 | event->pending_kill = POLL_HUP; |
Jiri Olsa | 5aab90c | 2016-10-26 11:48:24 +0200 | [diff] [blame] | 8465 | |
| 8466 | perf_event_disable_inatomic(event); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 8467 | } |
| 8468 | |
Alexei Starovoitov | aa6a5f3 | 2016-09-01 18:37:24 -0700 | [diff] [blame] | 8469 | READ_ONCE(event->overflow_handler)(event, data, regs); |
Peter Zijlstra | 453f19e | 2009-11-20 22:19:43 +0100 | [diff] [blame] | 8470 | |
Peter Zijlstra | fed66e2cd | 2015-06-11 10:32:01 +0200 | [diff] [blame] | 8471 | if (*perf_event_fasync(event) && event->pending_kill) { |
Peter Zijlstra | a8b0ca1 | 2011-06-27 14:41:57 +0200 | [diff] [blame] | 8472 | event->pending_wakeup = 1; |
| 8473 | irq_work_queue(&event->pending); |
Peter Zijlstra | f506b3d | 2011-05-26 17:02:53 +0200 | [diff] [blame] | 8474 | } |
| 8475 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 8476 | return ret; |
| 8477 | } |
| 8478 | |
Peter Zijlstra | a8b0ca1 | 2011-06-27 14:41:57 +0200 | [diff] [blame] | 8479 | int perf_event_overflow(struct perf_event *event, |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 8480 | struct perf_sample_data *data, |
| 8481 | struct pt_regs *regs) |
| 8482 | { |
Peter Zijlstra | a8b0ca1 | 2011-06-27 14:41:57 +0200 | [diff] [blame] | 8483 | return __perf_event_overflow(event, 1, data, regs); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 8484 | } |
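
/*
 * Usage sketch (illustrative, not part of this file): the event_limit counter
 * consumed above backs the PERF_EVENT_IOC_REFRESH ioctl. A self-monitoring
 * task can arm a finite number of overflows and learn via POLL_HUP when they
 * are exhausted (assuming the event was opened with attr.wakeup_events = 1 so
 * every overflow wakes the poller):
 */
#include <linux/perf_event.h>
#include <poll.h>
#include <sys/ioctl.h>

static int wait_for_overflows(int perf_fd, int n)
{
	struct pollfd pfd = { .fd = perf_fd, .events = POLLIN };

	/* enable the event for 'n' overflows, after which it self-disables */
	if (ioctl(perf_fd, PERF_EVENT_IOC_REFRESH, n) < 0)
		return -1;

	while (poll(&pfd, 1, -1) > 0) {
		if (pfd.revents & POLLHUP)
			return 0;	/* limit hit, event now disabled */
	}
	return -1;
}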
| 8485 | |
| 8486 | /* |
| 8487 | * Generic software event infrastructure |
| 8488 | */ |
| 8489 | |
Peter Zijlstra | b28ab83 | 2010-09-06 14:48:15 +0200 | [diff] [blame] | 8490 | struct swevent_htable { |
| 8491 | struct swevent_hlist *swevent_hlist; |
| 8492 | struct mutex hlist_mutex; |
| 8493 | int hlist_refcount; |
| 8494 | |
 | 8495 | 	/* Recursion avoidance in each context */ 
| 8496 | int recursion[PERF_NR_CONTEXTS]; |
| 8497 | }; |
| 8498 | |
| 8499 | static DEFINE_PER_CPU(struct swevent_htable, swevent_htable); |
| 8500 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 8501 | /* |
| 8502 | * We directly increment event->count and keep a second value in |
 | 8503 |  * event->hw.period_left to count intervals. This period value 
| 8504 | * is kept in the range [-sample_period, 0] so that we can use the |
| 8505 | * sign as trigger. |
| 8506 | */ |
| 8507 | |
Jiri Olsa | ab57384 | 2013-05-01 17:25:44 +0200 | [diff] [blame] | 8508 | u64 perf_swevent_set_period(struct perf_event *event) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 8509 | { |
| 8510 | struct hw_perf_event *hwc = &event->hw; |
| 8511 | u64 period = hwc->last_period; |
| 8512 | u64 nr, offset; |
| 8513 | s64 old, val; |
| 8514 | |
| 8515 | hwc->last_period = hwc->sample_period; |
| 8516 | |
| 8517 | again: |
Peter Zijlstra | e785059 | 2010-05-21 14:43:08 +0200 | [diff] [blame] | 8518 | old = val = local64_read(&hwc->period_left); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 8519 | if (val < 0) |
| 8520 | return 0; |
| 8521 | |
| 8522 | nr = div64_u64(period + val, period); |
| 8523 | offset = nr * period; |
| 8524 | val -= offset; |
Peter Zijlstra | e785059 | 2010-05-21 14:43:08 +0200 | [diff] [blame] | 8525 | if (local64_cmpxchg(&hwc->period_left, old, val) != old) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 8526 | goto again; |
| 8527 | |
| 8528 | return nr; |
| 8529 | } |
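
/*
 * Worked example (sketch): with sample_period == 4 and period_left == 5 (the
 * count has run 5 past the previous trigger point), the function above
 * reports nr == (4 + 5) / 4 == 2 elapsed periods and rewinds period_left to
 * 5 - 2 * 4 == -3, i.e. three more events are needed before the next
 * overflow. A negative period_left therefore means "not due yet", which is
 * what perf_swevent_event() tests with local64_add_negative() below.
 */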
| 8530 | |
Peter Zijlstra | 0cff784 | 2009-11-20 22:19:44 +0100 | [diff] [blame] | 8531 | static void perf_swevent_overflow(struct perf_event *event, u64 overflow, |
Peter Zijlstra | a8b0ca1 | 2011-06-27 14:41:57 +0200 | [diff] [blame] | 8532 | struct perf_sample_data *data, |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 8533 | struct pt_regs *regs) |
| 8534 | { |
| 8535 | struct hw_perf_event *hwc = &event->hw; |
| 8536 | int throttle = 0; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 8537 | |
Peter Zijlstra | 0cff784 | 2009-11-20 22:19:44 +0100 | [diff] [blame] | 8538 | if (!overflow) |
| 8539 | overflow = perf_swevent_set_period(event); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 8540 | |
| 8541 | if (hwc->interrupts == MAX_INTERRUPTS) |
| 8542 | return; |
| 8543 | |
| 8544 | for (; overflow; overflow--) { |
Peter Zijlstra | a8b0ca1 | 2011-06-27 14:41:57 +0200 | [diff] [blame] | 8545 | if (__perf_event_overflow(event, throttle, |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 8546 | data, regs)) { |
| 8547 | /* |
| 8548 | * We inhibit the overflow from happening when |
| 8549 | * hwc->interrupts == MAX_INTERRUPTS. |
| 8550 | */ |
| 8551 | break; |
| 8552 | } |
| 8553 | throttle = 1; |
| 8554 | } |
| 8555 | } |
| 8556 | |
Peter Zijlstra | a4eaf7f | 2010-06-16 14:37:10 +0200 | [diff] [blame] | 8557 | static void perf_swevent_event(struct perf_event *event, u64 nr, |
Peter Zijlstra | a8b0ca1 | 2011-06-27 14:41:57 +0200 | [diff] [blame] | 8558 | struct perf_sample_data *data, |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 8559 | struct pt_regs *regs) |
| 8560 | { |
| 8561 | struct hw_perf_event *hwc = &event->hw; |
| 8562 | |
Peter Zijlstra | e785059 | 2010-05-21 14:43:08 +0200 | [diff] [blame] | 8563 | local64_add(nr, &event->count); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 8564 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 8565 | if (!regs) |
| 8566 | return; |
| 8567 | |
Franck Bui-Huu | 6c7e550 | 2010-11-23 16:21:43 +0100 | [diff] [blame] | 8568 | if (!is_sampling_event(event)) |
Peter Zijlstra | 0cff784 | 2009-11-20 22:19:44 +0100 | [diff] [blame] | 8569 | return; |
| 8570 | |
Andrew Vagin | 5d81e5c | 2011-11-07 15:54:12 +0300 | [diff] [blame] | 8571 | if ((event->attr.sample_type & PERF_SAMPLE_PERIOD) && !event->attr.freq) { |
| 8572 | data->period = nr; |
| 8573 | return perf_swevent_overflow(event, 1, data, regs); |
| 8574 | } else |
| 8575 | data->period = event->hw.last_period; |
| 8576 | |
Peter Zijlstra | 0cff784 | 2009-11-20 22:19:44 +0100 | [diff] [blame] | 8577 | if (nr == 1 && hwc->sample_period == 1 && !event->attr.freq) |
Peter Zijlstra | a8b0ca1 | 2011-06-27 14:41:57 +0200 | [diff] [blame] | 8578 | return perf_swevent_overflow(event, 1, data, regs); |
Peter Zijlstra | 0cff784 | 2009-11-20 22:19:44 +0100 | [diff] [blame] | 8579 | |
Peter Zijlstra | e785059 | 2010-05-21 14:43:08 +0200 | [diff] [blame] | 8580 | if (local64_add_negative(nr, &hwc->period_left)) |
Peter Zijlstra | 0cff784 | 2009-11-20 22:19:44 +0100 | [diff] [blame] | 8581 | return; |
| 8582 | |
Peter Zijlstra | a8b0ca1 | 2011-06-27 14:41:57 +0200 | [diff] [blame] | 8583 | perf_swevent_overflow(event, 0, data, regs); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 8584 | } |
| 8585 | |
Frederic Weisbecker | f5ffe02 | 2009-11-23 15:42:34 +0100 | [diff] [blame] | 8586 | static int perf_exclude_event(struct perf_event *event, |
| 8587 | struct pt_regs *regs) |
| 8588 | { |
Peter Zijlstra | a4eaf7f | 2010-06-16 14:37:10 +0200 | [diff] [blame] | 8589 | if (event->hw.state & PERF_HES_STOPPED) |
Frederic Weisbecker | 91b2f48 | 2011-03-07 21:27:08 +0100 | [diff] [blame] | 8590 | return 1; |
Peter Zijlstra | a4eaf7f | 2010-06-16 14:37:10 +0200 | [diff] [blame] | 8591 | |
Frederic Weisbecker | f5ffe02 | 2009-11-23 15:42:34 +0100 | [diff] [blame] | 8592 | if (regs) { |
| 8593 | if (event->attr.exclude_user && user_mode(regs)) |
| 8594 | return 1; |
| 8595 | |
| 8596 | if (event->attr.exclude_kernel && !user_mode(regs)) |
| 8597 | return 1; |
| 8598 | } |
| 8599 | |
| 8600 | return 0; |
| 8601 | } |
| 8602 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 8603 | static int perf_swevent_match(struct perf_event *event, |
| 8604 | enum perf_type_id type, |
Li Zefan | 6fb2915 | 2009-10-15 11:21:42 +0800 | [diff] [blame] | 8605 | u32 event_id, |
| 8606 | struct perf_sample_data *data, |
| 8607 | struct pt_regs *regs) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 8608 | { |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 8609 | if (event->attr.type != type) |
| 8610 | return 0; |
Frederic Weisbecker | f5ffe02 | 2009-11-23 15:42:34 +0100 | [diff] [blame] | 8611 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 8612 | if (event->attr.config != event_id) |
| 8613 | return 0; |
| 8614 | |
Frederic Weisbecker | f5ffe02 | 2009-11-23 15:42:34 +0100 | [diff] [blame] | 8615 | if (perf_exclude_event(event, regs)) |
| 8616 | return 0; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 8617 | |
| 8618 | return 1; |
| 8619 | } |
| 8620 | |
Frederic Weisbecker | 76e1d90 | 2010-04-05 15:35:57 +0200 | [diff] [blame] | 8621 | static inline u64 swevent_hash(u64 type, u32 event_id) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 8622 | { |
Frederic Weisbecker | 76e1d90 | 2010-04-05 15:35:57 +0200 | [diff] [blame] | 8623 | u64 val = event_id | (type << 32); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 8624 | |
Frederic Weisbecker | 76e1d90 | 2010-04-05 15:35:57 +0200 | [diff] [blame] | 8625 | return hash_64(val, SWEVENT_HLIST_BITS); |
| 8626 | } |
| 8627 | |
Frederic Weisbecker | 49f135e | 2010-05-20 10:17:46 +0200 | [diff] [blame] | 8628 | static inline struct hlist_head * |
| 8629 | __find_swevent_head(struct swevent_hlist *hlist, u64 type, u32 event_id) |
Frederic Weisbecker | 76e1d90 | 2010-04-05 15:35:57 +0200 | [diff] [blame] | 8630 | { |
Frederic Weisbecker | 49f135e | 2010-05-20 10:17:46 +0200 | [diff] [blame] | 8631 | u64 hash = swevent_hash(type, event_id); |
Frederic Weisbecker | 76e1d90 | 2010-04-05 15:35:57 +0200 | [diff] [blame] | 8632 | |
Frederic Weisbecker | 49f135e | 2010-05-20 10:17:46 +0200 | [diff] [blame] | 8633 | return &hlist->heads[hash]; |
| 8634 | } |
| 8635 | |
| 8636 | /* For the read side: events when they trigger */ |
| 8637 | static inline struct hlist_head * |
Peter Zijlstra | b28ab83 | 2010-09-06 14:48:15 +0200 | [diff] [blame] | 8638 | find_swevent_head_rcu(struct swevent_htable *swhash, u64 type, u32 event_id) |
Frederic Weisbecker | 49f135e | 2010-05-20 10:17:46 +0200 | [diff] [blame] | 8639 | { |
| 8640 | struct swevent_hlist *hlist; |
Frederic Weisbecker | 76e1d90 | 2010-04-05 15:35:57 +0200 | [diff] [blame] | 8641 | |
Peter Zijlstra | b28ab83 | 2010-09-06 14:48:15 +0200 | [diff] [blame] | 8642 | hlist = rcu_dereference(swhash->swevent_hlist); |
Frederic Weisbecker | 76e1d90 | 2010-04-05 15:35:57 +0200 | [diff] [blame] | 8643 | if (!hlist) |
| 8644 | return NULL; |
| 8645 | |
Frederic Weisbecker | 49f135e | 2010-05-20 10:17:46 +0200 | [diff] [blame] | 8646 | return __find_swevent_head(hlist, type, event_id); |
| 8647 | } |
| 8648 | |
| 8649 | /* For the event head insertion and removal in the hlist */ |
| 8650 | static inline struct hlist_head * |
Peter Zijlstra | b28ab83 | 2010-09-06 14:48:15 +0200 | [diff] [blame] | 8651 | find_swevent_head(struct swevent_htable *swhash, struct perf_event *event) |
Frederic Weisbecker | 49f135e | 2010-05-20 10:17:46 +0200 | [diff] [blame] | 8652 | { |
| 8653 | struct swevent_hlist *hlist; |
| 8654 | u32 event_id = event->attr.config; |
| 8655 | u64 type = event->attr.type; |
| 8656 | |
| 8657 | /* |
 | 8658 | 	 * Event scheduling is always serialized against hlist allocation 
 | 8659 | 	 * and release, which makes the protected version suitable here. 
| 8660 | * The context lock guarantees that. |
| 8661 | */ |
Peter Zijlstra | b28ab83 | 2010-09-06 14:48:15 +0200 | [diff] [blame] | 8662 | hlist = rcu_dereference_protected(swhash->swevent_hlist, |
Frederic Weisbecker | 49f135e | 2010-05-20 10:17:46 +0200 | [diff] [blame] | 8663 | lockdep_is_held(&event->ctx->lock)); |
| 8664 | if (!hlist) |
| 8665 | return NULL; |
| 8666 | |
| 8667 | return __find_swevent_head(hlist, type, event_id); |
Frederic Weisbecker | 76e1d90 | 2010-04-05 15:35:57 +0200 | [diff] [blame] | 8668 | } |
| 8669 | |
| 8670 | static void do_perf_sw_event(enum perf_type_id type, u32 event_id, |
Peter Zijlstra | a8b0ca1 | 2011-06-27 14:41:57 +0200 | [diff] [blame] | 8671 | u64 nr, |
Frederic Weisbecker | 76e1d90 | 2010-04-05 15:35:57 +0200 | [diff] [blame] | 8672 | struct perf_sample_data *data, |
| 8673 | struct pt_regs *regs) |
| 8674 | { |
Christoph Lameter | 4a32fea | 2014-08-17 12:30:27 -0500 | [diff] [blame] | 8675 | struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable); |
Frederic Weisbecker | 76e1d90 | 2010-04-05 15:35:57 +0200 | [diff] [blame] | 8676 | struct perf_event *event; |
Frederic Weisbecker | 76e1d90 | 2010-04-05 15:35:57 +0200 | [diff] [blame] | 8677 | struct hlist_head *head; |
| 8678 | |
Frederic Weisbecker | 76e1d90 | 2010-04-05 15:35:57 +0200 | [diff] [blame] | 8679 | rcu_read_lock(); |
Peter Zijlstra | b28ab83 | 2010-09-06 14:48:15 +0200 | [diff] [blame] | 8680 | head = find_swevent_head_rcu(swhash, type, event_id); |
Frederic Weisbecker | 76e1d90 | 2010-04-05 15:35:57 +0200 | [diff] [blame] | 8681 | if (!head) |
| 8682 | goto end; |
| 8683 | |
Sasha Levin | b67bfe0 | 2013-02-27 17:06:00 -0800 | [diff] [blame] | 8684 | hlist_for_each_entry_rcu(event, head, hlist_entry) { |
Li Zefan | 6fb2915 | 2009-10-15 11:21:42 +0800 | [diff] [blame] | 8685 | if (perf_swevent_match(event, type, event_id, data, regs)) |
Peter Zijlstra | a8b0ca1 | 2011-06-27 14:41:57 +0200 | [diff] [blame] | 8686 | perf_swevent_event(event, nr, data, regs); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 8687 | } |
Frederic Weisbecker | 76e1d90 | 2010-04-05 15:35:57 +0200 | [diff] [blame] | 8688 | end: |
| 8689 | rcu_read_unlock(); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 8690 | } |
| 8691 | |
Peter Zijlstra (Intel) | 86038c5 | 2014-12-16 12:47:34 +0100 | [diff] [blame] | 8692 | DEFINE_PER_CPU(struct pt_regs, __perf_regs[4]); |
| 8693 | |
Peter Zijlstra | 4ed7c92 | 2009-11-23 11:37:29 +0100 | [diff] [blame] | 8694 | int perf_swevent_get_recursion_context(void) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 8695 | { |
Christoph Lameter | 4a32fea | 2014-08-17 12:30:27 -0500 | [diff] [blame] | 8696 | struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable); |
Frederic Weisbecker | ce71b9d | 2009-11-22 05:26:55 +0100 | [diff] [blame] | 8697 | |
Peter Zijlstra | b28ab83 | 2010-09-06 14:48:15 +0200 | [diff] [blame] | 8698 | return get_recursion_context(swhash->recursion); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 8699 | } |
Ingo Molnar | 645e8cc | 2009-11-22 12:20:19 +0100 | [diff] [blame] | 8700 | EXPORT_SYMBOL_GPL(perf_swevent_get_recursion_context); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 8701 | |
Alexei Starovoitov | 98b5c2c | 2016-04-06 18:43:25 -0700 | [diff] [blame] | 8702 | void perf_swevent_put_recursion_context(int rctx) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 8703 | { |
Christoph Lameter | 4a32fea | 2014-08-17 12:30:27 -0500 | [diff] [blame] | 8704 | struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable); |
Frederic Weisbecker | 927c7a9 | 2010-07-01 16:20:36 +0200 | [diff] [blame] | 8705 | |
Peter Zijlstra | b28ab83 | 2010-09-06 14:48:15 +0200 | [diff] [blame] | 8706 | put_recursion_context(swhash->recursion, rctx); |
Frederic Weisbecker | ce71b9d | 2009-11-22 05:26:55 +0100 | [diff] [blame] | 8707 | } |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 8708 | |
Peter Zijlstra (Intel) | 86038c5 | 2014-12-16 12:47:34 +0100 | [diff] [blame] | 8709 | void ___perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 8710 | { |
Ingo Molnar | a4234bf | 2009-11-23 10:57:59 +0100 | [diff] [blame] | 8711 | struct perf_sample_data data; |
Peter Zijlstra (Intel) | 86038c5 | 2014-12-16 12:47:34 +0100 | [diff] [blame] | 8712 | |
| 8713 | if (WARN_ON_ONCE(!regs)) |
| 8714 | return; |
| 8715 | |
| 8716 | perf_sample_data_init(&data, addr, 0); |
| 8717 | do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, &data, regs); |
| 8718 | } |
| 8719 | |
| 8720 | void __perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr) |
| 8721 | { |
Peter Zijlstra | 4ed7c92 | 2009-11-23 11:37:29 +0100 | [diff] [blame] | 8722 | int rctx; |
| 8723 | |
Peter Zijlstra | 1c024eca | 2010-05-19 14:02:22 +0200 | [diff] [blame] | 8724 | preempt_disable_notrace(); |
Peter Zijlstra | 4ed7c92 | 2009-11-23 11:37:29 +0100 | [diff] [blame] | 8725 | rctx = perf_swevent_get_recursion_context(); |
Peter Zijlstra (Intel) | 86038c5 | 2014-12-16 12:47:34 +0100 | [diff] [blame] | 8726 | if (unlikely(rctx < 0)) |
| 8727 | goto fail; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 8728 | |
Peter Zijlstra (Intel) | 86038c5 | 2014-12-16 12:47:34 +0100 | [diff] [blame] | 8729 | ___perf_sw_event(event_id, nr, regs, addr); |
Peter Zijlstra | 4ed7c92 | 2009-11-23 11:37:29 +0100 | [diff] [blame] | 8730 | |
| 8731 | perf_swevent_put_recursion_context(rctx); |
Peter Zijlstra (Intel) | 86038c5 | 2014-12-16 12:47:34 +0100 | [diff] [blame] | 8732 | fail: |
Peter Zijlstra | 1c024eca | 2010-05-19 14:02:22 +0200 | [diff] [blame] | 8733 | preempt_enable_notrace(); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 8734 | } |
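
/*
 * Call-site sketch (illustrative): elsewhere in the kernel, software events
 * are raised through the perf_sw_event() wrapper in <linux/perf_event.h>,
 * e.g. from a fault handler:
 *
 *	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 *
 * The wrapper tests the perf_swevent_enabled static key for that event id
 * before calling __perf_sw_event() above, so an unused software event costs
 * only a patched-out branch at the call site.
 */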
| 8735 | |
| 8736 | static void perf_swevent_read(struct perf_event *event) |
| 8737 | { |
| 8738 | } |
| 8739 | |
Peter Zijlstra | a4eaf7f | 2010-06-16 14:37:10 +0200 | [diff] [blame] | 8740 | static int perf_swevent_add(struct perf_event *event, int flags) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 8741 | { |
Christoph Lameter | 4a32fea | 2014-08-17 12:30:27 -0500 | [diff] [blame] | 8742 | struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 8743 | struct hw_perf_event *hwc = &event->hw; |
Frederic Weisbecker | 76e1d90 | 2010-04-05 15:35:57 +0200 | [diff] [blame] | 8744 | struct hlist_head *head; |
| 8745 | |
Franck Bui-Huu | 6c7e550 | 2010-11-23 16:21:43 +0100 | [diff] [blame] | 8746 | if (is_sampling_event(event)) { |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 8747 | hwc->last_period = hwc->sample_period; |
| 8748 | perf_swevent_set_period(event); |
| 8749 | } |
Frederic Weisbecker | 76e1d90 | 2010-04-05 15:35:57 +0200 | [diff] [blame] | 8750 | |
Peter Zijlstra | a4eaf7f | 2010-06-16 14:37:10 +0200 | [diff] [blame] | 8751 | hwc->state = !(flags & PERF_EF_START); |
| 8752 | |
Peter Zijlstra | b28ab83 | 2010-09-06 14:48:15 +0200 | [diff] [blame] | 8753 | head = find_swevent_head(swhash, event); |
Peter Zijlstra | 12ca6ad | 2015-12-15 13:49:05 +0100 | [diff] [blame] | 8754 | if (WARN_ON_ONCE(!head)) |
Frederic Weisbecker | 76e1d90 | 2010-04-05 15:35:57 +0200 | [diff] [blame] | 8755 | return -EINVAL; |
| 8756 | |
| 8757 | hlist_add_head_rcu(&event->hlist_entry, head); |
Shaohua Li | 6a694a6 | 2015-02-05 15:55:32 -0800 | [diff] [blame] | 8758 | perf_event_update_userpage(event); |
Frederic Weisbecker | 76e1d90 | 2010-04-05 15:35:57 +0200 | [diff] [blame] | 8759 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 8760 | return 0; |
| 8761 | } |
| 8762 | |
Peter Zijlstra | a4eaf7f | 2010-06-16 14:37:10 +0200 | [diff] [blame] | 8763 | static void perf_swevent_del(struct perf_event *event, int flags) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 8764 | { |
Frederic Weisbecker | 76e1d90 | 2010-04-05 15:35:57 +0200 | [diff] [blame] | 8765 | hlist_del_rcu(&event->hlist_entry); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 8766 | } |
| 8767 | |
Peter Zijlstra | a4eaf7f | 2010-06-16 14:37:10 +0200 | [diff] [blame] | 8768 | static void perf_swevent_start(struct perf_event *event, int flags) |
Peter Zijlstra | c6df8d5 | 2010-06-03 11:21:20 +0200 | [diff] [blame] | 8769 | { |
Peter Zijlstra | a4eaf7f | 2010-06-16 14:37:10 +0200 | [diff] [blame] | 8770 | event->hw.state = 0; |
Peter Zijlstra | c6df8d5 | 2010-06-03 11:21:20 +0200 | [diff] [blame] | 8771 | } |
| 8772 | |
Peter Zijlstra | a4eaf7f | 2010-06-16 14:37:10 +0200 | [diff] [blame] | 8773 | static void perf_swevent_stop(struct perf_event *event, int flags) |
Peter Zijlstra | c6df8d5 | 2010-06-03 11:21:20 +0200 | [diff] [blame] | 8774 | { |
Peter Zijlstra | a4eaf7f | 2010-06-16 14:37:10 +0200 | [diff] [blame] | 8775 | event->hw.state = PERF_HES_STOPPED; |
Peter Zijlstra | c6df8d5 | 2010-06-03 11:21:20 +0200 | [diff] [blame] | 8776 | } |
| 8777 | |
Frederic Weisbecker | 49f135e | 2010-05-20 10:17:46 +0200 | [diff] [blame] | 8778 | /* Deref the hlist from the update side */ |
| 8779 | static inline struct swevent_hlist * |
Peter Zijlstra | b28ab83 | 2010-09-06 14:48:15 +0200 | [diff] [blame] | 8780 | swevent_hlist_deref(struct swevent_htable *swhash) |
Frederic Weisbecker | 49f135e | 2010-05-20 10:17:46 +0200 | [diff] [blame] | 8781 | { |
Peter Zijlstra | b28ab83 | 2010-09-06 14:48:15 +0200 | [diff] [blame] | 8782 | return rcu_dereference_protected(swhash->swevent_hlist, |
| 8783 | lockdep_is_held(&swhash->hlist_mutex)); |
Frederic Weisbecker | 49f135e | 2010-05-20 10:17:46 +0200 | [diff] [blame] | 8784 | } |
| 8785 | |
Peter Zijlstra | b28ab83 | 2010-09-06 14:48:15 +0200 | [diff] [blame] | 8786 | static void swevent_hlist_release(struct swevent_htable *swhash) |
Frederic Weisbecker | 76e1d90 | 2010-04-05 15:35:57 +0200 | [diff] [blame] | 8787 | { |
Peter Zijlstra | b28ab83 | 2010-09-06 14:48:15 +0200 | [diff] [blame] | 8788 | struct swevent_hlist *hlist = swevent_hlist_deref(swhash); |
Frederic Weisbecker | 76e1d90 | 2010-04-05 15:35:57 +0200 | [diff] [blame] | 8789 | |
Frederic Weisbecker | 49f135e | 2010-05-20 10:17:46 +0200 | [diff] [blame] | 8790 | if (!hlist) |
Frederic Weisbecker | 76e1d90 | 2010-04-05 15:35:57 +0200 | [diff] [blame] | 8791 | return; |
| 8792 | |
Andreea-Cristina Bernat | 70691d4 | 2014-08-22 16:26:05 +0300 | [diff] [blame] | 8793 | RCU_INIT_POINTER(swhash->swevent_hlist, NULL); |
Lai Jiangshan | fa4bbc4 | 2011-03-18 12:08:29 +0800 | [diff] [blame] | 8794 | kfree_rcu(hlist, rcu_head); |
Frederic Weisbecker | 76e1d90 | 2010-04-05 15:35:57 +0200 | [diff] [blame] | 8795 | } |
| 8796 | |
Thomas Gleixner | 3b364d7 | 2016-02-09 20:11:40 +0000 | [diff] [blame] | 8797 | static void swevent_hlist_put_cpu(int cpu) |
Frederic Weisbecker | 76e1d90 | 2010-04-05 15:35:57 +0200 | [diff] [blame] | 8798 | { |
Peter Zijlstra | b28ab83 | 2010-09-06 14:48:15 +0200 | [diff] [blame] | 8799 | struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu); |
Frederic Weisbecker | 76e1d90 | 2010-04-05 15:35:57 +0200 | [diff] [blame] | 8800 | |
Peter Zijlstra | b28ab83 | 2010-09-06 14:48:15 +0200 | [diff] [blame] | 8801 | mutex_lock(&swhash->hlist_mutex); |
Frederic Weisbecker | 76e1d90 | 2010-04-05 15:35:57 +0200 | [diff] [blame] | 8802 | |
Peter Zijlstra | b28ab83 | 2010-09-06 14:48:15 +0200 | [diff] [blame] | 8803 | if (!--swhash->hlist_refcount) |
| 8804 | swevent_hlist_release(swhash); |
Frederic Weisbecker | 76e1d90 | 2010-04-05 15:35:57 +0200 | [diff] [blame] | 8805 | |
Peter Zijlstra | b28ab83 | 2010-09-06 14:48:15 +0200 | [diff] [blame] | 8806 | mutex_unlock(&swhash->hlist_mutex); |
Frederic Weisbecker | 76e1d90 | 2010-04-05 15:35:57 +0200 | [diff] [blame] | 8807 | } |
| 8808 | |
Thomas Gleixner | 3b364d7 | 2016-02-09 20:11:40 +0000 | [diff] [blame] | 8809 | static void swevent_hlist_put(void) |
Frederic Weisbecker | 76e1d90 | 2010-04-05 15:35:57 +0200 | [diff] [blame] | 8810 | { |
| 8811 | int cpu; |
| 8812 | |
Frederic Weisbecker | 76e1d90 | 2010-04-05 15:35:57 +0200 | [diff] [blame] | 8813 | for_each_possible_cpu(cpu) |
Thomas Gleixner | 3b364d7 | 2016-02-09 20:11:40 +0000 | [diff] [blame] | 8814 | swevent_hlist_put_cpu(cpu); |
Frederic Weisbecker | 76e1d90 | 2010-04-05 15:35:57 +0200 | [diff] [blame] | 8815 | } |
| 8816 | |
Thomas Gleixner | 3b364d7 | 2016-02-09 20:11:40 +0000 | [diff] [blame] | 8817 | static int swevent_hlist_get_cpu(int cpu) |
Frederic Weisbecker | 76e1d90 | 2010-04-05 15:35:57 +0200 | [diff] [blame] | 8818 | { |
Peter Zijlstra | b28ab83 | 2010-09-06 14:48:15 +0200 | [diff] [blame] | 8819 | struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu); |
Frederic Weisbecker | 76e1d90 | 2010-04-05 15:35:57 +0200 | [diff] [blame] | 8820 | int err = 0; |
| 8821 | |
Peter Zijlstra | b28ab83 | 2010-09-06 14:48:15 +0200 | [diff] [blame] | 8822 | mutex_lock(&swhash->hlist_mutex); |
Thomas Gleixner | a63fbed | 2017-05-24 10:15:34 +0200 | [diff] [blame] | 8823 | if (!swevent_hlist_deref(swhash) && |
| 8824 | cpumask_test_cpu(cpu, perf_online_mask)) { |
Frederic Weisbecker | 76e1d90 | 2010-04-05 15:35:57 +0200 | [diff] [blame] | 8825 | struct swevent_hlist *hlist; |
| 8826 | |
| 8827 | hlist = kzalloc(sizeof(*hlist), GFP_KERNEL); |
| 8828 | if (!hlist) { |
| 8829 | err = -ENOMEM; |
| 8830 | goto exit; |
| 8831 | } |
Peter Zijlstra | b28ab83 | 2010-09-06 14:48:15 +0200 | [diff] [blame] | 8832 | rcu_assign_pointer(swhash->swevent_hlist, hlist); |
Frederic Weisbecker | 76e1d90 | 2010-04-05 15:35:57 +0200 | [diff] [blame] | 8833 | } |
Peter Zijlstra | b28ab83 | 2010-09-06 14:48:15 +0200 | [diff] [blame] | 8834 | swhash->hlist_refcount++; |
Peter Zijlstra | 9ed6060 | 2010-06-11 17:36:35 +0200 | [diff] [blame] | 8835 | exit: |
Peter Zijlstra | b28ab83 | 2010-09-06 14:48:15 +0200 | [diff] [blame] | 8836 | mutex_unlock(&swhash->hlist_mutex); |
Frederic Weisbecker | 76e1d90 | 2010-04-05 15:35:57 +0200 | [diff] [blame] | 8837 | |
| 8838 | return err; |
| 8839 | } |
| 8840 | |
Thomas Gleixner | 3b364d7 | 2016-02-09 20:11:40 +0000 | [diff] [blame] | 8841 | static int swevent_hlist_get(void) |
Frederic Weisbecker | 76e1d90 | 2010-04-05 15:35:57 +0200 | [diff] [blame] | 8842 | { |
Thomas Gleixner | 3b364d7 | 2016-02-09 20:11:40 +0000 | [diff] [blame] | 8843 | int err, cpu, failed_cpu; |
Frederic Weisbecker | 76e1d90 | 2010-04-05 15:35:57 +0200 | [diff] [blame] | 8844 | |
Thomas Gleixner | a63fbed | 2017-05-24 10:15:34 +0200 | [diff] [blame] | 8845 | mutex_lock(&pmus_lock); |
Frederic Weisbecker | 76e1d90 | 2010-04-05 15:35:57 +0200 | [diff] [blame] | 8846 | for_each_possible_cpu(cpu) { |
Thomas Gleixner | 3b364d7 | 2016-02-09 20:11:40 +0000 | [diff] [blame] | 8847 | err = swevent_hlist_get_cpu(cpu); |
Frederic Weisbecker | 76e1d90 | 2010-04-05 15:35:57 +0200 | [diff] [blame] | 8848 | if (err) { |
| 8849 | failed_cpu = cpu; |
| 8850 | goto fail; |
| 8851 | } |
| 8852 | } |
Thomas Gleixner | a63fbed | 2017-05-24 10:15:34 +0200 | [diff] [blame] | 8853 | mutex_unlock(&pmus_lock); |
Frederic Weisbecker | 76e1d90 | 2010-04-05 15:35:57 +0200 | [diff] [blame] | 8854 | return 0; |
Peter Zijlstra | 9ed6060 | 2010-06-11 17:36:35 +0200 | [diff] [blame] | 8855 | fail: |
Frederic Weisbecker | 76e1d90 | 2010-04-05 15:35:57 +0200 | [diff] [blame] | 8856 | for_each_possible_cpu(cpu) { |
| 8857 | if (cpu == failed_cpu) |
| 8858 | break; |
Thomas Gleixner | 3b364d7 | 2016-02-09 20:11:40 +0000 | [diff] [blame] | 8859 | swevent_hlist_put_cpu(cpu); |
Frederic Weisbecker | 76e1d90 | 2010-04-05 15:35:57 +0200 | [diff] [blame] | 8860 | } |
Thomas Gleixner | a63fbed | 2017-05-24 10:15:34 +0200 | [diff] [blame] | 8861 | mutex_unlock(&pmus_lock); |
Frederic Weisbecker | 76e1d90 | 2010-04-05 15:35:57 +0200 | [diff] [blame] | 8862 | return err; |
| 8863 | } |
| 8864 | |
Ingo Molnar | c5905af | 2012-02-24 08:31:31 +0100 | [diff] [blame] | 8865 | struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX]; |
Frederic Weisbecker | 95476b6 | 2010-04-14 23:42:18 +0200 | [diff] [blame] | 8866 | |
Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 8867 | static void sw_perf_event_destroy(struct perf_event *event) |
| 8868 | { |
| 8869 | u64 event_id = event->attr.config; |
| 8870 | |
| 8871 | WARN_ON(event->parent); |
| 8872 | |
Ingo Molnar | c5905af | 2012-02-24 08:31:31 +0100 | [diff] [blame] | 8873 | static_key_slow_dec(&perf_swevent_enabled[event_id]); |
Thomas Gleixner | 3b364d7 | 2016-02-09 20:11:40 +0000 | [diff] [blame] | 8874 | swevent_hlist_put(); |
Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 8875 | } |
| 8876 | |
| 8877 | static int perf_swevent_init(struct perf_event *event) |
| 8878 | { |
Tommi Rantala | 8176cce | 2013-04-13 22:49:14 +0300 | [diff] [blame] | 8879 | u64 event_id = event->attr.config; |
Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 8880 | |
| 8881 | if (event->attr.type != PERF_TYPE_SOFTWARE) |
| 8882 | return -ENOENT; |
| 8883 | |
Stephane Eranian | 2481c5f | 2012-02-09 23:20:59 +0100 | [diff] [blame] | 8884 | /* |
| 8885 | * no branch sampling for software events |
| 8886 | */ |
| 8887 | if (has_branch_stack(event)) |
| 8888 | return -EOPNOTSUPP; |
| 8889 | |
Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 8890 | switch (event_id) { |
| 8891 | case PERF_COUNT_SW_CPU_CLOCK: |
| 8892 | case PERF_COUNT_SW_TASK_CLOCK: |
| 8893 | return -ENOENT; |
| 8894 | |
| 8895 | default: |
| 8896 | break; |
| 8897 | } |
| 8898 | |
Dan Carpenter | ce67783 | 2010-10-24 21:50:42 +0200 | [diff] [blame] | 8899 | if (event_id >= PERF_COUNT_SW_MAX) |
Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 8900 | return -ENOENT; |
| 8901 | |
| 8902 | if (!event->parent) { |
| 8903 | int err; |
| 8904 | |
Thomas Gleixner | 3b364d7 | 2016-02-09 20:11:40 +0000 | [diff] [blame] | 8905 | err = swevent_hlist_get(); |
Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 8906 | if (err) |
| 8907 | return err; |
| 8908 | |
Ingo Molnar | c5905af | 2012-02-24 08:31:31 +0100 | [diff] [blame] | 8909 | static_key_slow_inc(&perf_swevent_enabled[event_id]); |
Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 8910 | event->destroy = sw_perf_event_destroy; |
| 8911 | } |
| 8912 | |
| 8913 | return 0; |
| 8914 | } |
| 8915 | |
| 8916 | static struct pmu perf_swevent = { |
Peter Zijlstra | 89a1e18 | 2010-09-07 17:34:50 +0200 | [diff] [blame] | 8917 | .task_ctx_nr = perf_sw_context, |
| 8918 | |
Peter Zijlstra | 34f4392 | 2015-02-20 14:05:38 +0100 | [diff] [blame] | 8919 | .capabilities = PERF_PMU_CAP_NO_NMI, |
| 8920 | |
Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 8921 | .event_init = perf_swevent_init, |
Peter Zijlstra | a4eaf7f | 2010-06-16 14:37:10 +0200 | [diff] [blame] | 8922 | .add = perf_swevent_add, |
| 8923 | .del = perf_swevent_del, |
| 8924 | .start = perf_swevent_start, |
| 8925 | .stop = perf_swevent_stop, |
Peter Zijlstra | 1c024eca | 2010-05-19 14:02:22 +0200 | [diff] [blame] | 8926 | .read = perf_swevent_read, |
Peter Zijlstra | 1c024eca | 2010-05-19 14:02:22 +0200 | [diff] [blame] | 8927 | }; |
Frederic Weisbecker | 95476b6 | 2010-04-14 23:42:18 +0200 | [diff] [blame] | 8928 | |
Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 8929 | #ifdef CONFIG_EVENT_TRACING |
| 8930 | |
Peter Zijlstra | 1c024eca | 2010-05-19 14:02:22 +0200 | [diff] [blame] | 8931 | static int perf_tp_filter_match(struct perf_event *event, |
Frederic Weisbecker | 95476b6 | 2010-04-14 23:42:18 +0200 | [diff] [blame] | 8932 | struct perf_sample_data *data) |
| 8933 | { |
Daniel Borkmann | 7e3f977 | 2016-07-14 18:08:03 +0200 | [diff] [blame] | 8934 | void *record = data->raw->frag.data; |
Frederic Weisbecker | 95476b6 | 2010-04-14 23:42:18 +0200 | [diff] [blame] | 8935 | |
Peter Zijlstra | b71b437 | 2015-11-02 10:50:51 +0100 | [diff] [blame] | 8936 | /* only top level events have filters set */ |
| 8937 | if (event->parent) |
| 8938 | event = event->parent; |
| 8939 | |
Frederic Weisbecker | 95476b6 | 2010-04-14 23:42:18 +0200 | [diff] [blame] | 8940 | if (likely(!event->filter) || filter_match_preds(event->filter, record)) |
| 8941 | return 1; |
| 8942 | return 0; |
| 8943 | } |
| 8944 | |
Peter Zijlstra | 1c024eca | 2010-05-19 14:02:22 +0200 | [diff] [blame] | 8945 | static int perf_tp_event_match(struct perf_event *event, |
| 8946 | struct perf_sample_data *data, |
| 8947 | struct pt_regs *regs) |
| 8948 | { |
Frederic Weisbecker | a0f7d0f | 2011-03-07 21:27:09 +0100 | [diff] [blame] | 8949 | if (event->hw.state & PERF_HES_STOPPED) |
| 8950 | return 0; |
Peter Zijlstra | 580d607 | 2010-05-20 20:54:31 +0200 | [diff] [blame] | 8951 | /* |
Song Liu | 9fd2e48 | 2019-05-07 09:15:45 -0700 | [diff] [blame] | 8952 | * If exclude_kernel, only trace user-space tracepoints (uprobes) |
Peter Zijlstra | 580d607 | 2010-05-20 20:54:31 +0200 | [diff] [blame] | 8953 | */ |
Song Liu | 9fd2e48 | 2019-05-07 09:15:45 -0700 | [diff] [blame] | 8954 | if (event->attr.exclude_kernel && !user_mode(regs)) |
Peter Zijlstra | 1c024eca | 2010-05-19 14:02:22 +0200 | [diff] [blame] | 8955 | return 0; |
| 8956 | |
| 8957 | if (!perf_tp_filter_match(event, data)) |
| 8958 | return 0; |
| 8959 | |
| 8960 | return 1; |
| 8961 | } |
| 8962 | |
Alexei Starovoitov | 85b67bc | 2016-04-18 20:11:50 -0700 | [diff] [blame] | 8963 | void perf_trace_run_bpf_submit(void *raw_data, int size, int rctx, |
| 8964 | struct trace_event_call *call, u64 count, |
| 8965 | struct pt_regs *regs, struct hlist_head *head, |
| 8966 | struct task_struct *task) |
| 8967 | { |
Yonghong Song | e87c6bc | 2017-10-23 23:53:08 -0700 | [diff] [blame] | 8968 | if (bpf_prog_array_valid(call)) { |
Alexei Starovoitov | 85b67bc | 2016-04-18 20:11:50 -0700 | [diff] [blame] | 8969 | *(struct pt_regs **)raw_data = regs; |
Yonghong Song | e87c6bc | 2017-10-23 23:53:08 -0700 | [diff] [blame] | 8970 | if (!trace_call_bpf(call, raw_data) || hlist_empty(head)) { |
Alexei Starovoitov | 85b67bc | 2016-04-18 20:11:50 -0700 | [diff] [blame] | 8971 | perf_swevent_put_recursion_context(rctx); |
| 8972 | return; |
| 8973 | } |
| 8974 | } |
| 8975 | perf_tp_event(call->event.type, count, raw_data, size, regs, head, |
Peter Zijlstra | 8fd0fbb | 2017-10-11 09:45:29 +0200 | [diff] [blame] | 8976 | rctx, task); |
Alexei Starovoitov | 85b67bc | 2016-04-18 20:11:50 -0700 | [diff] [blame] | 8977 | } |
| 8978 | EXPORT_SYMBOL_GPL(perf_trace_run_bpf_submit); |
| 8979 | |
Alexei Starovoitov | 1e1dcd9 | 2016-04-06 18:43:24 -0700 | [diff] [blame] | 8980 | void perf_tp_event(u16 event_type, u64 count, void *record, int entry_size, |
Andrew Vagin | e6dab5f | 2012-07-11 18:14:58 +0400 | [diff] [blame] | 8981 | struct pt_regs *regs, struct hlist_head *head, int rctx, |
Peter Zijlstra | 8fd0fbb | 2017-10-11 09:45:29 +0200 | [diff] [blame] | 8982 | struct task_struct *task) |
Peter Zijlstra | 1c024eca | 2010-05-19 14:02:22 +0200 | [diff] [blame] | 8983 | { |
| 8984 | struct perf_sample_data data; |
Peter Zijlstra | 8fd0fbb | 2017-10-11 09:45:29 +0200 | [diff] [blame] | 8985 | struct perf_event *event; |
Peter Zijlstra | 1c024eca | 2010-05-19 14:02:22 +0200 | [diff] [blame] | 8986 | |
| 8987 | struct perf_raw_record raw = { |
Daniel Borkmann | 7e3f977 | 2016-07-14 18:08:03 +0200 | [diff] [blame] | 8988 | .frag = { |
| 8989 | .size = entry_size, |
| 8990 | .data = record, |
| 8991 | }, |
Peter Zijlstra | 1c024eca | 2010-05-19 14:02:22 +0200 | [diff] [blame] | 8992 | }; |
| 8993 | |
Alexei Starovoitov | 1e1dcd9 | 2016-04-06 18:43:24 -0700 | [diff] [blame] | 8994 | perf_sample_data_init(&data, 0, 0); |
Peter Zijlstra | 1c024eca | 2010-05-19 14:02:22 +0200 | [diff] [blame] | 8995 | data.raw = &raw; |
| 8996 | |
Alexei Starovoitov | 1e1dcd9 | 2016-04-06 18:43:24 -0700 | [diff] [blame] | 8997 | perf_trace_buf_update(record, event_type); |
| 8998 | |
Peter Zijlstra | 8fd0fbb | 2017-10-11 09:45:29 +0200 | [diff] [blame] | 8999 | hlist_for_each_entry_rcu(event, head, hlist_entry) { |
Peter Zijlstra | 1c024eca | 2010-05-19 14:02:22 +0200 | [diff] [blame] | 9000 | if (perf_tp_event_match(event, &data, regs)) |
Peter Zijlstra | a8b0ca1 | 2011-06-27 14:41:57 +0200 | [diff] [blame] | 9001 | perf_swevent_event(event, count, &data, regs); |
Peter Zijlstra | 1c024eca | 2010-05-19 14:02:22 +0200 | [diff] [blame] | 9002 | } |
Peter Zijlstra | ecc55f8 | 2010-05-21 15:11:34 +0200 | [diff] [blame] | 9003 | |
Andrew Vagin | e6dab5f | 2012-07-11 18:14:58 +0400 | [diff] [blame] | 9004 | /* |
| 9005 | * If we got specified a target task, also iterate its context and |
| 9006 | * deliver this event there too. |
| 9007 | */ |
| 9008 | if (task && task != current) { |
| 9009 | struct perf_event_context *ctx; |
| 9010 | struct trace_entry *entry = record; |
| 9011 | |
| 9012 | rcu_read_lock(); |
| 9013 | ctx = rcu_dereference(task->perf_event_ctxp[perf_sw_context]); |
| 9014 | if (!ctx) |
| 9015 | goto unlock; |
| 9016 | |
| 9017 | list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { |
Jiri Olsa | cd6fb677 | 2018-09-23 18:13:43 +0200 | [diff] [blame] | 9018 | if (event->cpu != smp_processor_id()) |
| 9019 | continue; |
Andrew Vagin | e6dab5f | 2012-07-11 18:14:58 +0400 | [diff] [blame] | 9020 | if (event->attr.type != PERF_TYPE_TRACEPOINT) |
| 9021 | continue; |
| 9022 | if (event->attr.config != entry->type) |
| 9023 | continue; |
| 9024 | if (perf_tp_event_match(event, &data, regs)) |
| 9025 | perf_swevent_event(event, count, &data, regs); |
| 9026 | } |
| 9027 | unlock: |
| 9028 | rcu_read_unlock(); |
| 9029 | } |
| 9030 | |
Peter Zijlstra | ecc55f8 | 2010-05-21 15:11:34 +0200 | [diff] [blame] | 9031 | perf_swevent_put_recursion_context(rctx); |
Peter Zijlstra | 1c024eca | 2010-05-19 14:02:22 +0200 | [diff] [blame] | 9032 | } |
| 9033 | EXPORT_SYMBOL_GPL(perf_tp_event); |
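
/*
 * Usage sketch (illustrative, not part of this file): a tracepoint event is
 * requested with PERF_TYPE_TRACEPOINT and attr.config set to the numeric id
 * that tracefs exports for it, e.g.
 * /sys/kernel/tracing/events/sched/sched_switch/id. PERF_SAMPLE_RAW then
 * delivers the raw record that perf_tp_event() above copies out.
 */
#include <linux/perf_event.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int open_tracepoint(const char *id_path)
{
	struct perf_event_attr attr;
	unsigned long long id;
	FILE *f = fopen(id_path, "r");

	if (!f)
		return -1;
	if (fscanf(f, "%llu", &id) != 1) {
		fclose(f);
		return -1;
	}
	fclose(f);

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_TRACEPOINT;
	attr.config = id;			/* tracepoint id from tracefs */
	attr.sample_period = 1;
	attr.sample_type = PERF_SAMPLE_RAW;

	return syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
}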
| 9034 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 9035 | static void tp_perf_event_destroy(struct perf_event *event) |
| 9036 | { |
Peter Zijlstra | 1c024eca | 2010-05-19 14:02:22 +0200 | [diff] [blame] | 9037 | perf_trace_destroy(event); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 9038 | } |
| 9039 | |
Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 9040 | static int perf_tp_event_init(struct perf_event *event) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 9041 | { |
Frederic Weisbecker | 76e1d90 | 2010-04-05 15:35:57 +0200 | [diff] [blame] | 9042 | int err; |
| 9043 | |
Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 9044 | if (event->attr.type != PERF_TYPE_TRACEPOINT) |
| 9045 | return -ENOENT; |
| 9046 | |
Stephane Eranian | 2481c5f | 2012-02-09 23:20:59 +0100 | [diff] [blame] | 9047 | /* |
| 9048 | * no branch sampling for tracepoint events |
| 9049 | */ |
| 9050 | if (has_branch_stack(event)) |
| 9051 | return -EOPNOTSUPP; |
| 9052 | |
Peter Zijlstra | 1c024eca | 2010-05-19 14:02:22 +0200 | [diff] [blame] | 9053 | err = perf_trace_init(event); |
| 9054 | if (err) |
Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 9055 | return err; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 9056 | |
| 9057 | event->destroy = tp_perf_event_destroy; |
| 9058 | |
Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 9059 | return 0; |
| 9060 | } |
| 9061 | |
| 9062 | static struct pmu perf_tracepoint = { |
Peter Zijlstra | 89a1e18 | 2010-09-07 17:34:50 +0200 | [diff] [blame] | 9063 | .task_ctx_nr = perf_sw_context, |
| 9064 | |
Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 9065 | .event_init = perf_tp_event_init, |
Peter Zijlstra | a4eaf7f | 2010-06-16 14:37:10 +0200 | [diff] [blame] | 9066 | .add = perf_trace_add, |
| 9067 | .del = perf_trace_del, |
| 9068 | .start = perf_swevent_start, |
| 9069 | .stop = perf_swevent_stop, |
Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 9070 | .read = perf_swevent_read, |
Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 9071 | }; |
| 9072 | |
Song Liu | 33ea4b2 | 2017-12-06 14:45:16 -0800 | [diff] [blame] | 9073 | #if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS) |
Song Liu | e12f03d | 2017-12-06 14:45:15 -0800 | [diff] [blame] | 9074 | /* |
| 9075 | * Flags in config, used by dynamic PMU kprobe and uprobe |
| 9076 | * The flags should match following PMU_FORMAT_ATTR(). |
| 9077 | * |
| 9078 | * PERF_PROBE_CONFIG_IS_RETPROBE if set, create kretprobe/uretprobe |
| 9079 | * if not set, create kprobe/uprobe |
Song Liu | a6ca88b | 2018-10-01 22:36:36 -0700 | [diff] [blame] | 9080 | * |
| 9081 | * The following values specify a reference counter (or semaphore in the |
 | 9082 |  * terminology of tools like dtrace, systemtap, etc.) for Userspace Statically 
 | 9083 |  * Defined Tracepoints (USDT). Currently, bits 32-63 of config carry the offset. 
| 9084 | * |
 | 9085 |  * PERF_UPROBE_REF_CTR_OFFSET_BITS	# of bits in config as the offset 
| 9086 | * PERF_UPROBE_REF_CTR_OFFSET_SHIFT # of bits to shift left |
Song Liu | e12f03d | 2017-12-06 14:45:15 -0800 | [diff] [blame] | 9087 | */ |
| 9088 | enum perf_probe_config { |
| 9089 | PERF_PROBE_CONFIG_IS_RETPROBE = 1U << 0, /* [k,u]retprobe */ |
Song Liu | a6ca88b | 2018-10-01 22:36:36 -0700 | [diff] [blame] | 9090 | PERF_UPROBE_REF_CTR_OFFSET_BITS = 32, |
| 9091 | PERF_UPROBE_REF_CTR_OFFSET_SHIFT = 64 - PERF_UPROBE_REF_CTR_OFFSET_BITS, |
Song Liu | e12f03d | 2017-12-06 14:45:15 -0800 | [diff] [blame] | 9092 | }; |
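
/*
 * Example (illustrative; the 0x1010 offset is a made-up value): given the
 * bit layout above, a user-space tool could request a uretprobe with a
 * reference counter at file offset 0x1010 roughly like this, reading the
 * dynamically assigned PMU type from sysfs:
 *
 *	attr.type    = <value of /sys/bus/event_source/devices/uprobe/type>;
 *	attr.config  = PERF_PROBE_CONFIG_IS_RETPROBE;
 *	attr.config |= (__u64)0x1010 << PERF_UPROBE_REF_CTR_OFFSET_SHIFT;
 */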
| 9093 | |
| 9094 | PMU_FORMAT_ATTR(retprobe, "config:0"); |
Song Liu | a6ca88b | 2018-10-01 22:36:36 -0700 | [diff] [blame] | 9095 | #endif |
Song Liu | e12f03d | 2017-12-06 14:45:15 -0800 | [diff] [blame] | 9096 | |
Song Liu | a6ca88b | 2018-10-01 22:36:36 -0700 | [diff] [blame] | 9097 | #ifdef CONFIG_KPROBE_EVENTS |
| 9098 | static struct attribute *kprobe_attrs[] = { |
Song Liu | e12f03d | 2017-12-06 14:45:15 -0800 | [diff] [blame] | 9099 | &format_attr_retprobe.attr, |
| 9100 | NULL, |
| 9101 | }; |
| 9102 | |
Song Liu | a6ca88b | 2018-10-01 22:36:36 -0700 | [diff] [blame] | 9103 | static struct attribute_group kprobe_format_group = { |
Song Liu | e12f03d | 2017-12-06 14:45:15 -0800 | [diff] [blame] | 9104 | .name = "format", |
Song Liu | a6ca88b | 2018-10-01 22:36:36 -0700 | [diff] [blame] | 9105 | .attrs = kprobe_attrs, |
Song Liu | e12f03d | 2017-12-06 14:45:15 -0800 | [diff] [blame] | 9106 | }; |
| 9107 | |
Song Liu | a6ca88b | 2018-10-01 22:36:36 -0700 | [diff] [blame] | 9108 | static const struct attribute_group *kprobe_attr_groups[] = { |
| 9109 | &kprobe_format_group, |
Song Liu | e12f03d | 2017-12-06 14:45:15 -0800 | [diff] [blame] | 9110 | NULL, |
| 9111 | }; |
| 9112 | |
| 9113 | static int perf_kprobe_event_init(struct perf_event *event); |
| 9114 | static struct pmu perf_kprobe = { |
| 9115 | .task_ctx_nr = perf_sw_context, |
| 9116 | .event_init = perf_kprobe_event_init, |
| 9117 | .add = perf_trace_add, |
| 9118 | .del = perf_trace_del, |
| 9119 | .start = perf_swevent_start, |
| 9120 | .stop = perf_swevent_stop, |
| 9121 | .read = perf_swevent_read, |
Song Liu | a6ca88b | 2018-10-01 22:36:36 -0700 | [diff] [blame] | 9122 | .attr_groups = kprobe_attr_groups, |
Song Liu | e12f03d | 2017-12-06 14:45:15 -0800 | [diff] [blame] | 9123 | }; |
| 9124 | |
| 9125 | static int perf_kprobe_event_init(struct perf_event *event) |
| 9126 | { |
| 9127 | int err; |
| 9128 | bool is_retprobe; |
| 9129 | |
| 9130 | if (event->attr.type != perf_kprobe.type) |
| 9131 | return -ENOENT; |
Song Liu | 32e6e96 | 2018-04-11 18:02:37 +0000 | [diff] [blame] | 9132 | |
| 9133 | if (!capable(CAP_SYS_ADMIN)) |
| 9134 | return -EACCES; |
| 9135 | |
Song Liu | e12f03d | 2017-12-06 14:45:15 -0800 | [diff] [blame] | 9136 | /* |
| 9137 | * no branch sampling for probe events |
| 9138 | */ |
| 9139 | if (has_branch_stack(event)) |
| 9140 | return -EOPNOTSUPP; |
| 9141 | |
| 9142 | is_retprobe = event->attr.config & PERF_PROBE_CONFIG_IS_RETPROBE; |
| 9143 | err = perf_kprobe_init(event, is_retprobe); |
| 9144 | if (err) |
| 9145 | return err; |
| 9146 | |
| 9147 | event->destroy = perf_kprobe_destroy; |
| 9148 | |
| 9149 | return 0; |
| 9150 | } |
| 9151 | #endif /* CONFIG_KPROBE_EVENTS */ |
| 9152 | |
Song Liu | 33ea4b2 | 2017-12-06 14:45:16 -0800 | [diff] [blame] | 9153 | #ifdef CONFIG_UPROBE_EVENTS |
Song Liu | a6ca88b | 2018-10-01 22:36:36 -0700 | [diff] [blame] | 9154 | PMU_FORMAT_ATTR(ref_ctr_offset, "config:32-63"); |
| 9155 | |
| 9156 | static struct attribute *uprobe_attrs[] = { |
| 9157 | &format_attr_retprobe.attr, |
| 9158 | &format_attr_ref_ctr_offset.attr, |
| 9159 | NULL, |
| 9160 | }; |
| 9161 | |
| 9162 | static struct attribute_group uprobe_format_group = { |
| 9163 | .name = "format", |
| 9164 | .attrs = uprobe_attrs, |
| 9165 | }; |
| 9166 | |
| 9167 | static const struct attribute_group *uprobe_attr_groups[] = { |
| 9168 | &uprobe_format_group, |
| 9169 | NULL, |
| 9170 | }; |
| 9171 | |
Song Liu | 33ea4b2 | 2017-12-06 14:45:16 -0800 | [diff] [blame] | 9172 | static int perf_uprobe_event_init(struct perf_event *event); |
| 9173 | static struct pmu perf_uprobe = { |
| 9174 | .task_ctx_nr = perf_sw_context, |
| 9175 | .event_init = perf_uprobe_event_init, |
| 9176 | .add = perf_trace_add, |
| 9177 | .del = perf_trace_del, |
| 9178 | .start = perf_swevent_start, |
| 9179 | .stop = perf_swevent_stop, |
| 9180 | .read = perf_swevent_read, |
Song Liu | a6ca88b | 2018-10-01 22:36:36 -0700 | [diff] [blame] | 9181 | .attr_groups = uprobe_attr_groups, |
Song Liu | 33ea4b2 | 2017-12-06 14:45:16 -0800 | [diff] [blame] | 9182 | }; |
| 9183 | |
| 9184 | static int perf_uprobe_event_init(struct perf_event *event) |
| 9185 | { |
| 9186 | int err; |
Song Liu | a6ca88b | 2018-10-01 22:36:36 -0700 | [diff] [blame] | 9187 | unsigned long ref_ctr_offset; |
Song Liu | 33ea4b2 | 2017-12-06 14:45:16 -0800 | [diff] [blame] | 9188 | bool is_retprobe; |
| 9189 | |
| 9190 | if (event->attr.type != perf_uprobe.type) |
| 9191 | return -ENOENT; |
Song Liu | 32e6e96 | 2018-04-11 18:02:37 +0000 | [diff] [blame] | 9192 | |
| 9193 | if (!capable(CAP_SYS_ADMIN)) |
| 9194 | return -EACCES; |
| 9195 | |
Song Liu | 33ea4b2 | 2017-12-06 14:45:16 -0800 | [diff] [blame] | 9196 | /* |
| 9197 | * no branch sampling for probe events |
| 9198 | */ |
| 9199 | if (has_branch_stack(event)) |
| 9200 | return -EOPNOTSUPP; |
| 9201 | |
| 9202 | is_retprobe = event->attr.config & PERF_PROBE_CONFIG_IS_RETPROBE; |
Song Liu | a6ca88b | 2018-10-01 22:36:36 -0700 | [diff] [blame] | 9203 | ref_ctr_offset = event->attr.config >> PERF_UPROBE_REF_CTR_OFFSET_SHIFT; |
| 9204 | err = perf_uprobe_init(event, ref_ctr_offset, is_retprobe); |
Song Liu | 33ea4b2 | 2017-12-06 14:45:16 -0800 | [diff] [blame] | 9205 | if (err) |
| 9206 | return err; |
| 9207 | |
| 9208 | event->destroy = perf_uprobe_destroy; |
| 9209 | |
| 9210 | return 0; |
| 9211 | } |
| 9212 | #endif /* CONFIG_UPROBE_EVENTS */ |
| 9213 | |
Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 9214 | static inline void perf_tp_register(void) |
| 9215 | { |
Peter Zijlstra | 2e80a82 | 2010-11-17 23:17:36 +0100 | [diff] [blame] | 9216 | perf_pmu_register(&perf_tracepoint, "tracepoint", PERF_TYPE_TRACEPOINT); |
Song Liu | e12f03d | 2017-12-06 14:45:15 -0800 | [diff] [blame] | 9217 | #ifdef CONFIG_KPROBE_EVENTS |
| 9218 | perf_pmu_register(&perf_kprobe, "kprobe", -1); |
| 9219 | #endif |
Song Liu | 33ea4b2 | 2017-12-06 14:45:16 -0800 | [diff] [blame] | 9220 | #ifdef CONFIG_UPROBE_EVENTS |
| 9221 | perf_pmu_register(&perf_uprobe, "uprobe", -1); |
| 9222 | #endif |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 9223 | } |
Li Zefan | 6fb2915 | 2009-10-15 11:21:42 +0800 | [diff] [blame] | 9224 | |
Li Zefan | 6fb2915 | 2009-10-15 11:21:42 +0800 | [diff] [blame] | 9225 | static void perf_event_free_filter(struct perf_event *event) |
| 9226 | { |
| 9227 | ftrace_profile_free_filter(event); |
| 9228 | } |
| 9229 | |
Alexei Starovoitov | aa6a5f3 | 2016-09-01 18:37:24 -0700 | [diff] [blame] | 9230 | #ifdef CONFIG_BPF_SYSCALL |
| 9231 | static void bpf_overflow_handler(struct perf_event *event, |
| 9232 | struct perf_sample_data *data, |
| 9233 | struct pt_regs *regs) |
| 9234 | { |
| 9235 | struct bpf_perf_event_data_kern ctx = { |
| 9236 | .data = data, |
Yonghong Song | 7d9285e | 2017-10-05 09:19:19 -0700 | [diff] [blame] | 9237 | .event = event, |
Alexei Starovoitov | aa6a5f3 | 2016-09-01 18:37:24 -0700 | [diff] [blame] | 9238 | }; |
| 9239 | int ret = 0; |
| 9240 | |
Hendrik Brueckner | c895f6f | 2017-12-04 10:56:44 +0100 | [diff] [blame] | 9241 | ctx.regs = perf_arch_bpf_user_pt_regs(regs); |
Alexei Starovoitov | aa6a5f3 | 2016-09-01 18:37:24 -0700 | [diff] [blame] | 9242 | preempt_disable(); |
| 9243 | if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) |
| 9244 | goto out; |
| 9245 | rcu_read_lock(); |
Daniel Borkmann | 8857519 | 2016-11-26 01:28:04 +0100 | [diff] [blame] | 9246 | ret = BPF_PROG_RUN(event->prog, &ctx); |
Alexei Starovoitov | aa6a5f3 | 2016-09-01 18:37:24 -0700 | [diff] [blame] | 9247 | rcu_read_unlock(); |
| 9248 | out: |
| 9249 | __this_cpu_dec(bpf_prog_active); |
| 9250 | preempt_enable(); |
| 9251 | if (!ret) |
| 9252 | return; |
| 9253 | |
| 9254 | event->orig_overflow_handler(event, data, regs); |
| 9255 | } |
| 9256 | |
| 9257 | static int perf_event_set_bpf_handler(struct perf_event *event, u32 prog_fd) |
| 9258 | { |
| 9259 | struct bpf_prog *prog; |
| 9260 | |
| 9261 | if (event->overflow_handler_context) |
| 9262 | /* hw breakpoint or kernel counter */ |
| 9263 | return -EINVAL; |
| 9264 | |
| 9265 | if (event->prog) |
| 9266 | return -EEXIST; |
| 9267 | |
| 9268 | prog = bpf_prog_get_type(prog_fd, BPF_PROG_TYPE_PERF_EVENT); |
| 9269 | if (IS_ERR(prog)) |
| 9270 | return PTR_ERR(prog); |
| 9271 | |
| 9272 | event->prog = prog; |
| 9273 | event->orig_overflow_handler = READ_ONCE(event->overflow_handler); |
| 9274 | WRITE_ONCE(event->overflow_handler, bpf_overflow_handler); |
| 9275 | return 0; |
| 9276 | } |
| 9277 | |
| 9278 | static void perf_event_free_bpf_handler(struct perf_event *event) |
| 9279 | { |
| 9280 | struct bpf_prog *prog = event->prog; |
| 9281 | |
| 9282 | if (!prog) |
| 9283 | return; |
| 9284 | |
| 9285 | WRITE_ONCE(event->overflow_handler, event->orig_overflow_handler); |
| 9286 | event->prog = NULL; |
| 9287 | bpf_prog_put(prog); |
| 9288 | } |
| 9289 | #else |
| 9290 | static int perf_event_set_bpf_handler(struct perf_event *event, u32 prog_fd) |
| 9291 | { |
| 9292 | return -EOPNOTSUPP; |
| 9293 | } |
| 9294 | static void perf_event_free_bpf_handler(struct perf_event *event) |
| 9295 | { |
| 9296 | } |
| 9297 | #endif |
| 9298 | |
Song Liu | e12f03d | 2017-12-06 14:45:15 -0800 | [diff] [blame] | 9299 | /* |
| 9300 | * returns true if the event is a tracepoint, or a kprobe/uprobe created |
| 9301 | * with perf_event_open() |
| 9302 | */ |
| 9303 | static inline bool perf_event_is_tracing(struct perf_event *event) |
| 9304 | { |
| 9305 | if (event->pmu == &perf_tracepoint) |
| 9306 | return true; |
| 9307 | #ifdef CONFIG_KPROBE_EVENTS |
| 9308 | if (event->pmu == &perf_kprobe) |
| 9309 | return true; |
| 9310 | #endif |
Song Liu | 33ea4b2 | 2017-12-06 14:45:16 -0800 | [diff] [blame] | 9311 | #ifdef CONFIG_UPROBE_EVENTS |
| 9312 | if (event->pmu == &perf_uprobe) |
| 9313 | return true; |
| 9314 | #endif |
Song Liu | e12f03d | 2017-12-06 14:45:15 -0800 | [diff] [blame] | 9315 | return false; |
| 9316 | } |
| 9317 | |
Alexei Starovoitov | 2541517 | 2015-03-25 12:49:20 -0700 | [diff] [blame] | 9318 | static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd) |
| 9319 | { |
Yonghong Song | cf5f5ce | 2017-08-04 16:00:09 -0700 | [diff] [blame] | 9320 | bool is_kprobe, is_tracepoint, is_syscall_tp; |
Alexei Starovoitov | 2541517 | 2015-03-25 12:49:20 -0700 | [diff] [blame] | 9321 | struct bpf_prog *prog; |
Yonghong Song | e87c6bc | 2017-10-23 23:53:08 -0700 | [diff] [blame] | 9322 | int ret; |
Alexei Starovoitov | 2541517 | 2015-03-25 12:49:20 -0700 | [diff] [blame] | 9323 | |
Song Liu | e12f03d | 2017-12-06 14:45:15 -0800 | [diff] [blame] | 9324 | if (!perf_event_is_tracing(event)) |
Alexei Starovoitov | f91840a | 2017-06-02 21:03:52 -0700 | [diff] [blame] | 9325 | return perf_event_set_bpf_handler(event, prog_fd); |
Alexei Starovoitov | 2541517 | 2015-03-25 12:49:20 -0700 | [diff] [blame] | 9326 | |
Alexei Starovoitov | 98b5c2c | 2016-04-06 18:43:25 -0700 | [diff] [blame] | 9327 | is_kprobe = event->tp_event->flags & TRACE_EVENT_FL_UKPROBE; |
| 9328 | is_tracepoint = event->tp_event->flags & TRACE_EVENT_FL_TRACEPOINT; |
Yonghong Song | cf5f5ce | 2017-08-04 16:00:09 -0700 | [diff] [blame] | 9329 | is_syscall_tp = is_syscall_trace_event(event->tp_event); |
| 9330 | if (!is_kprobe && !is_tracepoint && !is_syscall_tp) |
Alexei Starovoitov | 98b5c2c | 2016-04-06 18:43:25 -0700 | [diff] [blame] | 9331 | /* bpf programs can only be attached to u/kprobe or tracepoint */ |
Alexei Starovoitov | 2541517 | 2015-03-25 12:49:20 -0700 | [diff] [blame] | 9332 | return -EINVAL; |
| 9333 | |
| 9334 | prog = bpf_prog_get(prog_fd); |
| 9335 | if (IS_ERR(prog)) |
| 9336 | return PTR_ERR(prog); |
| 9337 | |
Alexei Starovoitov | 98b5c2c | 2016-04-06 18:43:25 -0700 | [diff] [blame] | 9338 | if ((is_kprobe && prog->type != BPF_PROG_TYPE_KPROBE) || |
Yonghong Song | cf5f5ce | 2017-08-04 16:00:09 -0700 | [diff] [blame] | 9339 | (is_tracepoint && prog->type != BPF_PROG_TYPE_TRACEPOINT) || |
| 9340 | (is_syscall_tp && prog->type != BPF_PROG_TYPE_TRACEPOINT)) { |
Alexei Starovoitov | 2541517 | 2015-03-25 12:49:20 -0700 | [diff] [blame] | 9341 | /* valid fd, but invalid bpf program type */ |
| 9342 | bpf_prog_put(prog); |
| 9343 | return -EINVAL; |
| 9344 | } |
| 9345 | |
Josef Bacik | 9802d86 | 2017-12-11 11:36:48 -0500 | [diff] [blame] | 9346 | /* Kprobe override only works for kprobes, not uprobes. */ |
| 9347 | if (prog->kprobe_override && |
| 9348 | !(event->tp_event->flags & TRACE_EVENT_FL_KPROBE)) { |
| 9349 | bpf_prog_put(prog); |
| 9350 | return -EINVAL; |
| 9351 | } |
| 9352 | |
Yonghong Song | cf5f5ce | 2017-08-04 16:00:09 -0700 | [diff] [blame] | 9353 | if (is_tracepoint || is_syscall_tp) { |
Alexei Starovoitov | 32bbe00 | 2016-04-06 18:43:28 -0700 | [diff] [blame] | 9354 | int off = trace_event_get_offsets(event->tp_event); |
| 9355 | |
| 9356 | if (prog->aux->max_ctx_offset > off) { |
| 9357 | bpf_prog_put(prog); |
| 9358 | return -EACCES; |
| 9359 | } |
| 9360 | } |
Alexei Starovoitov | 2541517 | 2015-03-25 12:49:20 -0700 | [diff] [blame] | 9361 | |
Yonghong Song | e87c6bc | 2017-10-23 23:53:08 -0700 | [diff] [blame] | 9362 | ret = perf_event_attach_bpf_prog(event, prog); |
| 9363 | if (ret) |
| 9364 | bpf_prog_put(prog); |
| 9365 | return ret; |
Alexei Starovoitov | 2541517 | 2015-03-25 12:49:20 -0700 | [diff] [blame] | 9366 | } |
| 9367 | |
| 9368 | static void perf_event_free_bpf_prog(struct perf_event *event) |
| 9369 | { |
Song Liu | e12f03d | 2017-12-06 14:45:15 -0800 | [diff] [blame] | 9370 | if (!perf_event_is_tracing(event)) { |
Yonghong Song | 0b4c684 | 2017-10-23 23:53:07 -0700 | [diff] [blame] | 9371 | perf_event_free_bpf_handler(event); |
Alexei Starovoitov | 2541517 | 2015-03-25 12:49:20 -0700 | [diff] [blame] | 9372 | return; |
Alexei Starovoitov | 2541517 | 2015-03-25 12:49:20 -0700 | [diff] [blame] | 9373 | } |
Yonghong Song | e87c6bc | 2017-10-23 23:53:08 -0700 | [diff] [blame] | 9374 | perf_event_detach_bpf_prog(event); |
Alexei Starovoitov | 2541517 | 2015-03-25 12:49:20 -0700 | [diff] [blame] | 9375 | } |
| 9376 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 9377 | #else |
Li Zefan | 6fb2915 | 2009-10-15 11:21:42 +0800 | [diff] [blame] | 9378 | |
Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 9379 | static inline void perf_tp_register(void) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 9380 | { |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 9381 | } |
Li Zefan | 6fb2915 | 2009-10-15 11:21:42 +0800 | [diff] [blame] | 9382 | |
Li Zefan | 6fb2915 | 2009-10-15 11:21:42 +0800 | [diff] [blame] | 9383 | static void perf_event_free_filter(struct perf_event *event) |
| 9384 | { |
| 9385 | } |
| 9386 | |
Alexei Starovoitov | 2541517 | 2015-03-25 12:49:20 -0700 | [diff] [blame] | 9387 | static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd) |
| 9388 | { |
| 9389 | return -ENOENT; |
| 9390 | } |
| 9391 | |
| 9392 | static void perf_event_free_bpf_prog(struct perf_event *event) |
| 9393 | { |
| 9394 | } |
Li Zefan | 07b139c | 2009-12-21 14:27:35 +0800 | [diff] [blame] | 9395 | #endif /* CONFIG_EVENT_TRACING */ |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 9396 | |
Frederic Weisbecker | 24f1e32c | 2009-09-09 19:22:48 +0200 | [diff] [blame] | 9397 | #ifdef CONFIG_HAVE_HW_BREAKPOINT |
Frederic Weisbecker | f5ffe02 | 2009-11-23 15:42:34 +0100 | [diff] [blame] | 9398 | void perf_bp_event(struct perf_event *bp, void *data) |
Frederic Weisbecker | 24f1e32c | 2009-09-09 19:22:48 +0200 | [diff] [blame] | 9399 | { |
Frederic Weisbecker | f5ffe02 | 2009-11-23 15:42:34 +0100 | [diff] [blame] | 9400 | struct perf_sample_data sample; |
| 9401 | struct pt_regs *regs = data; |
| 9402 | |
Robert Richter | fd0d000 | 2012-04-02 20:19:08 +0200 | [diff] [blame] | 9403 | perf_sample_data_init(&sample, bp->attr.bp_addr, 0); |
Frederic Weisbecker | f5ffe02 | 2009-11-23 15:42:34 +0100 | [diff] [blame] | 9404 | |
Peter Zijlstra | a4eaf7f | 2010-06-16 14:37:10 +0200 | [diff] [blame] | 9405 | if (!bp->hw.state && !perf_exclude_event(bp, regs)) |
Peter Zijlstra | a8b0ca1 | 2011-06-27 14:41:57 +0200 | [diff] [blame] | 9406 | perf_swevent_event(bp, 1, &sample, regs); |
Frederic Weisbecker | 24f1e32c | 2009-09-09 19:22:48 +0200 | [diff] [blame] | 9407 | } |
| 9408 | #endif |
| 9409 | |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 9410 | /* |
| 9411 | * Allocate a new address filter |
| 9412 | */ |
| 9413 | static struct perf_addr_filter * |
| 9414 | perf_addr_filter_new(struct perf_event *event, struct list_head *filters) |
| 9415 | { |
| 9416 | int node = cpu_to_node(event->cpu == -1 ? 0 : event->cpu); |
| 9417 | struct perf_addr_filter *filter; |
| 9418 | |
| 9419 | filter = kzalloc_node(sizeof(*filter), GFP_KERNEL, node); |
| 9420 | if (!filter) |
| 9421 | return NULL; |
| 9422 | |
| 9423 | INIT_LIST_HEAD(&filter->entry); |
| 9424 | list_add_tail(&filter->entry, filters); |
| 9425 | |
| 9426 | return filter; |
| 9427 | } |
| 9428 | |
| 9429 | static void free_filters_list(struct list_head *filters) |
| 9430 | { |
| 9431 | struct perf_addr_filter *filter, *iter; |
| 9432 | |
| 9433 | list_for_each_entry_safe(filter, iter, filters, entry) { |
Song Liu | 9511bce | 2018-04-17 23:29:07 -0700 | [diff] [blame] | 9434 | path_put(&filter->path); |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 9435 | list_del(&filter->entry); |
| 9436 | kfree(filter); |
| 9437 | } |
| 9438 | } |
| 9439 | |
| 9440 | /* |
| 9441 | * Free existing address filters and optionally install new ones |
| 9442 | */ |
| 9443 | static void perf_addr_filters_splice(struct perf_event *event, |
| 9444 | struct list_head *head) |
| 9445 | { |
| 9446 | unsigned long flags; |
| 9447 | LIST_HEAD(list); |
| 9448 | |
| 9449 | if (!has_addr_filter(event)) |
| 9450 | return; |
| 9451 | |
| 9452 | /* don't bother with children, they don't have their own filters */ |
| 9453 | if (event->parent) |
| 9454 | return; |
| 9455 | |
| 9456 | raw_spin_lock_irqsave(&event->addr_filters.lock, flags); |
| 9457 | |
| 9458 | list_splice_init(&event->addr_filters.list, &list); |
| 9459 | if (head) |
| 9460 | list_splice(head, &event->addr_filters.list); |
| 9461 | |
| 9462 | raw_spin_unlock_irqrestore(&event->addr_filters.lock, flags); |
| 9463 | |
| 9464 | free_filters_list(&list); |
| 9465 | } |
| 9466 | |
| 9467 | /* |
| 9468 | * Scan through mm's vmas and see if one of them matches the |
| 9469 | * @filter; if so, adjust filter's address range. |
| 9470 | * Called with mm::mmap_sem down for reading. |
| 9471 | */ |
Alexander Shishkin | c60f83b | 2019-02-15 13:56:55 +0200 | [diff] [blame] | 9472 | static void perf_addr_filter_apply(struct perf_addr_filter *filter, |
| 9473 | struct mm_struct *mm, |
| 9474 | struct perf_addr_filter_range *fr) |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 9475 | { |
| 9476 | struct vm_area_struct *vma; |
| 9477 | |
| 9478 | for (vma = mm->mmap; vma; vma = vma->vm_next) { |
Alexander Shishkin | c60f83b | 2019-02-15 13:56:55 +0200 | [diff] [blame] | 9479 | if (!vma->vm_file) |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 9480 | continue; |
| 9481 | |
Alexander Shishkin | c60f83b | 2019-02-15 13:56:55 +0200 | [diff] [blame] | 9482 | if (perf_addr_filter_vma_adjust(filter, vma, fr)) |
| 9483 | return; |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 9484 | } |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 9485 | } |
| 9486 | |
| 9487 | /* |
| 9488 | * Update event's address range filters based on the |
| 9489 | * task's existing mappings, if any. |
| 9490 | */ |
| 9491 | static void perf_event_addr_filters_apply(struct perf_event *event) |
| 9492 | { |
| 9493 | struct perf_addr_filters_head *ifh = perf_event_addr_filters(event); |
| 9494 | struct task_struct *task = READ_ONCE(event->ctx->task); |
| 9495 | struct perf_addr_filter *filter; |
| 9496 | struct mm_struct *mm = NULL; |
| 9497 | unsigned int count = 0; |
| 9498 | unsigned long flags; |
| 9499 | |
| 9500 | /* |
| 9501 | * We may observe TASK_TOMBSTONE, which means that the event tear-down |
| 9502 | * will stop on the parent's child_mutex that our caller is also holding |
| 9503 | */ |
| 9504 | if (task == TASK_TOMBSTONE) |
| 9505 | return; |
| 9506 | |
Alexander Shishkin | 52a44f8 | 2019-03-29 11:12:12 +0200 | [diff] [blame] | 9507 | if (ifh->nr_file_filters) { |
| 9508 | mm = get_task_mm(event->ctx->task); |
| 9509 | if (!mm) |
| 9510 | goto restart; |
Alexander Shishkin | 6ce77bf | 2017-01-26 11:40:57 +0200 | [diff] [blame] | 9511 | |
Alexander Shishkin | 52a44f8 | 2019-03-29 11:12:12 +0200 | [diff] [blame] | 9512 | down_read(&mm->mmap_sem); |
| 9513 | } |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 9514 | |
| 9515 | raw_spin_lock_irqsave(&ifh->lock, flags); |
| 9516 | list_for_each_entry(filter, &ifh->list, entry) { |
Alexander Shishkin | 52a44f8 | 2019-03-29 11:12:12 +0200 | [diff] [blame] | 9517 | if (filter->path.dentry) { |
| 9518 | /* |
| 9519 | * Adjust base offset if the filter is associated to a |
| 9520 | * binary that needs to be mapped: |
| 9521 | */ |
| 9522 | event->addr_filter_ranges[count].start = 0; |
| 9523 | event->addr_filter_ranges[count].size = 0; |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 9524 | |
Alexander Shishkin | c60f83b | 2019-02-15 13:56:55 +0200 | [diff] [blame] | 9525 | perf_addr_filter_apply(filter, mm, &event->addr_filter_ranges[count]); |
Alexander Shishkin | 52a44f8 | 2019-03-29 11:12:12 +0200 | [diff] [blame] | 9526 | } else { |
| 9527 | event->addr_filter_ranges[count].start = filter->offset; |
| 9528 | event->addr_filter_ranges[count].size = filter->size; |
| 9529 | } |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 9530 | |
| 9531 | count++; |
| 9532 | } |
| 9533 | |
| 9534 | event->addr_filters_gen++; |
| 9535 | raw_spin_unlock_irqrestore(&ifh->lock, flags); |
| 9536 | |
Alexander Shishkin | 52a44f8 | 2019-03-29 11:12:12 +0200 | [diff] [blame] | 9537 | if (ifh->nr_file_filters) { |
| 9538 | up_read(&mm->mmap_sem); |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 9539 | |
Alexander Shishkin | 52a44f8 | 2019-03-29 11:12:12 +0200 | [diff] [blame] | 9540 | mmput(mm); |
| 9541 | } |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 9542 | |
| 9543 | restart: |
Alexander Shishkin | 767ae08 | 2016-09-06 16:23:49 +0300 | [diff] [blame] | 9544 | perf_event_stop(event, 1); |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 9545 | } |
| 9546 | |
| 9547 | /* |
| 9548 | * Address range filtering: limiting the data to certain |
| 9549 | * instruction address ranges. Filters are ioctl()ed to us from |
| 9550 | * userspace as ASCII strings. |
| 9551 | * |
| 9552 | * Filter string format: |
| 9553 | * |
| 9554 | * ACTION RANGE_SPEC |
| 9555 | * where ACTION is one of the |
| 9556 | * * "filter": limit the trace to this region |
| 9557 | * * "start": start tracing from this address |
| 9558 | * * "stop": stop tracing at this address/region; |
| 9559 | * RANGE_SPEC is |
| 9560 | * * for kernel addresses: <start address>[/<size>] |
| 9561 | * * for object files: <start address>[/<size>]@</path/to/object/file> |
| 9562 | * |
Alexander Shishkin | 6ed70cf | 2018-03-29 15:06:48 +0300 | [diff] [blame] | 9563 | * if <size> is not specified or is zero, the range is treated as a single |
| 9564 | * address; not valid for ACTION=="filter". |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 9565 | */ |
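
/*
 * Example filter strings (illustrative only; the path and addresses are
 * made up and merely demonstrate the syntax described above):
 *
 *	filter 0x400000/0x2000@/usr/lib/libfoo.so
 *	start 0xffffffff81000000
 *	stop 0xffffffff81004000
 */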
| 9566 | enum { |
Alexander Shishkin | e96271f | 2016-11-18 13:38:43 +0200 | [diff] [blame] | 9567 | IF_ACT_NONE = -1, |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 9568 | IF_ACT_FILTER, |
| 9569 | IF_ACT_START, |
| 9570 | IF_ACT_STOP, |
| 9571 | IF_SRC_FILE, |
| 9572 | IF_SRC_KERNEL, |
| 9573 | IF_SRC_FILEADDR, |
| 9574 | IF_SRC_KERNELADDR, |
| 9575 | }; |
| 9576 | |
| 9577 | enum { |
| 9578 | IF_STATE_ACTION = 0, |
| 9579 | IF_STATE_SOURCE, |
| 9580 | IF_STATE_END, |
| 9581 | }; |
| 9582 | |
| 9583 | static const match_table_t if_tokens = { |
| 9584 | { IF_ACT_FILTER, "filter" }, |
| 9585 | { IF_ACT_START, "start" }, |
| 9586 | { IF_ACT_STOP, "stop" }, |
| 9587 | { IF_SRC_FILE, "%u/%u@%s" }, |
| 9588 | { IF_SRC_KERNEL, "%u/%u" }, |
| 9589 | { IF_SRC_FILEADDR, "%u@%s" }, |
| 9590 | { IF_SRC_KERNELADDR, "%u" }, |
Alexander Shishkin | e96271f | 2016-11-18 13:38:43 +0200 | [diff] [blame] | 9591 | { IF_ACT_NONE, NULL }, |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 9592 | }; |
| 9593 | |
| 9594 | /* |
| 9595 | * Address filter string parser |
| 9596 | */ |
| 9597 | static int |
| 9598 | perf_event_parse_addr_filter(struct perf_event *event, char *fstr, |
| 9599 | struct list_head *filters) |
| 9600 | { |
| 9601 | struct perf_addr_filter *filter = NULL; |
| 9602 | char *start, *orig, *filename = NULL; |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 9603 | substring_t args[MAX_OPT_ARGS]; |
| 9604 | int state = IF_STATE_ACTION, token; |
| 9605 | unsigned int kernel = 0; |
| 9606 | int ret = -EINVAL; |
| 9607 | |
| 9608 | orig = fstr = kstrdup(fstr, GFP_KERNEL); |
| 9609 | if (!fstr) |
| 9610 | return -ENOMEM; |
| 9611 | |
| 9612 | while ((start = strsep(&fstr, " ,\n")) != NULL) { |
Alexander Shishkin | 6ed70cf | 2018-03-29 15:06:48 +0300 | [diff] [blame] | 9613 | static const enum perf_addr_filter_action_t actions[] = { |
| 9614 | [IF_ACT_FILTER] = PERF_ADDR_FILTER_ACTION_FILTER, |
| 9615 | [IF_ACT_START] = PERF_ADDR_FILTER_ACTION_START, |
| 9616 | [IF_ACT_STOP] = PERF_ADDR_FILTER_ACTION_STOP, |
| 9617 | }; |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 9618 | ret = -EINVAL; |
| 9619 | |
| 9620 | if (!*start) |
| 9621 | continue; |
| 9622 | |
| 9623 | /* filter definition begins */ |
| 9624 | if (state == IF_STATE_ACTION) { |
| 9625 | filter = perf_addr_filter_new(event, filters); |
| 9626 | if (!filter) |
| 9627 | goto fail; |
| 9628 | } |
| 9629 | |
| 9630 | token = match_token(start, if_tokens, args); |
| 9631 | switch (token) { |
| 9632 | case IF_ACT_FILTER: |
| 9633 | case IF_ACT_START: |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 9634 | case IF_ACT_STOP: |
| 9635 | if (state != IF_STATE_ACTION) |
| 9636 | goto fail; |
| 9637 | |
Alexander Shishkin | 6ed70cf | 2018-03-29 15:06:48 +0300 | [diff] [blame] | 9638 | filter->action = actions[token]; |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 9639 | state = IF_STATE_SOURCE; |
| 9640 | break; |
| 9641 | |
| 9642 | case IF_SRC_KERNELADDR: |
| 9643 | case IF_SRC_KERNEL: |
| 9644 | kernel = 1; |
Gustavo A. R. Silva | 10c3405 | 2019-02-12 14:54:30 -0600 | [diff] [blame] | 9645 | /* fall through */ |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 9646 | |
| 9647 | case IF_SRC_FILEADDR: |
| 9648 | case IF_SRC_FILE: |
| 9649 | if (state != IF_STATE_SOURCE) |
| 9650 | goto fail; |
| 9651 | |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 9652 | *args[0].to = 0; |
| 9653 | ret = kstrtoul(args[0].from, 0, &filter->offset); |
| 9654 | if (ret) |
| 9655 | goto fail; |
| 9656 | |
Alexander Shishkin | 6ed70cf | 2018-03-29 15:06:48 +0300 | [diff] [blame] | 9657 | if (token == IF_SRC_KERNEL || token == IF_SRC_FILE) { |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 9658 | *args[1].to = 0; |
| 9659 | ret = kstrtoul(args[1].from, 0, &filter->size); |
| 9660 | if (ret) |
| 9661 | goto fail; |
| 9662 | } |
| 9663 | |
Mathieu Poirier | 4059ffd | 2016-07-18 10:43:05 -0600 | [diff] [blame] | 9664 | if (token == IF_SRC_FILE || token == IF_SRC_FILEADDR) { |
Alexander Shishkin | 6ed70cf | 2018-03-29 15:06:48 +0300 | [diff] [blame] | 9665 | int fpos = token == IF_SRC_FILE ? 2 : 1; |
Mathieu Poirier | 4059ffd | 2016-07-18 10:43:05 -0600 | [diff] [blame] | 9666 | |
| 9667 | filename = match_strdup(&args[fpos]); |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 9668 | if (!filename) { |
| 9669 | ret = -ENOMEM; |
| 9670 | goto fail; |
| 9671 | } |
| 9672 | } |
| 9673 | |
| 9674 | state = IF_STATE_END; |
| 9675 | break; |
| 9676 | |
| 9677 | default: |
| 9678 | goto fail; |
| 9679 | } |
| 9680 | |
| 9681 | /* |
| 9682 | * Filter definition is fully parsed, validate and install it. |
| 9683 | * Make sure that it doesn't contradict itself or the event's |
| 9684 | * attribute. |
| 9685 | */ |
| 9686 | if (state == IF_STATE_END) { |
Alexander Shishkin | 9ccbfbb | 2017-01-26 11:40:56 +0200 | [diff] [blame] | 9687 | ret = -EINVAL; |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 9688 | if (kernel && event->attr.exclude_kernel) |
| 9689 | goto fail; |
| 9690 | |
Alexander Shishkin | 6ed70cf | 2018-03-29 15:06:48 +0300 | [diff] [blame] | 9691 | /* |
| 9692 | * ACTION "filter" must have a non-zero length region |
| 9693 | * specified. |
| 9694 | */ |
| 9695 | if (filter->action == PERF_ADDR_FILTER_ACTION_FILTER && |
| 9696 | !filter->size) |
| 9697 | goto fail; |
| 9698 | |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 9699 | if (!kernel) { |
| 9700 | if (!filename) |
| 9701 | goto fail; |
| 9702 | |
Alexander Shishkin | 6ce77bf | 2017-01-26 11:40:57 +0200 | [diff] [blame] | 9703 | /* |
| 9704 | * For now, we only support file-based filters |
| 9705 | * in per-task events; doing so for CPU-wide |
| 9706 | * events requires additional context switching |
| 9707 | * trickery, since same object code will be |
| 9708 | * mapped at different virtual addresses in |
| 9709 | * different processes. |
| 9710 | */ |
| 9711 | ret = -EOPNOTSUPP; |
| 9712 | if (!event->ctx->task) |
| 9713 | goto fail_free_name; |
| 9714 | |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 9715 | /* look up the path and grab its inode */ |
Song Liu | 9511bce | 2018-04-17 23:29:07 -0700 | [diff] [blame] | 9716 | ret = kern_path(filename, LOOKUP_FOLLOW, |
| 9717 | &filter->path); |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 9718 | if (ret) |
| 9719 | goto fail_free_name; |
| 9720 | |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 9721 | kfree(filename); |
| 9722 | filename = NULL; |
| 9723 | |
| 9724 | ret = -EINVAL; |
Song Liu | 9511bce | 2018-04-17 23:29:07 -0700 | [diff] [blame] | 9725 | if (!filter->path.dentry || |
| 9726 | !S_ISREG(d_inode(filter->path.dentry) |
| 9727 | ->i_mode)) |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 9728 | goto fail; |
Alexander Shishkin | 6ce77bf | 2017-01-26 11:40:57 +0200 | [diff] [blame] | 9729 | |
| 9730 | event->addr_filters.nr_file_filters++; |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 9731 | } |
| 9732 | |
| 9733 | /* ready to consume more filters */ |
| 9734 | state = IF_STATE_ACTION; |
| 9735 | filter = NULL; |
| 9736 | } |
| 9737 | } |
| 9738 | |
| 9739 | if (state != IF_STATE_ACTION) |
| 9740 | goto fail; |
| 9741 | |
| 9742 | kfree(orig); |
| 9743 | |
| 9744 | return 0; |
| 9745 | |
| 9746 | fail_free_name: |
| 9747 | kfree(filename); |
| 9748 | fail: |
| 9749 | free_filters_list(filters); |
| 9750 | kfree(orig); |
| 9751 | |
| 9752 | return ret; |
| 9753 | } |
| 9754 | |
| 9755 | static int |
| 9756 | perf_event_set_addr_filter(struct perf_event *event, char *filter_str) |
| 9757 | { |
| 9758 | LIST_HEAD(filters); |
| 9759 | int ret; |
| 9760 | |
| 9761 | /* |
| 9762 | * Since this is called in perf_ioctl() path, we're already holding |
| 9763 | * ctx::mutex. |
| 9764 | */ |
| 9765 | lockdep_assert_held(&event->ctx->mutex); |
| 9766 | |
| 9767 | if (WARN_ON_ONCE(event->parent)) |
| 9768 | return -EINVAL; |
| 9769 | |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 9770 | ret = perf_event_parse_addr_filter(event, filter_str, &filters); |
| 9771 | if (ret) |
Alexander Shishkin | 6ce77bf | 2017-01-26 11:40:57 +0200 | [diff] [blame] | 9772 | goto fail_clear_files; |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 9773 | |
| 9774 | ret = event->pmu->addr_filters_validate(&filters); |
Alexander Shishkin | 6ce77bf | 2017-01-26 11:40:57 +0200 | [diff] [blame] | 9775 | if (ret) |
| 9776 | goto fail_free_filters; |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 9777 | |
| 9778 | /* remove existing filters, if any */ |
| 9779 | perf_addr_filters_splice(event, &filters); |
| 9780 | |
| 9781 | /* install new filters */ |
| 9782 | perf_event_for_each_child(event, perf_event_addr_filters_apply); |
| 9783 | |
| 9784 | return ret; |
Alexander Shishkin | 6ce77bf | 2017-01-26 11:40:57 +0200 | [diff] [blame] | 9785 | |
| 9786 | fail_free_filters: |
| 9787 | free_filters_list(&filters); |
| 9788 | |
| 9789 | fail_clear_files: |
| 9790 | event->addr_filters.nr_file_filters = 0; |
| 9791 | |
| 9792 | return ret; |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 9793 | } |
| 9794 | |
Alexander Shishkin | c796bbb | 2016-04-27 18:44:42 +0300 | [diff] [blame] | 9795 | static int perf_event_set_filter(struct perf_event *event, void __user *arg) |
| 9796 | { |
Alexander Shishkin | c796bbb | 2016-04-27 18:44:42 +0300 | [diff] [blame] | 9797 | int ret = -EINVAL; |
Song Liu | e12f03d | 2017-12-06 14:45:15 -0800 | [diff] [blame] | 9798 | char *filter_str; |
Alexander Shishkin | c796bbb | 2016-04-27 18:44:42 +0300 | [diff] [blame] | 9799 | |
| 9800 | filter_str = strndup_user(arg, PAGE_SIZE); |
| 9801 | if (IS_ERR(filter_str)) |
| 9802 | return PTR_ERR(filter_str); |
| 9803 | |
Song Liu | e12f03d | 2017-12-06 14:45:15 -0800 | [diff] [blame] | 9804 | #ifdef CONFIG_EVENT_TRACING |
| 9805 | if (perf_event_is_tracing(event)) { |
| 9806 | struct perf_event_context *ctx = event->ctx; |
| 9807 | |
| 9808 | /* |
| 9809 | * Beware, here be dragons!! |
| 9810 | * |
| 9811 | * the tracepoint muck will deadlock against ctx->mutex, but |
| 9812 | * the tracepoint stuff does not actually need it. So |
| 9813 | * temporarily drop ctx->mutex. As per perf_event_ctx_lock() we |
| 9814 | * already have a reference on ctx. |
| 9815 | * |
| 9816 | * This can result in event getting moved to a different ctx, |
| 9817 | * but that does not affect the tracepoint state. |
| 9818 | */ |
| 9819 | mutex_unlock(&ctx->mutex); |
| 9820 | ret = ftrace_profile_set_filter(event, event->attr.config, filter_str); |
| 9821 | mutex_lock(&ctx->mutex); |
| 9822 | } else |
| 9823 | #endif |
| 9824 | if (has_addr_filter(event)) |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 9825 | ret = perf_event_set_addr_filter(event, filter_str); |
Alexander Shishkin | c796bbb | 2016-04-27 18:44:42 +0300 | [diff] [blame] | 9826 | |
| 9827 | kfree(filter_str); |
| 9828 | return ret; |
| 9829 | } |
| 9830 | |
Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 9831 | /* |
| 9832 | * hrtimer based swevent callback |
| 9833 | */ |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 9834 | |
Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 9835 | static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 9836 | { |
Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 9837 | enum hrtimer_restart ret = HRTIMER_RESTART; |
| 9838 | struct perf_sample_data data; |
| 9839 | struct pt_regs *regs; |
| 9840 | struct perf_event *event; |
| 9841 | u64 period; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 9842 | |
Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 9843 | event = container_of(hrtimer, struct perf_event, hw.hrtimer); |
Peter Zijlstra | ba3dd36 | 2011-02-15 12:41:46 +0100 | [diff] [blame] | 9844 | |
| 9845 | if (event->state != PERF_EVENT_STATE_ACTIVE) |
| 9846 | return HRTIMER_NORESTART; |
| 9847 | |
Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 9848 | event->pmu->read(event); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 9849 | |
Robert Richter | fd0d000 | 2012-04-02 20:19:08 +0200 | [diff] [blame] | 9850 | perf_sample_data_init(&data, 0, event->hw.last_period); |
Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 9851 | regs = get_irq_regs(); |
| 9852 | |
| 9853 | if (regs && !perf_exclude_event(event, regs)) { |
Paul E. McKenney | 77aeeeb | 2011-11-10 16:02:52 -0800 | [diff] [blame] | 9854 | if (!(event->attr.exclude_idle && is_idle_task(current))) |
Robert Richter | 33b07b8 | 2012-04-05 18:24:43 +0200 | [diff] [blame] | 9855 | if (__perf_event_overflow(event, 1, &data, regs)) |
Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 9856 | ret = HRTIMER_NORESTART; |
| 9857 | } |
| 9858 | |
| 9859 | period = max_t(u64, 10000, event->hw.sample_period); |
| 9860 | hrtimer_forward_now(hrtimer, ns_to_ktime(period)); |
| 9861 | |
| 9862 | return ret; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 9863 | } |
| 9864 | |
Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 9865 | static void perf_swevent_start_hrtimer(struct perf_event *event) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 9866 | { |
Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 9867 | struct hw_perf_event *hwc = &event->hw; |
Franck Bui-Huu | 5d508e8 | 2010-11-23 16:21:45 +0100 | [diff] [blame] | 9868 | s64 period; |
| 9869 | |
| 9870 | if (!is_sampling_event(event)) |
| 9871 | return; |
Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 9872 | |
Franck Bui-Huu | 5d508e8 | 2010-11-23 16:21:45 +0100 | [diff] [blame] | 9873 | period = local64_read(&hwc->period_left); |
| 9874 | if (period) { |
| 9875 | if (period < 0) |
| 9876 | period = 10000; |
Peter Zijlstra | fa407f3 | 2010-06-24 12:35:12 +0200 | [diff] [blame] | 9877 | |
Franck Bui-Huu | 5d508e8 | 2010-11-23 16:21:45 +0100 | [diff] [blame] | 9878 | local64_set(&hwc->period_left, 0); |
| 9879 | } else { |
| 9880 | period = max_t(u64, 10000, hwc->sample_period); |
| 9881 | } |
Thomas Gleixner | 3497d20 | 2015-04-14 21:09:03 +0000 | [diff] [blame] | 9882 | hrtimer_start(&hwc->hrtimer, ns_to_ktime(period), |
Sebastian Andrzej Siewior | 30f9028 | 2019-07-26 20:30:53 +0200 | [diff] [blame] | 9883 | HRTIMER_MODE_REL_PINNED_HARD); |
Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 9884 | } |
| 9885 | |
| 9886 | static void perf_swevent_cancel_hrtimer(struct perf_event *event) |
| 9887 | { |
| 9888 | struct hw_perf_event *hwc = &event->hw; |
| 9889 | |
Franck Bui-Huu | 6c7e550 | 2010-11-23 16:21:43 +0100 | [diff] [blame] | 9890 | if (is_sampling_event(event)) { |
Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 9891 | ktime_t remaining = hrtimer_get_remaining(&hwc->hrtimer); |
Peter Zijlstra | fa407f3 | 2010-06-24 12:35:12 +0200 | [diff] [blame] | 9892 | local64_set(&hwc->period_left, ktime_to_ns(remaining)); |
Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 9893 | |
| 9894 | hrtimer_cancel(&hwc->hrtimer); |
| 9895 | } |
| 9896 | } |
| 9897 | |
Peter Zijlstra | ba3dd36 | 2011-02-15 12:41:46 +0100 | [diff] [blame] | 9898 | static void perf_swevent_init_hrtimer(struct perf_event *event) |
| 9899 | { |
| 9900 | struct hw_perf_event *hwc = &event->hw; |
| 9901 | |
| 9902 | if (!is_sampling_event(event)) |
| 9903 | return; |
| 9904 | |
Sebastian Andrzej Siewior | 30f9028 | 2019-07-26 20:30:53 +0200 | [diff] [blame] | 9905 | hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD); |
Peter Zijlstra | ba3dd36 | 2011-02-15 12:41:46 +0100 | [diff] [blame] | 9906 | hwc->hrtimer.function = perf_swevent_hrtimer; |
| 9907 | |
| 9908 | /* |
| 9909 | * Since hrtimers have a fixed rate, we can do a static freq->period |
| 9910 | * mapping and avoid the whole period adjust feedback stuff. |
| 9911 | */ |
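	/*
	 * E.g. (illustrative): sample_freq = 4000 Hz results in a fixed
	 * sample_period of NSEC_PER_SEC / 4000 = 250000 ns.
	 */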
| 9912 | if (event->attr.freq) { |
| 9913 | long freq = event->attr.sample_freq; |
| 9914 | |
| 9915 | event->attr.sample_period = NSEC_PER_SEC / freq; |
| 9916 | hwc->sample_period = event->attr.sample_period; |
| 9917 | local64_set(&hwc->period_left, hwc->sample_period); |
Namhyung Kim | 778141e | 2013-03-18 11:41:46 +0900 | [diff] [blame] | 9918 | hwc->last_period = hwc->sample_period; |
Peter Zijlstra | ba3dd36 | 2011-02-15 12:41:46 +0100 | [diff] [blame] | 9919 | event->attr.freq = 0; |
| 9920 | } |
| 9921 | } |
| 9922 | |
Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 9923 | /* |
| 9924 | * Software event: cpu wall time clock |
| 9925 | */ |
| 9926 | |
| 9927 | static void cpu_clock_event_update(struct perf_event *event) |
| 9928 | { |
Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 9929 | s64 prev; |
| 9930 | u64 now; |
| 9931 | |
Peter Zijlstra | a4eaf7f | 2010-06-16 14:37:10 +0200 | [diff] [blame] | 9932 | now = local_clock(); |
Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 9933 | prev = local64_xchg(&event->hw.prev_count, now); |
| 9934 | local64_add(now - prev, &event->count); |
| 9935 | } |
| 9936 | |
Peter Zijlstra | a4eaf7f | 2010-06-16 14:37:10 +0200 | [diff] [blame] | 9937 | static void cpu_clock_event_start(struct perf_event *event, int flags) |
Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 9938 | { |
Peter Zijlstra | a4eaf7f | 2010-06-16 14:37:10 +0200 | [diff] [blame] | 9939 | local64_set(&event->hw.prev_count, local_clock()); |
Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 9940 | perf_swevent_start_hrtimer(event); |
Peter Zijlstra | a4eaf7f | 2010-06-16 14:37:10 +0200 | [diff] [blame] | 9941 | } |
| 9942 | |
| 9943 | static void cpu_clock_event_stop(struct perf_event *event, int flags) |
| 9944 | { |
| 9945 | perf_swevent_cancel_hrtimer(event); |
| 9946 | cpu_clock_event_update(event); |
| 9947 | } |
| 9948 | |
| 9949 | static int cpu_clock_event_add(struct perf_event *event, int flags) |
| 9950 | { |
| 9951 | if (flags & PERF_EF_START) |
| 9952 | cpu_clock_event_start(event, flags); |
Shaohua Li | 6a694a6 | 2015-02-05 15:55:32 -0800 | [diff] [blame] | 9953 | perf_event_update_userpage(event); |
Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 9954 | |
| 9955 | return 0; |
| 9956 | } |
| 9957 | |
Peter Zijlstra | a4eaf7f | 2010-06-16 14:37:10 +0200 | [diff] [blame] | 9958 | static void cpu_clock_event_del(struct perf_event *event, int flags) |
Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 9959 | { |
Peter Zijlstra | a4eaf7f | 2010-06-16 14:37:10 +0200 | [diff] [blame] | 9960 | cpu_clock_event_stop(event, flags); |
Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 9961 | } |
| 9962 | |
| 9963 | static void cpu_clock_event_read(struct perf_event *event) |
| 9964 | { |
| 9965 | cpu_clock_event_update(event); |
| 9966 | } |
| 9967 | |
| 9968 | static int cpu_clock_event_init(struct perf_event *event) |
| 9969 | { |
| 9970 | if (event->attr.type != PERF_TYPE_SOFTWARE) |
| 9971 | return -ENOENT; |
| 9972 | |
| 9973 | if (event->attr.config != PERF_COUNT_SW_CPU_CLOCK) |
| 9974 | return -ENOENT; |
| 9975 | |
Stephane Eranian | 2481c5f | 2012-02-09 23:20:59 +0100 | [diff] [blame] | 9976 | /* |
| 9977 | * no branch sampling for software events |
| 9978 | */ |
| 9979 | if (has_branch_stack(event)) |
| 9980 | return -EOPNOTSUPP; |
| 9981 | |
Peter Zijlstra | ba3dd36 | 2011-02-15 12:41:46 +0100 | [diff] [blame] | 9982 | perf_swevent_init_hrtimer(event); |
| 9983 | |
Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 9984 | return 0; |
| 9985 | } |
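
/*
 * Illustrative sketch: user space selects this event by passing
 *
 *	struct perf_event_attr attr = {
 *		.type   = PERF_TYPE_SOFTWARE,
 *		.config = PERF_COUNT_SW_CPU_CLOCK,
 *	};
 *
 * to perf_event_open(); the checks above reject anything else.
 */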
| 9986 | |
| 9987 | static struct pmu perf_cpu_clock = { |
Peter Zijlstra | 89a1e18 | 2010-09-07 17:34:50 +0200 | [diff] [blame] | 9988 | .task_ctx_nr = perf_sw_context, |
| 9989 | |
Peter Zijlstra | 34f4392 | 2015-02-20 14:05:38 +0100 | [diff] [blame] | 9990 | .capabilities = PERF_PMU_CAP_NO_NMI, |
| 9991 | |
Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 9992 | .event_init = cpu_clock_event_init, |
Peter Zijlstra | a4eaf7f | 2010-06-16 14:37:10 +0200 | [diff] [blame] | 9993 | .add = cpu_clock_event_add, |
| 9994 | .del = cpu_clock_event_del, |
| 9995 | .start = cpu_clock_event_start, |
| 9996 | .stop = cpu_clock_event_stop, |
Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 9997 | .read = cpu_clock_event_read, |
| 9998 | }; |
| 9999 | |
| 10000 | /* |
| 10001 | * Software event: task time clock |
| 10002 | */ |
| 10003 | |
| 10004 | static void task_clock_event_update(struct perf_event *event, u64 now) |
| 10005 | { |
| 10006 | u64 prev; |
| 10007 | s64 delta; |
| 10008 | |
| 10009 | prev = local64_xchg(&event->hw.prev_count, now); |
| 10010 | delta = now - prev; |
| 10011 | local64_add(delta, &event->count); |
| 10012 | } |
| 10013 | |
Peter Zijlstra | a4eaf7f | 2010-06-16 14:37:10 +0200 | [diff] [blame] | 10014 | static void task_clock_event_start(struct perf_event *event, int flags) |
Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 10015 | { |
Peter Zijlstra | a4eaf7f | 2010-06-16 14:37:10 +0200 | [diff] [blame] | 10016 | local64_set(&event->hw.prev_count, event->ctx->time); |
Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 10017 | perf_swevent_start_hrtimer(event); |
Peter Zijlstra | a4eaf7f | 2010-06-16 14:37:10 +0200 | [diff] [blame] | 10018 | } |
| 10019 | |
| 10020 | static void task_clock_event_stop(struct perf_event *event, int flags) |
| 10021 | { |
| 10022 | perf_swevent_cancel_hrtimer(event); |
| 10023 | task_clock_event_update(event, event->ctx->time); |
| 10024 | } |
| 10025 | |
| 10026 | static int task_clock_event_add(struct perf_event *event, int flags) |
| 10027 | { |
| 10028 | if (flags & PERF_EF_START) |
| 10029 | task_clock_event_start(event, flags); |
Shaohua Li | 6a694a6 | 2015-02-05 15:55:32 -0800 | [diff] [blame] | 10030 | perf_event_update_userpage(event); |
Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 10031 | |
| 10032 | return 0; |
| 10033 | } |
| 10034 | |
Peter Zijlstra | a4eaf7f | 2010-06-16 14:37:10 +0200 | [diff] [blame] | 10035 | static void task_clock_event_del(struct perf_event *event, int flags) |
Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 10036 | { |
Peter Zijlstra | a4eaf7f | 2010-06-16 14:37:10 +0200 | [diff] [blame] | 10037 | task_clock_event_stop(event, PERF_EF_UPDATE); |
Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 10038 | } |
| 10039 | |
| 10040 | static void task_clock_event_read(struct perf_event *event) |
| 10041 | { |
Peter Zijlstra | 768a06e | 2011-02-22 16:52:24 +0100 | [diff] [blame] | 10042 | u64 now = perf_clock(); |
| 10043 | u64 delta = now - event->ctx->timestamp; |
| 10044 | u64 time = event->ctx->time + delta; |
Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 10045 | |
| 10046 | task_clock_event_update(event, time); |
| 10047 | } |
| 10048 | |
| 10049 | static int task_clock_event_init(struct perf_event *event) |
| 10050 | { |
| 10051 | if (event->attr.type != PERF_TYPE_SOFTWARE) |
| 10052 | return -ENOENT; |
| 10053 | |
| 10054 | if (event->attr.config != PERF_COUNT_SW_TASK_CLOCK) |
| 10055 | return -ENOENT; |
| 10056 | |
Stephane Eranian | 2481c5f | 2012-02-09 23:20:59 +0100 | [diff] [blame] | 10057 | /* |
| 10058 | * no branch sampling for software events |
| 10059 | */ |
| 10060 | if (has_branch_stack(event)) |
| 10061 | return -EOPNOTSUPP; |
| 10062 | |
Peter Zijlstra | ba3dd36 | 2011-02-15 12:41:46 +0100 | [diff] [blame] | 10063 | perf_swevent_init_hrtimer(event); |
| 10064 | |
Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 10065 | return 0; |
| 10066 | } |
| 10067 | |
| 10068 | static struct pmu perf_task_clock = { |
Peter Zijlstra | 89a1e18 | 2010-09-07 17:34:50 +0200 | [diff] [blame] | 10069 | .task_ctx_nr = perf_sw_context, |
| 10070 | |
Peter Zijlstra | 34f4392 | 2015-02-20 14:05:38 +0100 | [diff] [blame] | 10071 | .capabilities = PERF_PMU_CAP_NO_NMI, |
| 10072 | |
Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 10073 | .event_init = task_clock_event_init, |
Peter Zijlstra | a4eaf7f | 2010-06-16 14:37:10 +0200 | [diff] [blame] | 10074 | .add = task_clock_event_add, |
| 10075 | .del = task_clock_event_del, |
| 10076 | .start = task_clock_event_start, |
| 10077 | .stop = task_clock_event_stop, |
Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 10078 | .read = task_clock_event_read, |
| 10079 | }; |
| 10080 | |
Peter Zijlstra | ad5133b | 2010-06-15 12:22:39 +0200 | [diff] [blame] | 10081 | static void perf_pmu_nop_void(struct pmu *pmu) |
| 10082 | { |
| 10083 | } |
| 10084 | |
Sukadev Bhattiprolu | fbbe070 | 2015-09-03 20:07:45 -0700 | [diff] [blame] | 10085 | static void perf_pmu_nop_txn(struct pmu *pmu, unsigned int flags) |
| 10086 | { |
| 10087 | } |
| 10088 | |
Peter Zijlstra | ad5133b | 2010-06-15 12:22:39 +0200 | [diff] [blame] | 10089 | static int perf_pmu_nop_int(struct pmu *pmu) |
| 10090 | { |
| 10091 | return 0; |
| 10092 | } |
| 10093 | |
Jiri Olsa | 81ec3f3 | 2019-02-04 13:35:32 +0100 | [diff] [blame] | 10094 | static int perf_event_nop_int(struct perf_event *event, u64 value) |
| 10095 | { |
| 10096 | return 0; |
| 10097 | } |
| 10098 | |
Geliang Tang | 18ab2cd | 2015-09-27 23:25:50 +0800 | [diff] [blame] | 10099 | static DEFINE_PER_CPU(unsigned int, nop_txn_flags); |
Sukadev Bhattiprolu | fbbe070 | 2015-09-03 20:07:45 -0700 | [diff] [blame] | 10100 | |
| 10101 | static void perf_pmu_start_txn(struct pmu *pmu, unsigned int flags) |
Peter Zijlstra | ad5133b | 2010-06-15 12:22:39 +0200 | [diff] [blame] | 10102 | { |
Sukadev Bhattiprolu | fbbe070 | 2015-09-03 20:07:45 -0700 | [diff] [blame] | 10103 | __this_cpu_write(nop_txn_flags, flags); |
| 10104 | |
| 10105 | if (flags & ~PERF_PMU_TXN_ADD) |
| 10106 | return; |
| 10107 | |
Peter Zijlstra | ad5133b | 2010-06-15 12:22:39 +0200 | [diff] [blame] | 10108 | perf_pmu_disable(pmu); |
| 10109 | } |
| 10110 | |
| 10111 | static int perf_pmu_commit_txn(struct pmu *pmu) |
| 10112 | { |
Sukadev Bhattiprolu | fbbe070 | 2015-09-03 20:07:45 -0700 | [diff] [blame] | 10113 | unsigned int flags = __this_cpu_read(nop_txn_flags); |
| 10114 | |
| 10115 | __this_cpu_write(nop_txn_flags, 0); |
| 10116 | |
| 10117 | if (flags & ~PERF_PMU_TXN_ADD) |
| 10118 | return 0; |
| 10119 | |
Peter Zijlstra | ad5133b | 2010-06-15 12:22:39 +0200 | [diff] [blame] | 10120 | perf_pmu_enable(pmu); |
| 10121 | return 0; |
| 10122 | } |
| 10123 | |
| 10124 | static void perf_pmu_cancel_txn(struct pmu *pmu) |
| 10125 | { |
Sukadev Bhattiprolu | fbbe070 | 2015-09-03 20:07:45 -0700 | [diff] [blame] | 10126 | unsigned int flags = __this_cpu_read(nop_txn_flags); |
| 10127 | |
| 10128 | __this_cpu_write(nop_txn_flags, 0); |
| 10129 | |
| 10130 | if (flags & ~PERF_PMU_TXN_ADD) |
| 10131 | return; |
| 10132 | |
Peter Zijlstra | ad5133b | 2010-06-15 12:22:39 +0200 | [diff] [blame] | 10133 | perf_pmu_enable(pmu); |
| 10134 | } |
| 10135 | |
Peter Zijlstra | 35edc2a | 2011-11-20 20:36:02 +0100 | [diff] [blame] | 10136 | static int perf_event_idx_default(struct perf_event *event) |
| 10137 | { |
Peter Zijlstra | c719f56 | 2014-10-21 11:10:21 +0200 | [diff] [blame] | 10138 | return 0; |
Peter Zijlstra | 35edc2a | 2011-11-20 20:36:02 +0100 | [diff] [blame] | 10139 | } |
| 10140 | |
Peter Zijlstra | 8dc85d547 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 10141 | /* |
| 10142 | * Ensures all contexts with the same task_ctx_nr have the same |
| 10143 | * pmu_cpu_context too. |
| 10144 | */ |
Mark Rutland | 9e31704 | 2014-02-10 17:44:18 +0000 | [diff] [blame] | 10145 | static struct perf_cpu_context __percpu *find_pmu_context(int ctxn) |
Peter Zijlstra | 8dc85d547 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 10146 | { |
| 10147 | struct pmu *pmu; |
| 10148 | |
| 10149 | if (ctxn < 0) |
| 10150 | return NULL; |
| 10151 | |
| 10152 | list_for_each_entry(pmu, &pmus, entry) { |
| 10153 | if (pmu->task_ctx_nr == ctxn) |
| 10154 | return pmu->pmu_cpu_context; |
| 10155 | } |
| 10156 | |
| 10157 | return NULL; |
| 10158 | } |
| 10159 | |
Peter Zijlstra | 5167695 | 2010-12-07 14:18:20 +0100 | [diff] [blame] | 10160 | static void free_pmu_context(struct pmu *pmu) |
| 10161 | { |
Will Deacon | df0062b | 2017-10-03 15:20:50 +0100 | [diff] [blame] | 10162 | /* |
| 10163 | * Static contexts such as perf_sw_context have a global lifetime |
| 10164 | * and may be shared between different PMUs. Avoid freeing them |
| 10165 | * when a single PMU is going away. |
| 10166 | */ |
| 10167 | if (pmu->task_ctx_nr > perf_invalid_context) |
| 10168 | return; |
| 10169 | |
Peter Zijlstra | 5167695 | 2010-12-07 14:18:20 +0100 | [diff] [blame] | 10170 | free_percpu(pmu->pmu_cpu_context); |
Peter Zijlstra | 8dc85d547 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 10171 | } |
Alexander Shishkin | 6e855cd | 2016-04-27 18:44:48 +0300 | [diff] [blame] | 10172 | |
| 10173 | /* |
| 10174 | * Let userspace know that this PMU supports address range filtering: |
| 10175 | */ |
| 10176 | static ssize_t nr_addr_filters_show(struct device *dev, |
| 10177 | struct device_attribute *attr, |
| 10178 | char *page) |
| 10179 | { |
| 10180 | struct pmu *pmu = dev_get_drvdata(dev); |
| 10181 | |
| 10182 | return snprintf(page, PAGE_SIZE - 1, "%d\n", pmu->nr_addr_filters); |
| 10183 | } |
| 10184 | DEVICE_ATTR_RO(nr_addr_filters); |
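
/*
 * Example (illustrative): for a hypothetical PMU registered as "my_pmu",
 * this attribute appears as
 * /sys/bus/event_source/devices/my_pmu/nr_addr_filters and can be read
 * with a plain cat(1).
 */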
| 10185 | |
Peter Zijlstra | 2e80a82 | 2010-11-17 23:17:36 +0100 | [diff] [blame] | 10186 | static struct idr pmu_idr; |
Peter Zijlstra | 8dc85d547 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 10187 | |
Peter Zijlstra | abe4340 | 2010-11-17 23:17:37 +0100 | [diff] [blame] | 10188 | static ssize_t |
| 10189 | type_show(struct device *dev, struct device_attribute *attr, char *page) |
| 10190 | { |
| 10191 | struct pmu *pmu = dev_get_drvdata(dev); |
| 10192 | |
| 10193 | return snprintf(page, PAGE_SIZE-1, "%d\n", pmu->type); |
| 10194 | } |
Greg Kroah-Hartman | 90826ca | 2013-08-23 14:24:40 -0700 | [diff] [blame] | 10195 | static DEVICE_ATTR_RO(type); |
Peter Zijlstra | abe4340 | 2010-11-17 23:17:37 +0100 | [diff] [blame] | 10196 | |
Stephane Eranian | 62b8563 | 2013-04-03 14:21:34 +0200 | [diff] [blame] | 10197 | static ssize_t |
| 10198 | perf_event_mux_interval_ms_show(struct device *dev, |
| 10199 | struct device_attribute *attr, |
| 10200 | char *page) |
| 10201 | { |
| 10202 | struct pmu *pmu = dev_get_drvdata(dev); |
| 10203 | |
| 10204 | return snprintf(page, PAGE_SIZE-1, "%d\n", pmu->hrtimer_interval_ms); |
| 10205 | } |
| 10206 | |
Peter Zijlstra | 272325c | 2015-04-15 11:41:58 +0200 | [diff] [blame] | 10207 | static DEFINE_MUTEX(mux_interval_mutex); |
| 10208 | |
Stephane Eranian | 62b8563 | 2013-04-03 14:21:34 +0200 | [diff] [blame] | 10209 | static ssize_t |
| 10210 | perf_event_mux_interval_ms_store(struct device *dev, |
| 10211 | struct device_attribute *attr, |
| 10212 | const char *buf, size_t count) |
| 10213 | { |
| 10214 | struct pmu *pmu = dev_get_drvdata(dev); |
| 10215 | int timer, cpu, ret; |
| 10216 | |
| 10217 | ret = kstrtoint(buf, 0, &timer); |
| 10218 | if (ret) |
| 10219 | return ret; |
| 10220 | |
| 10221 | if (timer < 1) |
| 10222 | return -EINVAL; |
| 10223 | |
| 10224 | /* same value, nothing to do */ |
| 10225 | if (timer == pmu->hrtimer_interval_ms) |
| 10226 | return count; |
| 10227 | |
Peter Zijlstra | 272325c | 2015-04-15 11:41:58 +0200 | [diff] [blame] | 10228 | mutex_lock(&mux_interval_mutex); |
Stephane Eranian | 62b8563 | 2013-04-03 14:21:34 +0200 | [diff] [blame] | 10229 | pmu->hrtimer_interval_ms = timer; |
| 10230 | |
| 10231 | /* update all cpuctx for this PMU */ |
Thomas Gleixner | a63fbed | 2017-05-24 10:15:34 +0200 | [diff] [blame] | 10232 | cpus_read_lock(); |
Peter Zijlstra | 272325c | 2015-04-15 11:41:58 +0200 | [diff] [blame] | 10233 | for_each_online_cpu(cpu) { |
Stephane Eranian | 62b8563 | 2013-04-03 14:21:34 +0200 | [diff] [blame] | 10234 | struct perf_cpu_context *cpuctx; |
| 10235 | cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu); |
| 10236 | cpuctx->hrtimer_interval = ns_to_ktime(NSEC_PER_MSEC * timer); |
| 10237 | |
Peter Zijlstra | 272325c | 2015-04-15 11:41:58 +0200 | [diff] [blame] | 10238 | cpu_function_call(cpu, |
| 10239 | (remote_function_f)perf_mux_hrtimer_restart, cpuctx); |
Stephane Eranian | 62b8563 | 2013-04-03 14:21:34 +0200 | [diff] [blame] | 10240 | } |
Thomas Gleixner | a63fbed | 2017-05-24 10:15:34 +0200 | [diff] [blame] | 10241 | cpus_read_unlock(); |
Peter Zijlstra | 272325c | 2015-04-15 11:41:58 +0200 | [diff] [blame] | 10242 | mutex_unlock(&mux_interval_mutex); |
Stephane Eranian | 62b8563 | 2013-04-03 14:21:34 +0200 | [diff] [blame] | 10243 | |
| 10244 | return count; |
| 10245 | } |
Greg Kroah-Hartman | 90826ca | 2013-08-23 14:24:40 -0700 | [diff] [blame] | 10246 | static DEVICE_ATTR_RW(perf_event_mux_interval_ms); |
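| | /*
| |  * Illustrative note (not from the original source): since the PMU devices
| |  * hang off the "event_source" bus defined below, this knob is typically
| |  * reachable as, e.g.:
| |  *
| |  *	echo 4 > /sys/bus/event_source/devices/<pmu>/perf_event_mux_interval_ms
| |  *
| |  * which reprograms the multiplexing hrtimer period (in milliseconds) on
| |  * every online CPU for that PMU via the store routine above.
| |  */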
Stephane Eranian | 62b8563 | 2013-04-03 14:21:34 +0200 | [diff] [blame] | 10247 | |
Greg Kroah-Hartman | 90826ca | 2013-08-23 14:24:40 -0700 | [diff] [blame] | 10248 | static struct attribute *pmu_dev_attrs[] = { |
| 10249 | &dev_attr_type.attr, |
| 10250 | &dev_attr_perf_event_mux_interval_ms.attr, |
| 10251 | NULL, |
Peter Zijlstra | abe4340 | 2010-11-17 23:17:37 +0100 | [diff] [blame] | 10252 | }; |
Greg Kroah-Hartman | 90826ca | 2013-08-23 14:24:40 -0700 | [diff] [blame] | 10253 | ATTRIBUTE_GROUPS(pmu_dev); |
Peter Zijlstra | abe4340 | 2010-11-17 23:17:37 +0100 | [diff] [blame] | 10254 | |
| 10255 | static int pmu_bus_running; |
| 10256 | static struct bus_type pmu_bus = { |
| 10257 | .name = "event_source", |
Greg Kroah-Hartman | 90826ca | 2013-08-23 14:24:40 -0700 | [diff] [blame] | 10258 | .dev_groups = pmu_dev_groups, |
Peter Zijlstra | abe4340 | 2010-11-17 23:17:37 +0100 | [diff] [blame] | 10259 | }; |
| 10260 | |
| 10261 | static void pmu_dev_release(struct device *dev) |
| 10262 | { |
| 10263 | kfree(dev); |
| 10264 | } |
| 10265 | |
| 10266 | static int pmu_dev_alloc(struct pmu *pmu) |
| 10267 | { |
| 10268 | int ret = -ENOMEM; |
| 10269 | |
| 10270 | pmu->dev = kzalloc(sizeof(struct device), GFP_KERNEL); |
| 10271 | if (!pmu->dev) |
| 10272 | goto out; |
| 10273 | |
Peter Zijlstra | 0c9d42e | 2011-11-20 23:30:47 +0100 | [diff] [blame] | 10274 | pmu->dev->groups = pmu->attr_groups; |
Peter Zijlstra | abe4340 | 2010-11-17 23:17:37 +0100 | [diff] [blame] | 10275 | device_initialize(pmu->dev); |
| 10276 | ret = dev_set_name(pmu->dev, "%s", pmu->name); |
| 10277 | if (ret) |
| 10278 | goto free_dev; |
| 10279 | |
| 10280 | dev_set_drvdata(pmu->dev, pmu); |
| 10281 | pmu->dev->bus = &pmu_bus; |
| 10282 | pmu->dev->release = pmu_dev_release; |
| 10283 | ret = device_add(pmu->dev); |
| 10284 | if (ret) |
| 10285 | goto free_dev; |
| 10286 | |
Alexander Shishkin | 6e855cd | 2016-04-27 18:44:48 +0300 | [diff] [blame] | 10287 | /* For PMUs with address filters, throw in an extra attribute: */ |
| 10288 | if (pmu->nr_addr_filters) |
| 10289 | ret = device_create_file(pmu->dev, &dev_attr_nr_addr_filters); |
| 10290 | |
| 10291 | if (ret) |
| 10292 | goto del_dev; |
| 10293 | |
Jiri Olsa | f3a3a82 | 2019-05-12 17:55:11 +0200 | [diff] [blame] | 10294 | if (pmu->attr_update) |
| 10295 | ret = sysfs_update_groups(&pmu->dev->kobj, pmu->attr_update); |
| 10296 | |
| 10297 | if (ret) |
| 10298 | goto del_dev; |
| 10299 | |
Peter Zijlstra | abe4340 | 2010-11-17 23:17:37 +0100 | [diff] [blame] | 10300 | out: |
| 10301 | return ret; |
| 10302 | |
Alexander Shishkin | 6e855cd | 2016-04-27 18:44:48 +0300 | [diff] [blame] | 10303 | del_dev: |
| 10304 | device_del(pmu->dev); |
| 10305 | |
Peter Zijlstra | abe4340 | 2010-11-17 23:17:37 +0100 | [diff] [blame] | 10306 | free_dev: |
| 10307 | put_device(pmu->dev); |
| 10308 | goto out; |
| 10309 | } |
| 10310 | |
Peter Zijlstra | 547e9fd | 2011-01-19 12:51:39 +0100 | [diff] [blame] | 10311 | static struct lock_class_key cpuctx_mutex; |
Peter Zijlstra | facc430 | 2011-04-09 21:17:42 +0200 | [diff] [blame] | 10312 | static struct lock_class_key cpuctx_lock; |
Peter Zijlstra | 547e9fd | 2011-01-19 12:51:39 +0100 | [diff] [blame] | 10313 | |
Mischa Jonker | 03d8e80 | 2013-06-04 11:45:48 +0200 | [diff] [blame] | 10314 | int perf_pmu_register(struct pmu *pmu, const char *name, int type) |
Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 10315 | { |
Peter Zijlstra | 66d258c | 2019-10-17 20:31:03 +0200 | [diff] [blame] | 10316 | int cpu, ret, max = PERF_TYPE_MAX; |
Peter Zijlstra | 33696fc | 2010-06-14 08:49:00 +0200 | [diff] [blame] | 10317 | |
Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 10318 | mutex_lock(&pmus_lock); |
Peter Zijlstra | 33696fc | 2010-06-14 08:49:00 +0200 | [diff] [blame] | 10319 | ret = -ENOMEM; |
| 10320 | pmu->pmu_disable_count = alloc_percpu(int); |
| 10321 | if (!pmu->pmu_disable_count) |
| 10322 | goto unlock; |
Peter Zijlstra | ad5133b | 2010-06-15 12:22:39 +0200 | [diff] [blame] | 10323 | |
Peter Zijlstra | 2e80a82 | 2010-11-17 23:17:36 +0100 | [diff] [blame] | 10324 | pmu->type = -1; |
| 10325 | if (!name) |
| 10326 | goto skip_type; |
| 10327 | pmu->name = name; |
| 10328 | |
Peter Zijlstra | 66d258c | 2019-10-17 20:31:03 +0200 | [diff] [blame] | 10329 | if (type != PERF_TYPE_SOFTWARE) { |
| 10330 | if (type >= 0) |
| 10331 | max = type; |
| 10332 | |
| 10333 | ret = idr_alloc(&pmu_idr, pmu, max, 0, GFP_KERNEL); |
| 10334 | if (ret < 0) |
Peter Zijlstra | 2e80a82 | 2010-11-17 23:17:36 +0100 | [diff] [blame] | 10335 | goto free_pdc; |
Peter Zijlstra | 66d258c | 2019-10-17 20:31:03 +0200 | [diff] [blame] | 10336 | |
| 10337 | WARN_ON(type >= 0 && ret != type); |
| 10338 | |
| 10339 | type = ret; |
Peter Zijlstra | 2e80a82 | 2010-11-17 23:17:36 +0100 | [diff] [blame] | 10340 | } |
| 10341 | pmu->type = type; |
| 10342 | |
Peter Zijlstra | abe4340 | 2010-11-17 23:17:37 +0100 | [diff] [blame] | 10343 | if (pmu_bus_running) { |
| 10344 | ret = pmu_dev_alloc(pmu); |
| 10345 | if (ret) |
| 10346 | goto free_idr; |
| 10347 | } |
| 10348 | |
Peter Zijlstra | 2e80a82 | 2010-11-17 23:17:36 +0100 | [diff] [blame] | 10349 | skip_type: |
Peter Zijlstra | 2665784 | 2016-03-22 22:09:18 +0100 | [diff] [blame] | 10350 | if (pmu->task_ctx_nr == perf_hw_context) { |
| 10351 | static int hw_context_taken = 0; |
| 10352 | |
Mark Rutland | 5101ef2 | 2016-04-26 11:33:46 +0100 | [diff] [blame] | 10353 | /* |
| 10354 | * Other than systems with heterogeneous CPUs, it never makes |
| 10355 | * sense for two PMUs to share perf_hw_context. PMUs which are |
| 10356 | * uncore must use perf_invalid_context. |
| 10357 | */ |
| 10358 | if (WARN_ON_ONCE(hw_context_taken && |
| 10359 | !(pmu->capabilities & PERF_PMU_CAP_HETEROGENEOUS_CPUS))) |
Peter Zijlstra | 2665784 | 2016-03-22 22:09:18 +0100 | [diff] [blame] | 10360 | pmu->task_ctx_nr = perf_invalid_context; |
| 10361 | |
| 10362 | hw_context_taken = 1; |
| 10363 | } |
| 10364 | |
Peter Zijlstra | 8dc85d547 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 10365 | pmu->pmu_cpu_context = find_pmu_context(pmu->task_ctx_nr); |
| 10366 | if (pmu->pmu_cpu_context) |
| 10367 | goto got_cpu_context; |
| 10368 | |
Wei Yongjun | c481420 | 2013-04-12 11:05:54 +0800 | [diff] [blame] | 10369 | ret = -ENOMEM; |
Peter Zijlstra | 108b02c | 2010-09-06 14:32:03 +0200 | [diff] [blame] | 10370 | pmu->pmu_cpu_context = alloc_percpu(struct perf_cpu_context); |
| 10371 | if (!pmu->pmu_cpu_context) |
Peter Zijlstra | abe4340 | 2010-11-17 23:17:37 +0100 | [diff] [blame] | 10372 | goto free_dev; |
Peter Zijlstra | 108b02c | 2010-09-06 14:32:03 +0200 | [diff] [blame] | 10373 | |
| 10374 | for_each_possible_cpu(cpu) { |
| 10375 | struct perf_cpu_context *cpuctx; |
| 10376 | |
| 10377 | cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu); |
Peter Zijlstra | eb18447 | 2010-09-07 15:55:13 +0200 | [diff] [blame] | 10378 | __perf_event_init_context(&cpuctx->ctx); |
Peter Zijlstra | 547e9fd | 2011-01-19 12:51:39 +0100 | [diff] [blame] | 10379 | lockdep_set_class(&cpuctx->ctx.mutex, &cpuctx_mutex); |
Peter Zijlstra | facc430 | 2011-04-09 21:17:42 +0200 | [diff] [blame] | 10380 | lockdep_set_class(&cpuctx->ctx.lock, &cpuctx_lock); |
Peter Zijlstra | 108b02c | 2010-09-06 14:32:03 +0200 | [diff] [blame] | 10381 | cpuctx->ctx.pmu = pmu; |
Thomas Gleixner | a63fbed | 2017-05-24 10:15:34 +0200 | [diff] [blame] | 10382 | cpuctx->online = cpumask_test_cpu(cpu, perf_online_mask); |
Stephane Eranian | 9e63020 | 2013-04-03 14:21:33 +0200 | [diff] [blame] | 10383 | |
Peter Zijlstra | 272325c | 2015-04-15 11:41:58 +0200 | [diff] [blame] | 10384 | __perf_mux_hrtimer_init(cpuctx, cpu); |
Ian Rogers | 836196be | 2020-02-13 23:51:31 -0800 | [diff] [blame^] | 10385 | |
| 10386 | cpuctx->heap_size = ARRAY_SIZE(cpuctx->heap_default); |
| 10387 | cpuctx->heap = cpuctx->heap_default; |
Peter Zijlstra | 108b02c | 2010-09-06 14:32:03 +0200 | [diff] [blame] | 10388 | } |
| 10389 | |
Peter Zijlstra | 8dc85d547 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 10390 | got_cpu_context: |
Peter Zijlstra | ad5133b | 2010-06-15 12:22:39 +0200 | [diff] [blame] | 10391 | if (!pmu->start_txn) { |
| 10392 | if (pmu->pmu_enable) { |
| 10393 | /* |
| 10394 | * If we have pmu_enable/pmu_disable calls, install |
| 10395 | * transaction stubs that use that to try and batch |
| 10396 | * hardware accesses. |
| 10397 | */ |
| 10398 | pmu->start_txn = perf_pmu_start_txn; |
| 10399 | pmu->commit_txn = perf_pmu_commit_txn; |
| 10400 | pmu->cancel_txn = perf_pmu_cancel_txn; |
| 10401 | } else { |
Sukadev Bhattiprolu | fbbe070 | 2015-09-03 20:07:45 -0700 | [diff] [blame] | 10402 | pmu->start_txn = perf_pmu_nop_txn; |
Peter Zijlstra | ad5133b | 2010-06-15 12:22:39 +0200 | [diff] [blame] | 10403 | pmu->commit_txn = perf_pmu_nop_int; |
| 10404 | pmu->cancel_txn = perf_pmu_nop_void; |
| 10405 | } |
| 10406 | } |
| 10407 | |
| 10408 | if (!pmu->pmu_enable) { |
| 10409 | pmu->pmu_enable = perf_pmu_nop_void; |
| 10410 | pmu->pmu_disable = perf_pmu_nop_void; |
| 10411 | } |
| 10412 | |
Jiri Olsa | 81ec3f3 | 2019-02-04 13:35:32 +0100 | [diff] [blame] | 10413 | if (!pmu->check_period) |
| 10414 | pmu->check_period = perf_event_nop_int; |
| 10415 | |
Peter Zijlstra | 35edc2a | 2011-11-20 20:36:02 +0100 | [diff] [blame] | 10416 | if (!pmu->event_idx) |
| 10417 | pmu->event_idx = perf_event_idx_default; |
| 10418 | |
Liang, Kan | d44f821 | 2019-10-22 11:13:09 +0200 | [diff] [blame] | 10419 | /* |
| 10420 | * Ensure the TYPE_SOFTWARE PMUs are at the head of the list, |
| 10421 | * since these cannot be in the IDR. This way the linear search |
| 10422 | * is fast when a valid software event is requested.
| 10423 | */ |
| 10424 | if (type == PERF_TYPE_SOFTWARE || !name) |
| 10425 | list_add_rcu(&pmu->entry, &pmus); |
| 10426 | else |
| 10427 | list_add_tail_rcu(&pmu->entry, &pmus); |
| 10428 | |
Alexander Shishkin | bed5b25 | 2015-01-30 12:31:06 +0200 | [diff] [blame] | 10429 | atomic_set(&pmu->exclusive_cnt, 0); |
Peter Zijlstra | 33696fc | 2010-06-14 08:49:00 +0200 | [diff] [blame] | 10430 | ret = 0; |
| 10431 | unlock: |
Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 10432 | mutex_unlock(&pmus_lock); |
| 10433 | |
Peter Zijlstra | 33696fc | 2010-06-14 08:49:00 +0200 | [diff] [blame] | 10434 | return ret; |
Peter Zijlstra | 108b02c | 2010-09-06 14:32:03 +0200 | [diff] [blame] | 10435 | |
Peter Zijlstra | abe4340 | 2010-11-17 23:17:37 +0100 | [diff] [blame] | 10436 | free_dev: |
| 10437 | device_del(pmu->dev); |
| 10438 | put_device(pmu->dev); |
| 10439 | |
Peter Zijlstra | 2e80a82 | 2010-11-17 23:17:36 +0100 | [diff] [blame] | 10440 | free_idr: |
Peter Zijlstra | 66d258c | 2019-10-17 20:31:03 +0200 | [diff] [blame] | 10441 | if (pmu->type != PERF_TYPE_SOFTWARE) |
Peter Zijlstra | 2e80a82 | 2010-11-17 23:17:36 +0100 | [diff] [blame] | 10442 | idr_remove(&pmu_idr, pmu->type); |
| 10443 | |
Peter Zijlstra | 108b02c | 2010-09-06 14:32:03 +0200 | [diff] [blame] | 10444 | free_pdc: |
| 10445 | free_percpu(pmu->pmu_disable_count); |
| 10446 | goto unlock; |
Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 10447 | } |
Yan, Zheng | c464c76 | 2014-03-18 16:56:41 +0800 | [diff] [blame] | 10448 | EXPORT_SYMBOL_GPL(perf_pmu_register); |
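| | /*
| |  * Illustrative sketch (not part of the original file; all "my_*" names are
| |  * hypothetical): a minimal driver registers a PMU roughly like this:
| |  *
| |  *	static struct pmu my_pmu = {
| |  *		.task_ctx_nr	= perf_invalid_context,	/* uncore-style PMU */
| |  *		.event_init	= my_event_init,
| |  *		.add		= my_add,
| |  *		.del		= my_del,
| |  *		.start		= my_start,
| |  *		.stop		= my_stop,
| |  *		.read		= my_read,
| |  *	};
| |  *
| |  *	err = perf_pmu_register(&my_pmu, "my_pmu", -1);
| |  *
| |  * Passing -1 as @type lets the idr_alloc() above hand out a dynamic type,
| |  * which userspace can read back from the PMU's "type" sysfs attribute.
| |  */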
Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 10449 | |
| 10450 | void perf_pmu_unregister(struct pmu *pmu) |
| 10451 | { |
| 10452 | mutex_lock(&pmus_lock); |
| 10453 | list_del_rcu(&pmu->entry); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 10454 | |
| 10455 | /* |
Peter Zijlstra | cde8e88 | 2010-09-13 11:06:55 +0200 | [diff] [blame] | 10456 | * We dereference the pmu list under both SRCU and regular RCU, so |
| 10457 | * synchronize against both of those. |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 10458 | */ |
Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 10459 | synchronize_srcu(&pmus_srcu); |
Peter Zijlstra | cde8e88 | 2010-09-13 11:06:55 +0200 | [diff] [blame] | 10460 | synchronize_rcu(); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 10461 | |
Peter Zijlstra | 33696fc | 2010-06-14 08:49:00 +0200 | [diff] [blame] | 10462 | free_percpu(pmu->pmu_disable_count); |
Peter Zijlstra | 66d258c | 2019-10-17 20:31:03 +0200 | [diff] [blame] | 10463 | if (pmu->type != PERF_TYPE_SOFTWARE) |
Peter Zijlstra | 2e80a82 | 2010-11-17 23:17:36 +0100 | [diff] [blame] | 10464 | idr_remove(&pmu_idr, pmu->type); |
Peter Zijlstra | a9f9772 | 2018-09-25 17:58:35 +0200 | [diff] [blame] | 10465 | if (pmu_bus_running) { |
Jiri Olsa | 0933840 | 2016-10-20 13:10:11 +0200 | [diff] [blame] | 10466 | if (pmu->nr_addr_filters) |
| 10467 | device_remove_file(pmu->dev, &dev_attr_nr_addr_filters); |
| 10468 | device_del(pmu->dev); |
| 10469 | put_device(pmu->dev); |
| 10470 | } |
Peter Zijlstra | 5167695 | 2010-12-07 14:18:20 +0100 | [diff] [blame] | 10471 | free_pmu_context(pmu); |
Peter Zijlstra | a9f9772 | 2018-09-25 17:58:35 +0200 | [diff] [blame] | 10472 | mutex_unlock(&pmus_lock); |
Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 10473 | } |
Yan, Zheng | c464c76 | 2014-03-18 16:56:41 +0800 | [diff] [blame] | 10474 | EXPORT_SYMBOL_GPL(perf_pmu_unregister); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 10475 | |
Kan Liang | e321d02 | 2019-05-28 15:08:30 -0700 | [diff] [blame] | 10476 | static inline bool has_extended_regs(struct perf_event *event) |
| 10477 | { |
| 10478 | return (event->attr.sample_regs_user & PERF_REG_EXTENDED_MASK) || |
| 10479 | (event->attr.sample_regs_intr & PERF_REG_EXTENDED_MASK); |
| 10480 | } |
| 10481 | |
Mark Rutland | cc34b98 | 2015-01-07 14:56:51 +0000 | [diff] [blame] | 10482 | static int perf_try_init_event(struct pmu *pmu, struct perf_event *event) |
| 10483 | { |
Peter Zijlstra | ccd41c8 | 2015-02-25 15:56:04 +0100 | [diff] [blame] | 10484 | struct perf_event_context *ctx = NULL; |
Mark Rutland | cc34b98 | 2015-01-07 14:56:51 +0000 | [diff] [blame] | 10485 | int ret; |
| 10486 | |
| 10487 | if (!try_module_get(pmu->module)) |
| 10488 | return -ENODEV; |
Peter Zijlstra | ccd41c8 | 2015-02-25 15:56:04 +0100 | [diff] [blame] | 10489 | |
Peter Zijlstra | 0c7296c | 2018-01-09 21:23:02 +0100 | [diff] [blame] | 10490 | /* |
| 10491 | * A number of pmu->event_init() methods iterate the sibling_list to, |
| 10492 | * for example, validate if the group fits on the PMU. Therefore, |
| 10493 | * if this is a sibling event, acquire the ctx->mutex to protect |
| 10494 | * the sibling_list. |
| 10495 | */ |
| 10496 | if (event->group_leader != event && pmu->task_ctx_nr != perf_sw_context) { |
Peter Zijlstra | 8b10c5e | 2015-05-01 16:08:46 +0200 | [diff] [blame] | 10497 | /* |
| 10498 | * This ctx->mutex can nest when we're called through |
| 10499 | * inheritance. See the perf_event_ctx_lock_nested() comment. |
| 10500 | */ |
| 10501 | ctx = perf_event_ctx_lock_nested(event->group_leader, |
| 10502 | SINGLE_DEPTH_NESTING); |
Peter Zijlstra | ccd41c8 | 2015-02-25 15:56:04 +0100 | [diff] [blame] | 10503 | BUG_ON(!ctx); |
| 10504 | } |
| 10505 | |
Mark Rutland | cc34b98 | 2015-01-07 14:56:51 +0000 | [diff] [blame] | 10506 | event->pmu = pmu; |
| 10507 | ret = pmu->event_init(event); |
Peter Zijlstra | ccd41c8 | 2015-02-25 15:56:04 +0100 | [diff] [blame] | 10508 | |
| 10509 | if (ctx) |
| 10510 | perf_event_ctx_unlock(event->group_leader, ctx); |
| 10511 | |
Andrew Murray | cc6795a | 2019-01-10 13:53:25 +0000 | [diff] [blame] | 10512 | if (!ret) { |
Kan Liang | e321d02 | 2019-05-28 15:08:30 -0700 | [diff] [blame] | 10513 | if (!(pmu->capabilities & PERF_PMU_CAP_EXTENDED_REGS) && |
| 10514 | has_extended_regs(event)) |
| 10515 | ret = -EOPNOTSUPP; |
| 10516 | |
Andrew Murray | cc6795a | 2019-01-10 13:53:25 +0000 | [diff] [blame] | 10517 | if (pmu->capabilities & PERF_PMU_CAP_NO_EXCLUDE && |
Kan Liang | e321d02 | 2019-05-28 15:08:30 -0700 | [diff] [blame] | 10518 | event_has_any_exclude_flag(event)) |
Andrew Murray | cc6795a | 2019-01-10 13:53:25 +0000 | [diff] [blame] | 10519 | ret = -EINVAL; |
Kan Liang | e321d02 | 2019-05-28 15:08:30 -0700 | [diff] [blame] | 10520 | |
| 10521 | if (ret && event->destroy) |
| 10522 | event->destroy(event); |
Andrew Murray | cc6795a | 2019-01-10 13:53:25 +0000 | [diff] [blame] | 10523 | } |
| 10524 | |
Mark Rutland | cc34b98 | 2015-01-07 14:56:51 +0000 | [diff] [blame] | 10525 | if (ret) |
| 10526 | module_put(pmu->module); |
| 10527 | |
| 10528 | return ret; |
| 10529 | } |
| 10530 | |
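| | /*
| |  * Summary (comment added for clarity): perf_init_event() resolves the PMU
| |  * for a new event in three steps: first it tries the parent event's PMU
| |  * (cheap for inherited events), then an IDR lookup keyed on attr.type
| |  * (with PERF_TYPE_HARDWARE/PERF_TYPE_HW_CACHE first tried as PERF_TYPE_RAW
| |  * and the original type retried on -ENOENT), and finally a linear walk of
| |  * the global pmus list, which starts with the software PMUs.
| |  */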
Geliang Tang | 18ab2cd | 2015-09-27 23:25:50 +0800 | [diff] [blame] | 10531 | static struct pmu *perf_init_event(struct perf_event *event) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 10532 | { |
Peter Zijlstra | 66d258c | 2019-10-17 20:31:03 +0200 | [diff] [blame] | 10533 | int idx, type, ret; |
Dan Carpenter | 85c617a | 2017-05-22 12:03:49 +0300 | [diff] [blame] | 10534 | struct pmu *pmu; |
Frederic Weisbecker | 76e1d90 | 2010-04-05 15:35:57 +0200 | [diff] [blame] | 10535 | |
Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 10536 | idx = srcu_read_lock(&pmus_srcu); |
Peter Zijlstra | 2e80a82 | 2010-11-17 23:17:36 +0100 | [diff] [blame] | 10537 | |
Kan Liang | 4099931 | 2017-01-18 08:21:01 -0500 | [diff] [blame] | 10538 | /* Try parent's PMU first: */ |
| 10539 | if (event->parent && event->parent->pmu) { |
| 10540 | pmu = event->parent->pmu; |
| 10541 | ret = perf_try_init_event(pmu, event); |
| 10542 | if (!ret) |
| 10543 | goto unlock; |
| 10544 | } |
| 10545 | |
Peter Zijlstra | 66d258c | 2019-10-17 20:31:03 +0200 | [diff] [blame] | 10546 | /* |
| 10547 | * PERF_TYPE_HARDWARE and PERF_TYPE_HW_CACHE |
| 10548 | * are often aliases for PERF_TYPE_RAW. |
| 10549 | */ |
| 10550 | type = event->attr.type; |
| 10551 | if (type == PERF_TYPE_HARDWARE || type == PERF_TYPE_HW_CACHE) |
| 10552 | type = PERF_TYPE_RAW; |
| 10553 | |
| 10554 | again: |
Peter Zijlstra | 2e80a82 | 2010-11-17 23:17:36 +0100 | [diff] [blame] | 10555 | rcu_read_lock(); |
Peter Zijlstra | 66d258c | 2019-10-17 20:31:03 +0200 | [diff] [blame] | 10556 | pmu = idr_find(&pmu_idr, type); |
Peter Zijlstra | 2e80a82 | 2010-11-17 23:17:36 +0100 | [diff] [blame] | 10557 | rcu_read_unlock(); |
Lin Ming | 940c5b2 | 2011-02-27 21:13:31 +0800 | [diff] [blame] | 10558 | if (pmu) { |
Mark Rutland | cc34b98 | 2015-01-07 14:56:51 +0000 | [diff] [blame] | 10559 | ret = perf_try_init_event(pmu, event); |
Peter Zijlstra | 66d258c | 2019-10-17 20:31:03 +0200 | [diff] [blame] | 10560 | if (ret == -ENOENT && event->attr.type != type) { |
| 10561 | type = event->attr.type; |
| 10562 | goto again; |
| 10563 | } |
| 10564 | |
Lin Ming | 940c5b2 | 2011-02-27 21:13:31 +0800 | [diff] [blame] | 10565 | if (ret) |
| 10566 | pmu = ERR_PTR(ret); |
Peter Zijlstra | 66d258c | 2019-10-17 20:31:03 +0200 | [diff] [blame] | 10567 | |
Peter Zijlstra | 2e80a82 | 2010-11-17 23:17:36 +0100 | [diff] [blame] | 10568 | goto unlock; |
Lin Ming | 940c5b2 | 2011-02-27 21:13:31 +0800 | [diff] [blame] | 10569 | } |
Peter Zijlstra | 2e80a82 | 2010-11-17 23:17:36 +0100 | [diff] [blame] | 10570 | |
Sebastian Andrzej Siewior | 9f0bff1 | 2019-11-19 13:14:29 +0100 | [diff] [blame] | 10571 | list_for_each_entry_rcu(pmu, &pmus, entry, lockdep_is_held(&pmus_srcu)) { |
Mark Rutland | cc34b98 | 2015-01-07 14:56:51 +0000 | [diff] [blame] | 10572 | ret = perf_try_init_event(pmu, event); |
Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 10573 | if (!ret) |
Peter Zijlstra | e5f4d33 | 2010-09-10 17:38:06 +0200 | [diff] [blame] | 10574 | goto unlock; |
Frederic Weisbecker | 76e1d90 | 2010-04-05 15:35:57 +0200 | [diff] [blame] | 10575 | |
Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 10576 | if (ret != -ENOENT) { |
| 10577 | pmu = ERR_PTR(ret); |
Peter Zijlstra | e5f4d33 | 2010-09-10 17:38:06 +0200 | [diff] [blame] | 10578 | goto unlock; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 10579 | } |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 10580 | } |
Peter Zijlstra | e5f4d33 | 2010-09-10 17:38:06 +0200 | [diff] [blame] | 10581 | pmu = ERR_PTR(-ENOENT); |
| 10582 | unlock: |
Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 10583 | srcu_read_unlock(&pmus_srcu, idx); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 10584 | |
| 10585 | return pmu; |
| 10586 | } |
| 10587 | |
Kan Liang | f2fb6be | 2016-03-23 11:24:37 -0700 | [diff] [blame] | 10588 | static void attach_sb_event(struct perf_event *event) |
| 10589 | { |
| 10590 | struct pmu_event_list *pel = per_cpu_ptr(&pmu_sb_events, event->cpu); |
| 10591 | |
| 10592 | raw_spin_lock(&pel->lock); |
| 10593 | list_add_rcu(&event->sb_list, &pel->list); |
| 10594 | raw_spin_unlock(&pel->lock); |
| 10595 | } |
| 10596 | |
Peter Zijlstra | aab5b71 | 2016-05-12 17:26:46 +0200 | [diff] [blame] | 10597 | /* |
| 10598 | * We keep a list of all !task (and therefore per-cpu) events |
| 10599 | * that need to receive side-band records. |
| 10600 | * |
| 10601 | * This avoids having to scan all the various PMU per-cpu contexts |
| 10602 | * looking for them. |
| 10603 | */ |
Kan Liang | f2fb6be | 2016-03-23 11:24:37 -0700 | [diff] [blame] | 10604 | static void account_pmu_sb_event(struct perf_event *event) |
| 10605 | { |
David Carrillo-Cisneros | a4f144e | 2016-06-01 12:33:05 -0700 | [diff] [blame] | 10606 | if (is_sb_event(event)) |
Kan Liang | f2fb6be | 2016-03-23 11:24:37 -0700 | [diff] [blame] | 10607 | attach_sb_event(event); |
| 10608 | } |
| 10609 | |
Frederic Weisbecker | 4beb31f | 2013-07-23 02:31:02 +0200 | [diff] [blame] | 10610 | static void account_event_cpu(struct perf_event *event, int cpu) |
| 10611 | { |
| 10612 | if (event->parent) |
| 10613 | return; |
| 10614 | |
Frederic Weisbecker | 4beb31f | 2013-07-23 02:31:02 +0200 | [diff] [blame] | 10615 | if (is_cgroup_event(event)) |
| 10616 | atomic_inc(&per_cpu(perf_cgroup_events, cpu)); |
| 10617 | } |
| 10618 | |
Frederic Weisbecker | 555e0c1 | 2015-07-16 17:42:29 +0200 | [diff] [blame] | 10619 | /* Freq events need the tick to stay alive (see perf_event_task_tick). */ |
| 10620 | static void account_freq_event_nohz(void) |
| 10621 | { |
| 10622 | #ifdef CONFIG_NO_HZ_FULL |
| 10623 | /* Lock so we don't race with concurrent unaccount */ |
| 10624 | spin_lock(&nr_freq_lock); |
| 10625 | if (atomic_inc_return(&nr_freq_events) == 1) |
| 10626 | tick_nohz_dep_set(TICK_DEP_BIT_PERF_EVENTS); |
| 10627 | spin_unlock(&nr_freq_lock); |
| 10628 | #endif |
| 10629 | } |
| 10630 | |
| 10631 | static void account_freq_event(void) |
| 10632 | { |
| 10633 | if (tick_nohz_full_enabled()) |
| 10634 | account_freq_event_nohz(); |
| 10635 | else |
| 10636 | atomic_inc(&nr_freq_events); |
| 10637 | } |
| 10638 | |
Frederic Weisbecker | 766d6c0 | 2013-07-23 02:31:01 +0200 | [diff] [blame] | 10640 | static void account_event(struct perf_event *event) |
| 10641 | { |
Peter Zijlstra | 25432ae | 2016-01-08 11:05:09 +0100 | [diff] [blame] | 10642 | bool inc = false; |
| 10643 | |
Frederic Weisbecker | 4beb31f | 2013-07-23 02:31:02 +0200 | [diff] [blame] | 10644 | if (event->parent) |
| 10645 | return; |
| 10646 | |
Frederic Weisbecker | 766d6c0 | 2013-07-23 02:31:01 +0200 | [diff] [blame] | 10647 | if (event->attach_state & PERF_ATTACH_TASK) |
Peter Zijlstra | 25432ae | 2016-01-08 11:05:09 +0100 | [diff] [blame] | 10648 | inc = true; |
Frederic Weisbecker | 766d6c0 | 2013-07-23 02:31:01 +0200 | [diff] [blame] | 10649 | if (event->attr.mmap || event->attr.mmap_data) |
| 10650 | atomic_inc(&nr_mmap_events); |
| 10651 | if (event->attr.comm) |
| 10652 | atomic_inc(&nr_comm_events); |
Hari Bathini | e422267 | 2017-03-08 02:11:36 +0530 | [diff] [blame] | 10653 | if (event->attr.namespaces) |
| 10654 | atomic_inc(&nr_namespaces_events); |
Frederic Weisbecker | 766d6c0 | 2013-07-23 02:31:01 +0200 | [diff] [blame] | 10655 | if (event->attr.task) |
| 10656 | atomic_inc(&nr_task_events); |
Frederic Weisbecker | 555e0c1 | 2015-07-16 17:42:29 +0200 | [diff] [blame] | 10657 | if (event->attr.freq) |
| 10658 | account_freq_event(); |
Adrian Hunter | 45ac140 | 2015-07-21 12:44:02 +0300 | [diff] [blame] | 10659 | if (event->attr.context_switch) { |
| 10660 | atomic_inc(&nr_switch_events); |
Peter Zijlstra | 25432ae | 2016-01-08 11:05:09 +0100 | [diff] [blame] | 10661 | inc = true; |
Adrian Hunter | 45ac140 | 2015-07-21 12:44:02 +0300 | [diff] [blame] | 10662 | } |
Frederic Weisbecker | 4beb31f | 2013-07-23 02:31:02 +0200 | [diff] [blame] | 10663 | if (has_branch_stack(event)) |
Peter Zijlstra | 25432ae | 2016-01-08 11:05:09 +0100 | [diff] [blame] | 10664 | inc = true; |
Frederic Weisbecker | 4beb31f | 2013-07-23 02:31:02 +0200 | [diff] [blame] | 10665 | if (is_cgroup_event(event)) |
Peter Zijlstra | 25432ae | 2016-01-08 11:05:09 +0100 | [diff] [blame] | 10666 | inc = true; |
Song Liu | 76193a9 | 2019-01-17 08:15:13 -0800 | [diff] [blame] | 10667 | if (event->attr.ksymbol) |
| 10668 | atomic_inc(&nr_ksymbol_events); |
Song Liu | 6ee52e2 | 2019-01-17 08:15:15 -0800 | [diff] [blame] | 10669 | if (event->attr.bpf_event) |
| 10670 | atomic_inc(&nr_bpf_events); |
Peter Zijlstra | 25432ae | 2016-01-08 11:05:09 +0100 | [diff] [blame] | 10671 | |
Peter Zijlstra | 9107c89 | 2016-02-24 18:45:45 +0100 | [diff] [blame] | 10672 | if (inc) { |
Alexander Shishkin | 5bce9db | 2017-08-29 17:01:03 +0300 | [diff] [blame] | 10673 | /* |
| 10674 | * We need the mutex here because static_branch_enable() |
| 10675 | * must complete *before* the perf_sched_count increment |
| 10676 | * becomes visible. |
| 10677 | */ |
Peter Zijlstra | 9107c89 | 2016-02-24 18:45:45 +0100 | [diff] [blame] | 10678 | if (atomic_inc_not_zero(&perf_sched_count)) |
| 10679 | goto enabled; |
| 10680 | |
| 10681 | mutex_lock(&perf_sched_mutex); |
| 10682 | if (!atomic_read(&perf_sched_count)) { |
| 10683 | static_branch_enable(&perf_sched_events); |
| 10684 | /* |
| 10685 | * Guarantee that all CPUs observe the key change and
| 10686 | * call the perf scheduling hooks before proceeding to |
| 10687 | * install events that need them. |
| 10688 | */ |
Paul E. McKenney | 0809d954 | 2018-11-06 19:20:05 -0800 | [diff] [blame] | 10689 | synchronize_rcu(); |
Peter Zijlstra | 9107c89 | 2016-02-24 18:45:45 +0100 | [diff] [blame] | 10690 | } |
| 10691 | /* |
| 10692 | * Now that we have waited for the synchronize_rcu(), allow further
| 10693 | * increments to by-pass the mutex. |
| 10694 | */ |
| 10695 | atomic_inc(&perf_sched_count); |
| 10696 | mutex_unlock(&perf_sched_mutex); |
| 10697 | } |
| 10698 | enabled: |
Frederic Weisbecker | 766d6c0 | 2013-07-23 02:31:01 +0200 | [diff] [blame] | 10699 | |
Frederic Weisbecker | 4beb31f | 2013-07-23 02:31:02 +0200 | [diff] [blame] | 10700 | account_event_cpu(event, event->cpu); |
Kan Liang | f2fb6be | 2016-03-23 11:24:37 -0700 | [diff] [blame] | 10701 | |
| 10702 | account_pmu_sb_event(event); |
Frederic Weisbecker | 766d6c0 | 2013-07-23 02:31:01 +0200 | [diff] [blame] | 10703 | } |
| 10704 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 10705 | /* |
Tobias Tefke | 788faab | 2018-07-09 12:57:15 +0200 | [diff] [blame] | 10706 | * Allocate and initialize an event structure |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 10707 | */ |
| 10708 | static struct perf_event * |
Peter Zijlstra | c3f00c7 | 2010-08-18 14:37:15 +0200 | [diff] [blame] | 10709 | perf_event_alloc(struct perf_event_attr *attr, int cpu, |
Peter Zijlstra | d580ff8 | 2010-10-14 17:43:23 +0200 | [diff] [blame] | 10710 | struct task_struct *task, |
| 10711 | struct perf_event *group_leader, |
| 10712 | struct perf_event *parent_event, |
Avi Kivity | 4dc0da8 | 2011-06-29 18:42:35 +0300 | [diff] [blame] | 10713 | perf_overflow_handler_t overflow_handler, |
Matt Fleming | 79dff51 | 2015-01-23 18:45:42 +0000 | [diff] [blame] | 10714 | void *context, int cgroup_fd) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 10715 | { |
Peter Zijlstra | 51b0fe3 | 2010-06-11 13:35:57 +0200 | [diff] [blame] | 10716 | struct pmu *pmu; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 10717 | struct perf_event *event; |
| 10718 | struct hw_perf_event *hwc; |
Frederic Weisbecker | 90983b1 | 2013-07-23 02:31:00 +0200 | [diff] [blame] | 10719 | long err = -EINVAL; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 10720 | |
Oleg Nesterov | 66832eb | 2011-01-18 17:10:32 +0100 | [diff] [blame] | 10721 | if ((unsigned)cpu >= nr_cpu_ids) { |
| 10722 | if (!task || cpu != -1) |
| 10723 | return ERR_PTR(-EINVAL); |
| 10724 | } |
| 10725 | |
Peter Zijlstra | c3f00c7 | 2010-08-18 14:37:15 +0200 | [diff] [blame] | 10726 | event = kzalloc(sizeof(*event), GFP_KERNEL); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 10727 | if (!event) |
| 10728 | return ERR_PTR(-ENOMEM); |
| 10729 | |
| 10730 | /* |
| 10731 | * Single events are their own group leaders, with an |
| 10732 | * empty sibling list: |
| 10733 | */ |
| 10734 | if (!group_leader) |
| 10735 | group_leader = event; |
| 10736 | |
| 10737 | mutex_init(&event->child_mutex); |
| 10738 | INIT_LIST_HEAD(&event->child_list); |
| 10739 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 10740 | INIT_LIST_HEAD(&event->event_entry); |
| 10741 | INIT_LIST_HEAD(&event->sibling_list); |
Peter Zijlstra | 6668128 | 2017-11-13 14:28:38 +0100 | [diff] [blame] | 10742 | INIT_LIST_HEAD(&event->active_list); |
Alexey Budankov | 8e1a203 | 2017-09-08 11:47:03 +0300 | [diff] [blame] | 10743 | init_event_group(event); |
Peter Zijlstra | 10c6db1 | 2011-11-26 02:47:31 +0100 | [diff] [blame] | 10744 | INIT_LIST_HEAD(&event->rb_entry); |
Stephane Eranian | 71ad88e | 2013-11-12 17:58:48 +0100 | [diff] [blame] | 10745 | INIT_LIST_HEAD(&event->active_entry); |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 10746 | INIT_LIST_HEAD(&event->addr_filters.list); |
Stephane Eranian | f3ae75d | 2014-01-08 11:15:52 +0100 | [diff] [blame] | 10747 | INIT_HLIST_NODE(&event->hlist_entry); |
Peter Zijlstra | 10c6db1 | 2011-11-26 02:47:31 +0100 | [diff] [blame] | 10749 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 10750 | init_waitqueue_head(&event->waitq); |
Peter Zijlstra | 1d54ad9 | 2019-04-04 15:03:00 +0200 | [diff] [blame] | 10751 | event->pending_disable = -1; |
Peter Zijlstra | e360adb | 2010-10-14 14:01:34 +0800 | [diff] [blame] | 10752 | init_irq_work(&event->pending, perf_pending_event); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 10753 | |
| 10754 | mutex_init(&event->mmap_mutex); |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 10755 | raw_spin_lock_init(&event->addr_filters.lock); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 10756 | |
Al Viro | a6fa941 | 2012-08-20 14:59:25 +0100 | [diff] [blame] | 10757 | atomic_long_set(&event->refcount, 1); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 10758 | event->cpu = cpu; |
| 10759 | event->attr = *attr; |
| 10760 | event->group_leader = group_leader; |
| 10761 | event->pmu = NULL; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 10762 | event->oncpu = -1; |
| 10763 | |
| 10764 | event->parent = parent_event; |
| 10765 | |
Eric W. Biederman | 17cf22c | 2010-03-02 14:51:53 -0800 | [diff] [blame] | 10766 | event->ns = get_pid_ns(task_active_pid_ns(current)); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 10767 | event->id = atomic64_inc_return(&perf_event_id); |
| 10768 | |
| 10769 | event->state = PERF_EVENT_STATE_INACTIVE; |
| 10770 | |
Peter Zijlstra | d580ff8 | 2010-10-14 17:43:23 +0200 | [diff] [blame] | 10771 | if (task) { |
| 10772 | event->attach_state = PERF_ATTACH_TASK; |
Peter Zijlstra | d580ff8 | 2010-10-14 17:43:23 +0200 | [diff] [blame] | 10773 | /* |
Peter Zijlstra | 50f16a8 | 2015-03-05 22:10:19 +0100 | [diff] [blame] | 10774 | * XXX pmu::event_init needs to know what task to account to |
| 10775 | * and we cannot use the ctx information because we need the |
| 10776 | * pmu before we get a ctx. |
Peter Zijlstra | d580ff8 | 2010-10-14 17:43:23 +0200 | [diff] [blame] | 10777 | */ |
Matthew Wilcox (Oracle) | 7b3c92b | 2019-07-04 15:13:23 -0700 | [diff] [blame] | 10778 | event->hw.target = get_task_struct(task); |
Peter Zijlstra | d580ff8 | 2010-10-14 17:43:23 +0200 | [diff] [blame] | 10779 | } |
| 10780 | |
Peter Zijlstra | 34f4392 | 2015-02-20 14:05:38 +0100 | [diff] [blame] | 10781 | event->clock = &local_clock; |
| 10782 | if (parent_event) |
| 10783 | event->clock = parent_event->clock; |
| 10784 | |
Avi Kivity | 4dc0da8 | 2011-06-29 18:42:35 +0300 | [diff] [blame] | 10785 | if (!overflow_handler && parent_event) { |
Frederic Weisbecker | b326e95 | 2009-12-05 09:44:31 +0100 | [diff] [blame] | 10786 | overflow_handler = parent_event->overflow_handler; |
Avi Kivity | 4dc0da8 | 2011-06-29 18:42:35 +0300 | [diff] [blame] | 10787 | context = parent_event->overflow_handler_context; |
Arnd Bergmann | f1e4ba5 | 2016-09-06 15:10:22 +0200 | [diff] [blame] | 10788 | #if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_EVENT_TRACING) |
Alexei Starovoitov | aa6a5f3 | 2016-09-01 18:37:24 -0700 | [diff] [blame] | 10789 | if (overflow_handler == bpf_overflow_handler) { |
Andrii Nakryiko | 85192db | 2019-11-17 09:28:03 -0800 | [diff] [blame] | 10790 | struct bpf_prog *prog = parent_event->prog; |
Alexei Starovoitov | aa6a5f3 | 2016-09-01 18:37:24 -0700 | [diff] [blame] | 10791 | |
Andrii Nakryiko | 85192db | 2019-11-17 09:28:03 -0800 | [diff] [blame] | 10792 | bpf_prog_inc(prog); |
Alexei Starovoitov | aa6a5f3 | 2016-09-01 18:37:24 -0700 | [diff] [blame] | 10793 | event->prog = prog; |
| 10794 | event->orig_overflow_handler = |
| 10795 | parent_event->orig_overflow_handler; |
| 10796 | } |
| 10797 | #endif |
Avi Kivity | 4dc0da8 | 2011-06-29 18:42:35 +0300 | [diff] [blame] | 10798 | } |
Oleg Nesterov | 66832eb | 2011-01-18 17:10:32 +0100 | [diff] [blame] | 10799 | |
Wang Nan | 1879445 | 2016-03-28 06:41:30 +0000 | [diff] [blame] | 10800 | if (overflow_handler) { |
| 10801 | event->overflow_handler = overflow_handler; |
| 10802 | event->overflow_handler_context = context; |
Wang Nan | 9ecda41 | 2016-04-05 14:11:18 +0000 | [diff] [blame] | 10803 | } else if (is_write_backward(event)) {
| 10804 | event->overflow_handler = perf_event_output_backward; |
| 10805 | event->overflow_handler_context = NULL; |
Wang Nan | 1879445 | 2016-03-28 06:41:30 +0000 | [diff] [blame] | 10806 | } else { |
Wang Nan | 9ecda41 | 2016-04-05 14:11:18 +0000 | [diff] [blame] | 10807 | event->overflow_handler = perf_event_output_forward; |
Wang Nan | 1879445 | 2016-03-28 06:41:30 +0000 | [diff] [blame] | 10808 | event->overflow_handler_context = NULL; |
| 10809 | } |
Frederic Weisbecker | 97eaf53 | 2009-10-18 15:33:50 +0200 | [diff] [blame] | 10810 | |
Jiri Olsa | 0231bb5 | 2013-02-01 11:23:45 +0100 | [diff] [blame] | 10811 | perf_event__state_init(event); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 10812 | |
| 10813 | pmu = NULL; |
| 10814 | |
| 10815 | hwc = &event->hw; |
| 10816 | hwc->sample_period = attr->sample_period; |
| 10817 | if (attr->freq && attr->sample_freq) |
| 10818 | hwc->sample_period = 1; |
| 10819 | hwc->last_period = hwc->sample_period; |
| 10820 | |
Peter Zijlstra | e785059 | 2010-05-21 14:43:08 +0200 | [diff] [blame] | 10821 | local64_set(&hwc->period_left, hwc->sample_period); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 10822 | |
| 10823 | /* |
Peter Zijlstra | ba5213a | 2017-05-30 11:45:12 +0200 | [diff] [blame] | 10824 | * We currently do not support PERF_SAMPLE_READ on inherited events. |
| 10825 | * See perf_output_read(). |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 10826 | */ |
Peter Zijlstra | ba5213a | 2017-05-30 11:45:12 +0200 | [diff] [blame] | 10827 | if (attr->inherit && (attr->sample_type & PERF_SAMPLE_READ)) |
Frederic Weisbecker | 90983b1 | 2013-07-23 02:31:00 +0200 | [diff] [blame] | 10828 | goto err_ns; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 10829 | |
Yan, Zheng | a46a230 | 2014-11-04 21:56:06 -0500 | [diff] [blame] | 10830 | if (!has_branch_stack(event)) |
| 10831 | event->attr.branch_sample_type = 0; |
| 10832 | |
Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 10833 | pmu = perf_init_event(event); |
Dan Carpenter | 85c617a | 2017-05-22 12:03:49 +0300 | [diff] [blame] | 10834 | if (IS_ERR(pmu)) { |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 10835 | err = PTR_ERR(pmu); |
Frederic Weisbecker | 90983b1 | 2013-07-23 02:31:00 +0200 | [diff] [blame] | 10836 | goto err_ns; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 10837 | } |
| 10838 | |
Peter Zijlstra | 09f4e8f | 2019-11-06 12:51:04 +0100 | [diff] [blame] | 10839 | /* |
| 10840 | * Disallow uncore-cgroup events; they don't make sense, as the cgroup will
| 10841 | * be different on other CPUs in the uncore mask. |
| 10842 | */ |
| 10843 | if (pmu->task_ctx_nr == perf_invalid_context && cgroup_fd != -1) { |
| 10844 | err = -EINVAL; |
| 10845 | goto err_pmu; |
| 10846 | } |
| 10847 | |
Alexander Shishkin | ab43762 | 2019-08-06 11:46:00 +0300 | [diff] [blame] | 10848 | if (event->attr.aux_output && |
| 10849 | !(pmu->capabilities & PERF_PMU_CAP_AUX_OUTPUT)) { |
| 10850 | err = -EOPNOTSUPP; |
| 10851 | goto err_pmu; |
| 10852 | } |
| 10853 | |
Peter Zijlstra | 98add2a | 2020-02-13 23:51:28 -0800 | [diff] [blame] | 10854 | if (cgroup_fd != -1) { |
| 10855 | err = perf_cgroup_connect(cgroup_fd, event, attr, group_leader); |
| 10856 | if (err) |
| 10857 | goto err_pmu; |
| 10858 | } |
| 10859 | |
Alexander Shishkin | bed5b25 | 2015-01-30 12:31:06 +0200 | [diff] [blame] | 10860 | err = exclusive_event_init(event); |
| 10861 | if (err) |
| 10862 | goto err_pmu; |
| 10863 | |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 10864 | if (has_addr_filter(event)) { |
Alexander Shishkin | c60f83b | 2019-02-15 13:56:55 +0200 | [diff] [blame] | 10865 | event->addr_filter_ranges = kcalloc(pmu->nr_addr_filters, |
| 10866 | sizeof(struct perf_addr_filter_range), |
| 10867 | GFP_KERNEL); |
| 10868 | if (!event->addr_filter_ranges) { |
Dan Carpenter | 36cc2b9 | 2017-05-22 12:04:18 +0300 | [diff] [blame] | 10869 | err = -ENOMEM; |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 10870 | goto err_per_task; |
Dan Carpenter | 36cc2b9 | 2017-05-22 12:04:18 +0300 | [diff] [blame] | 10871 | } |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 10872 | |
Alexander Shishkin | 18736ee | 2019-02-15 13:56:54 +0200 | [diff] [blame] | 10873 | /* |
| 10874 | * Clone the parent's vma offsets: they are valid until exec() |
| 10875 | * even if the mm is not shared with the parent. |
| 10876 | */ |
| 10877 | if (event->parent) { |
| 10878 | struct perf_addr_filters_head *ifh = perf_event_addr_filters(event); |
| 10879 | |
| 10880 | raw_spin_lock_irq(&ifh->lock); |
Alexander Shishkin | c60f83b | 2019-02-15 13:56:55 +0200 | [diff] [blame] | 10881 | memcpy(event->addr_filter_ranges, |
| 10882 | event->parent->addr_filter_ranges, |
| 10883 | pmu->nr_addr_filters * sizeof(struct perf_addr_filter_range)); |
Alexander Shishkin | 18736ee | 2019-02-15 13:56:54 +0200 | [diff] [blame] | 10884 | raw_spin_unlock_irq(&ifh->lock); |
| 10885 | } |
| 10886 | |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 10887 | /* force hw sync on the address filters */ |
| 10888 | event->addr_filters_gen = 1; |
| 10889 | } |
| 10890 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 10891 | if (!event->parent) { |
Frederic Weisbecker | 927c7a9 | 2010-07-01 16:20:36 +0200 | [diff] [blame] | 10892 | if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) { |
Arnaldo Carvalho de Melo | 97c79a3 | 2016-04-28 13:16:33 -0300 | [diff] [blame] | 10893 | err = get_callchain_buffers(attr->sample_max_stack); |
Frederic Weisbecker | 90983b1 | 2013-07-23 02:31:00 +0200 | [diff] [blame] | 10894 | if (err) |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 10895 | goto err_addr_filters; |
Stephane Eranian | d010b33 | 2012-02-09 23:21:00 +0100 | [diff] [blame] | 10896 | } |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 10897 | } |
| 10898 | |
Joel Fernandes (Google) | da97e18 | 2019-10-14 13:03:08 -0400 | [diff] [blame] | 10899 | err = security_perf_event_alloc(event); |
| 10900 | if (err) |
| 10901 | goto err_callchain_buffer; |
| 10902 | |
Alexander Shishkin | 927a557 | 2016-03-02 13:24:14 +0200 | [diff] [blame] | 10903 | /* symmetric to unaccount_event() in _free_event() */ |
| 10904 | account_event(event); |
| 10905 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 10906 | return event; |
Frederic Weisbecker | 90983b1 | 2013-07-23 02:31:00 +0200 | [diff] [blame] | 10907 | |
Joel Fernandes (Google) | da97e18 | 2019-10-14 13:03:08 -0400 | [diff] [blame] | 10908 | err_callchain_buffer: |
| 10909 | if (!event->parent) { |
| 10910 | if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) |
| 10911 | put_callchain_buffers(); |
| 10912 | } |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 10913 | err_addr_filters: |
Alexander Shishkin | c60f83b | 2019-02-15 13:56:55 +0200 | [diff] [blame] | 10914 | kfree(event->addr_filter_ranges); |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 10915 | |
Alexander Shishkin | bed5b25 | 2015-01-30 12:31:06 +0200 | [diff] [blame] | 10916 | err_per_task: |
| 10917 | exclusive_event_destroy(event); |
| 10918 | |
Frederic Weisbecker | 90983b1 | 2013-07-23 02:31:00 +0200 | [diff] [blame] | 10919 | err_pmu: |
Peter Zijlstra | 98add2a | 2020-02-13 23:51:28 -0800 | [diff] [blame] | 10920 | if (is_cgroup_event(event)) |
| 10921 | perf_detach_cgroup(event); |
Frederic Weisbecker | 90983b1 | 2013-07-23 02:31:00 +0200 | [diff] [blame] | 10922 | if (event->destroy) |
| 10923 | event->destroy(event); |
Yan, Zheng | c464c76 | 2014-03-18 16:56:41 +0800 | [diff] [blame] | 10924 | module_put(pmu->module); |
Frederic Weisbecker | 90983b1 | 2013-07-23 02:31:00 +0200 | [diff] [blame] | 10925 | err_ns: |
| 10926 | if (event->ns) |
| 10927 | put_pid_ns(event->ns); |
Prashant Bhole | 621b6d2 | 2018-04-09 19:03:46 +0900 | [diff] [blame] | 10928 | if (event->hw.target) |
| 10929 | put_task_struct(event->hw.target); |
Frederic Weisbecker | 90983b1 | 2013-07-23 02:31:00 +0200 | [diff] [blame] | 10930 | kfree(event); |
| 10931 | |
| 10932 | return ERR_PTR(err); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 10933 | } |
| 10934 | |
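| | /*
| |  * Note (added for clarity): the copy below is versioned by attr->size.
| |  * copy_struct_from_user() copies min(sizeof(*attr), size) bytes and
| |  * rejects the call with -E2BIG if userspace passed a larger structure
| |  * whose trailing bytes are not all zero, so old binaries keep working
| |  * against a newer kernel and vice versa.
| |  */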
| 10935 | static int perf_copy_attr(struct perf_event_attr __user *uattr, |
| 10936 | struct perf_event_attr *attr) |
| 10937 | { |
| 10938 | u32 size; |
| 10939 | int ret; |
| 10940 | |
Aleksa Sarai | c2ba8f4 | 2019-10-01 11:10:55 +1000 | [diff] [blame] | 10941 | /* Zero the full structure, so that a short copy leaves the tail zeroed. */
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 10942 | memset(attr, 0, sizeof(*attr)); |
| 10943 | |
| 10944 | ret = get_user(size, &uattr->size); |
| 10945 | if (ret) |
| 10946 | return ret; |
| 10947 | |
Aleksa Sarai | c2ba8f4 | 2019-10-01 11:10:55 +1000 | [diff] [blame] | 10948 | /* ABI compatibility quirk: */ |
| 10949 | if (!size) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 10950 | size = PERF_ATTR_SIZE_VER0; |
Aleksa Sarai | c2ba8f4 | 2019-10-01 11:10:55 +1000 | [diff] [blame] | 10951 | if (size < PERF_ATTR_SIZE_VER0 || size > PAGE_SIZE) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 10952 | goto err_size; |
| 10953 | |
Aleksa Sarai | c2ba8f4 | 2019-10-01 11:10:55 +1000 | [diff] [blame] | 10954 | ret = copy_struct_from_user(attr, sizeof(*attr), uattr, size); |
| 10955 | if (ret) { |
| 10956 | if (ret == -E2BIG) |
| 10957 | goto err_size; |
| 10958 | return ret; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 10959 | } |
| 10960 | |
Meng Xu | f12f42a | 2017-08-23 17:07:50 -0400 | [diff] [blame] | 10961 | attr->size = size; |
| 10962 | |
Alexander Shishkin | a4faf00 | 2019-10-25 17:08:33 +0300 | [diff] [blame] | 10963 | if (attr->__reserved_1 || attr->__reserved_2 || attr->__reserved_3) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 10964 | return -EINVAL; |
| 10965 | |
| 10966 | if (attr->sample_type & ~(PERF_SAMPLE_MAX-1)) |
| 10967 | return -EINVAL; |
| 10968 | |
| 10969 | if (attr->read_format & ~(PERF_FORMAT_MAX-1)) |
| 10970 | return -EINVAL; |
| 10971 | |
Stephane Eranian | bce38cd | 2012-02-09 23:20:51 +0100 | [diff] [blame] | 10972 | if (attr->sample_type & PERF_SAMPLE_BRANCH_STACK) { |
| 10973 | u64 mask = attr->branch_sample_type; |
| 10974 | |
| 10975 | /* only using defined bits */ |
| 10976 | if (mask & ~(PERF_SAMPLE_BRANCH_MAX-1)) |
| 10977 | return -EINVAL; |
| 10978 | |
| 10979 | /* at least one branch bit must be set */ |
| 10980 | if (!(mask & ~PERF_SAMPLE_BRANCH_PLM_ALL)) |
| 10981 | return -EINVAL; |
| 10982 | |
Stephane Eranian | bce38cd | 2012-02-09 23:20:51 +0100 | [diff] [blame] | 10983 | /* propagate priv level, when not set for branch */ |
| 10984 | if (!(mask & PERF_SAMPLE_BRANCH_PLM_ALL)) { |
| 10985 | |
| 10986 | /* exclude_kernel checked on syscall entry */ |
| 10987 | if (!attr->exclude_kernel) |
| 10988 | mask |= PERF_SAMPLE_BRANCH_KERNEL; |
| 10989 | |
| 10990 | if (!attr->exclude_user) |
| 10991 | mask |= PERF_SAMPLE_BRANCH_USER; |
| 10992 | |
| 10993 | if (!attr->exclude_hv) |
| 10994 | mask |= PERF_SAMPLE_BRANCH_HV; |
| 10995 | /* |
| 10996 | * adjust user setting (for HW filter setup) |
| 10997 | */ |
| 10998 | attr->branch_sample_type = mask; |
| 10999 | } |
Stephane Eranian | e712209 | 2013-06-06 11:02:04 +0200 | [diff] [blame] | 11000 | /* privileged levels capture (kernel, hv): check permissions */ |
Joel Fernandes (Google) | da97e18 | 2019-10-14 13:03:08 -0400 | [diff] [blame] | 11001 | if (mask & PERF_SAMPLE_BRANCH_PERM_PLM) { |
| 11002 | ret = perf_allow_kernel(attr); |
| 11003 | if (ret) |
| 11004 | return ret; |
| 11005 | } |
Stephane Eranian | bce38cd | 2012-02-09 23:20:51 +0100 | [diff] [blame] | 11006 | } |
Jiri Olsa | 4018994 | 2012-08-07 15:20:37 +0200 | [diff] [blame] | 11007 | |
Jiri Olsa | c5ebced | 2012-08-07 15:20:40 +0200 | [diff] [blame] | 11008 | if (attr->sample_type & PERF_SAMPLE_REGS_USER) { |
Jiri Olsa | 4018994 | 2012-08-07 15:20:37 +0200 | [diff] [blame] | 11009 | ret = perf_reg_validate(attr->sample_regs_user); |
Jiri Olsa | c5ebced | 2012-08-07 15:20:40 +0200 | [diff] [blame] | 11010 | if (ret) |
| 11011 | return ret; |
| 11012 | } |
| 11013 | |
| 11014 | if (attr->sample_type & PERF_SAMPLE_STACK_USER) { |
| 11015 | if (!arch_perf_have_user_stack_dump()) |
| 11016 | return -ENOSYS; |
| 11017 | |
| 11018 | /* |
| 11019 | * We have __u32 type for the size, but so far |
| 11020 | * we can only use __u16 as maximum due to the |
| 11021 | * __u16 sample size limit. |
| 11022 | */ |
| 11023 | if (attr->sample_stack_user >= USHRT_MAX) |
Jiri Olsa | 78b562f | 2018-04-15 11:23:50 +0200 | [diff] [blame] | 11024 | return -EINVAL; |
Jiri Olsa | c5ebced | 2012-08-07 15:20:40 +0200 | [diff] [blame] | 11025 | else if (!IS_ALIGNED(attr->sample_stack_user, sizeof(u64))) |
Jiri Olsa | 78b562f | 2018-04-15 11:23:50 +0200 | [diff] [blame] | 11026 | return -EINVAL; |
Jiri Olsa | c5ebced | 2012-08-07 15:20:40 +0200 | [diff] [blame] | 11027 | } |
Jiri Olsa | 4018994 | 2012-08-07 15:20:37 +0200 | [diff] [blame] | 11028 | |
Jiri Olsa | 5f97052 | 2018-03-12 14:45:46 +0100 | [diff] [blame] | 11029 | if (!attr->sample_max_stack) |
| 11030 | attr->sample_max_stack = sysctl_perf_event_max_stack; |
| 11031 | |
Stephane Eranian | 60e2364 | 2014-09-24 13:48:37 +0200 | [diff] [blame] | 11032 | if (attr->sample_type & PERF_SAMPLE_REGS_INTR) |
| 11033 | ret = perf_reg_validate(attr->sample_regs_intr); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 11034 | out: |
| 11035 | return ret; |
| 11036 | |
| 11037 | err_size: |
| 11038 | put_user(sizeof(*attr), &uattr->size); |
| 11039 | ret = -E2BIG; |
| 11040 | goto out; |
| 11041 | } |
| 11042 | |
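| | /*
| |  * Note (added for clarity): this is reached from the PERF_EVENT_IOC_SET_OUTPUT
| |  * ioctl and from sys_perf_event_open() with PERF_FLAG_FD_OUTPUT; it redirects
| |  * @event's samples into @output_event's ring buffer, subject to the checks
| |  * below (same CPU or task, same clock, same write direction, and the same
| |  * PMU whenever both events generate AUX data).
| |  */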
Peter Zijlstra | ac9721f | 2010-05-27 12:54:41 +0200 | [diff] [blame] | 11043 | static int |
| 11044 | perf_event_set_output(struct perf_event *event, struct perf_event *output_event) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 11045 | { |
Steven Rostedt (VMware) | 56de4e8 | 2019-12-13 13:21:30 -0500 | [diff] [blame] | 11046 | struct perf_buffer *rb = NULL; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 11047 | int ret = -EINVAL; |
| 11048 | |
Peter Zijlstra | ac9721f | 2010-05-27 12:54:41 +0200 | [diff] [blame] | 11049 | if (!output_event) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 11050 | goto set; |
| 11051 | |
Peter Zijlstra | ac9721f | 2010-05-27 12:54:41 +0200 | [diff] [blame] | 11052 | /* don't allow circular references */ |
| 11053 | if (event == output_event) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 11054 | goto out; |
| 11055 | |
Peter Zijlstra | 0f13930 | 2010-05-20 14:35:15 +0200 | [diff] [blame] | 11056 | /* |
| 11057 | * Don't allow cross-cpu buffers |
| 11058 | */ |
| 11059 | if (output_event->cpu != event->cpu) |
| 11060 | goto out; |
| 11061 | |
| 11062 | /* |
Frederic Weisbecker | 7636913 | 2011-05-19 19:55:04 +0200 | [diff] [blame] | 11063 | * If it's not a per-cpu rb, it must be the same task.
Peter Zijlstra | 0f13930 | 2010-05-20 14:35:15 +0200 | [diff] [blame] | 11064 | */ |
| 11065 | if (output_event->cpu == -1 && output_event->ctx != event->ctx) |
| 11066 | goto out; |
| 11067 | |
Peter Zijlstra | 34f4392 | 2015-02-20 14:05:38 +0100 | [diff] [blame] | 11068 | /* |
| 11069 | * Mixing clocks in the same buffer is trouble you don't need. |
| 11070 | */ |
| 11071 | if (output_event->clock != event->clock) |
| 11072 | goto out; |
| 11073 | |
Peter Zijlstra | 45bfb2e | 2015-01-14 14:18:11 +0200 | [diff] [blame] | 11074 | /* |
Wang Nan | 9ecda41 | 2016-04-05 14:11:18 +0000 | [diff] [blame] | 11075 | * The ring buffer is written either from the beginning or from the end;
| 11076 | * mixing the two directions is not allowed.
| 11077 | */ |
| 11078 | if (is_write_backward(output_event) != is_write_backward(event)) |
| 11079 | goto out; |
| 11080 | |
| 11081 | /* |
Peter Zijlstra | 45bfb2e | 2015-01-14 14:18:11 +0200 | [diff] [blame] | 11082 | * If both events generate aux data, they must be on the same PMU |
| 11083 | */ |
| 11084 | if (has_aux(event) && has_aux(output_event) && |
| 11085 | event->pmu != output_event->pmu) |
| 11086 | goto out; |
| 11087 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 11088 | set: |
| 11089 | mutex_lock(&event->mmap_mutex); |
Peter Zijlstra | ac9721f | 2010-05-27 12:54:41 +0200 | [diff] [blame] | 11090 | /* Can't redirect output if we've got an active mmap() */ |
| 11091 | if (atomic_read(&event->mmap_count)) |
| 11092 | goto unlock; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 11093 | |
Peter Zijlstra | ac9721f | 2010-05-27 12:54:41 +0200 | [diff] [blame] | 11094 | if (output_event) { |
Frederic Weisbecker | 7636913 | 2011-05-19 19:55:04 +0200 | [diff] [blame] | 11095 | /* get the rb we want to redirect to */ |
| 11096 | rb = ring_buffer_get(output_event); |
| 11097 | if (!rb) |
Peter Zijlstra | ac9721f | 2010-05-27 12:54:41 +0200 | [diff] [blame] | 11098 | goto unlock; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 11099 | } |
| 11100 | |
Peter Zijlstra | b69cf53 | 2014-03-14 10:50:33 +0100 | [diff] [blame] | 11101 | ring_buffer_attach(event, rb); |
Peter Zijlstra | 9bb5d40 | 2013-06-04 10:44:21 +0200 | [diff] [blame] | 11102 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 11103 | ret = 0; |
Peter Zijlstra | ac9721f | 2010-05-27 12:54:41 +0200 | [diff] [blame] | 11104 | unlock: |
| 11105 | mutex_unlock(&event->mmap_mutex); |
| 11106 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 11107 | out: |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 11108 | return ret; |
| 11109 | } |
| 11110 | |
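| | /*
| |  * Note (added for clarity): always locking the lower-addressed mutex first
| |  * gives every caller the same global ordering, so two tasks locking the
| |  * same pair of context mutexes cannot deadlock against each other.
| |  */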
Peter Zijlstra | f63a8da | 2015-01-23 12:24:14 +0100 | [diff] [blame] | 11111 | static void mutex_lock_double(struct mutex *a, struct mutex *b) |
| 11112 | { |
| 11113 | if (b < a) |
| 11114 | swap(a, b); |
| 11115 | |
| 11116 | mutex_lock(a); |
| 11117 | mutex_lock_nested(b, SINGLE_DEPTH_NESTING); |
| 11118 | } |
| 11119 | |
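| | /*
| |  * Note (added for clarity): this runs when userspace sets attr.use_clockid
| |  * and selects a clock in attr.clockid at perf_event_open() time. Clocks
| |  * that are not NMI-safe are only accepted for PMUs that advertise
| |  * PERF_PMU_CAP_NO_NMI, i.e. PMUs that never deliver events from NMI context.
| |  */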
Peter Zijlstra | 34f4392 | 2015-02-20 14:05:38 +0100 | [diff] [blame] | 11120 | static int perf_event_set_clock(struct perf_event *event, clockid_t clk_id) |
| 11121 | { |
| 11122 | bool nmi_safe = false; |
| 11123 | |
| 11124 | switch (clk_id) { |
| 11125 | case CLOCK_MONOTONIC: |
| 11126 | event->clock = &ktime_get_mono_fast_ns; |
| 11127 | nmi_safe = true; |
| 11128 | break; |
| 11129 | |
| 11130 | case CLOCK_MONOTONIC_RAW: |
| 11131 | event->clock = &ktime_get_raw_fast_ns; |
| 11132 | nmi_safe = true; |
| 11133 | break; |
| 11134 | |
| 11135 | case CLOCK_REALTIME: |
| 11136 | event->clock = &ktime_get_real_ns; |
| 11137 | break; |
| 11138 | |
| 11139 | case CLOCK_BOOTTIME: |
Jason A. Donenfeld | 9285ec4 | 2019-06-21 22:32:48 +0200 | [diff] [blame] | 11140 | event->clock = &ktime_get_boottime_ns; |
Peter Zijlstra | 34f4392 | 2015-02-20 14:05:38 +0100 | [diff] [blame] | 11141 | break; |
| 11142 | |
| 11143 | case CLOCK_TAI: |
Jason A. Donenfeld | 9285ec4 | 2019-06-21 22:32:48 +0200 | [diff] [blame] | 11144 | event->clock = &ktime_get_clocktai_ns; |
Peter Zijlstra | 34f4392 | 2015-02-20 14:05:38 +0100 | [diff] [blame] | 11145 | break; |
| 11146 | |
| 11147 | default: |
| 11148 | return -EINVAL; |
| 11149 | } |
| 11150 | |
| 11151 | if (!nmi_safe && !(event->pmu->capabilities & PERF_PMU_CAP_NO_NMI)) |
| 11152 | return -EINVAL; |
| 11153 | |
| 11154 | return 0; |
| 11155 | } |
| 11156 | |
Peter Zijlstra | 321027c | 2017-01-11 21:09:50 +0100 | [diff] [blame] | 11157 | /* |
| 11158 | * Variation on perf_event_ctx_lock_nested(), except we take two context |
| 11159 | * mutexes. |
| 11160 | */ |
| 11161 | static struct perf_event_context * |
| 11162 | __perf_event_ctx_lock_double(struct perf_event *group_leader, |
| 11163 | struct perf_event_context *ctx) |
| 11164 | { |
| 11165 | struct perf_event_context *gctx; |
| 11166 | |
| 11167 | again: |
| 11168 | rcu_read_lock(); |
| 11169 | gctx = READ_ONCE(group_leader->ctx); |
Elena Reshetova | 8c94abb | 2019-01-28 14:27:26 +0200 | [diff] [blame] | 11170 | if (!refcount_inc_not_zero(&gctx->refcount)) { |
Peter Zijlstra | 321027c | 2017-01-11 21:09:50 +0100 | [diff] [blame] | 11171 | rcu_read_unlock(); |
| 11172 | goto again; |
| 11173 | } |
| 11174 | rcu_read_unlock(); |
| 11175 | |
| 11176 | mutex_lock_double(&gctx->mutex, &ctx->mutex); |
| 11177 | |
| 11178 | if (group_leader->ctx != gctx) { |
| 11179 | mutex_unlock(&ctx->mutex); |
| 11180 | mutex_unlock(&gctx->mutex); |
| 11181 | put_ctx(gctx); |
| 11182 | goto again; |
| 11183 | } |
| 11184 | |
| 11185 | return gctx; |
| 11186 | } |
| 11187 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 11188 | /** |
| 11189 | * sys_perf_event_open - open a performance event, associate it to a task/cpu |
| 11190 | * |
| 11191 | * @attr_uptr: event_id type attributes for monitoring/sampling |
| 11192 | * @pid: target pid |
| 11193 | * @cpu: target cpu |
| 11194 | * @group_fd: group leader event fd
| | * @flags: perf event open flags
| 11195 | */ |
| 11196 | SYSCALL_DEFINE5(perf_event_open, |
| 11197 | struct perf_event_attr __user *, attr_uptr, |
| 11198 | pid_t, pid, int, cpu, int, group_fd, unsigned long, flags) |
| 11199 | { |
Peter Zijlstra | b04243e | 2010-09-17 11:28:48 +0200 | [diff] [blame] | 11200 | struct perf_event *group_leader = NULL, *output_event = NULL; |
| 11201 | struct perf_event *event, *sibling; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 11202 | struct perf_event_attr attr; |
Peter Zijlstra | f63a8da | 2015-01-23 12:24:14 +0100 | [diff] [blame] | 11203 | struct perf_event_context *ctx, *uninitialized_var(gctx); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 11204 | struct file *event_file = NULL; |
Al Viro | 2903ff0 | 2012-08-28 12:52:22 -0400 | [diff] [blame] | 11205 | struct fd group = {NULL, 0}; |
Matt Helsley | 38a81da | 2010-09-13 13:01:20 -0700 | [diff] [blame] | 11206 | struct task_struct *task = NULL; |
Peter Zijlstra | 89a1e18 | 2010-09-07 17:34:50 +0200 | [diff] [blame] | 11207 | struct pmu *pmu; |
Al Viro | ea635c6 | 2010-05-26 17:40:29 -0400 | [diff] [blame] | 11208 | int event_fd; |
Peter Zijlstra | b04243e | 2010-09-17 11:28:48 +0200 | [diff] [blame] | 11209 | int move_group = 0; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 11210 | int err; |
Yann Droneaud | a21b0b3 | 2014-01-05 21:36:33 +0100 | [diff] [blame] | 11211 | int f_flags = O_RDWR; |
Matt Fleming | 79dff51 | 2015-01-23 18:45:42 +0000 | [diff] [blame] | 11212 | int cgroup_fd = -1; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 11213 | |
| 11214 | /* for future expandability... */ |
Stephane Eranian | e5d1367 | 2011-02-14 11:20:01 +0200 | [diff] [blame] | 11215 | if (flags & ~PERF_FLAG_ALL) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 11216 | return -EINVAL; |
| 11217 | |
Joel Fernandes (Google) | da97e18 | 2019-10-14 13:03:08 -0400 | [diff] [blame] | 11218 | /* Do we allow access to perf_event_open(2)? */ |
| 11219 | err = security_perf_event_open(&attr, PERF_SECURITY_OPEN); |
| 11220 | if (err) |
| 11221 | return err; |
| 11222 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 11223 | err = perf_copy_attr(attr_uptr, &attr); |
| 11224 | if (err) |
| 11225 | return err; |
| 11226 | |
| 11227 | if (!attr.exclude_kernel) { |
Joel Fernandes (Google) | da97e18 | 2019-10-14 13:03:08 -0400 | [diff] [blame] | 11228 | err = perf_allow_kernel(&attr); |
| 11229 | if (err) |
| 11230 | return err; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 11231 | } |
| 11232 | |
Hari Bathini | e422267 | 2017-03-08 02:11:36 +0530 | [diff] [blame] | 11233 | if (attr.namespaces) { |
| 11234 | if (!capable(CAP_SYS_ADMIN)) |
| 11235 | return -EACCES; |
| 11236 | } |
| 11237 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 11238 | if (attr.freq) { |
| 11239 | if (attr.sample_freq > sysctl_perf_event_sample_rate) |
| 11240 | return -EINVAL; |
Peter Zijlstra | 0819b2e | 2014-05-15 20:23:48 +0200 | [diff] [blame] | 11241 | } else { |
| 11242 | if (attr.sample_period & (1ULL << 63)) |
| 11243 | return -EINVAL; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 11244 | } |
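
/*
 * For reference, the check above covers the two ways userspace can ask
 * for sampling.  Either a target frequency, bounded by the
 * kernel.perf_event_max_sample_rate sysctl:
 *
 *	attr.freq = 1;
 *	attr.sample_freq = 4000;
 *
 * or a fixed period, which must not have bit 63 set:
 *
 *	attr.freq = 0;
 *	attr.sample_period = 100000;
 *
 * The values are arbitrary; sample_freq and sample_period share a union
 * in struct perf_event_attr.
 */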
| 11245 | |
Kan Liang | fc7ce9c | 2017-08-28 20:52:49 -0400 | [diff] [blame] | 11246 | /* Only privileged users can get physical addresses */ |
Joel Fernandes (Google) | da97e18 | 2019-10-14 13:03:08 -0400 | [diff] [blame] | 11247 | if ((attr.sample_type & PERF_SAMPLE_PHYS_ADDR)) { |
| 11248 | err = perf_allow_kernel(&attr); |
| 11249 | if (err) |
| 11250 | return err; |
| 11251 | } |
Kan Liang | fc7ce9c | 2017-08-28 20:52:49 -0400 | [diff] [blame] | 11252 | |
David Howells | b0c8fdc | 2019-08-19 17:18:00 -0700 | [diff] [blame] | 11253 | err = security_locked_down(LOCKDOWN_PERF); |
| 11254 | if (err && (attr.sample_type & PERF_SAMPLE_REGS_INTR)) |
| 11255 | /* REGS_INTR can leak data, lockdown must prevent this */ |
| 11256 | return err; |
| 11257 | |
| 11258 | err = 0; |
| 11259 | |
Stephane Eranian | e5d1367 | 2011-02-14 11:20:01 +0200 | [diff] [blame] | 11260 | /* |
| 11261 | * In cgroup mode, the pid argument is used to pass the fd |
| 11262 | * opened to the cgroup directory in cgroupfs. The cpu argument |
| 11263 | * designates the cpu on which to monitor threads from that |
| 11264 | * cgroup. |
| 11265 | */ |
| 11266 | if ((flags & PERF_FLAG_PID_CGROUP) && (pid == -1 || cpu == -1)) |
| 11267 | return -EINVAL; |
| 11268 | |
Yann Droneaud | a21b0b3 | 2014-01-05 21:36:33 +0100 | [diff] [blame] | 11269 | if (flags & PERF_FLAG_FD_CLOEXEC) |
| 11270 | f_flags |= O_CLOEXEC; |
| 11271 | |
| 11272 | event_fd = get_unused_fd_flags(f_flags); |
Al Viro | ea635c6 | 2010-05-26 17:40:29 -0400 | [diff] [blame] | 11273 | if (event_fd < 0) |
| 11274 | return event_fd; |
| 11275 | |
Peter Zijlstra | ac9721f | 2010-05-27 12:54:41 +0200 | [diff] [blame] | 11276 | if (group_fd != -1) { |
Al Viro | 2903ff0 | 2012-08-28 12:52:22 -0400 | [diff] [blame] | 11277 | err = perf_fget_light(group_fd, &group); |
| 11278 | if (err) |
Stephane Eranian | d14b12d | 2010-09-17 11:28:47 +0200 | [diff] [blame] | 11279 | goto err_fd; |
Al Viro | 2903ff0 | 2012-08-28 12:52:22 -0400 | [diff] [blame] | 11280 | group_leader = group.file->private_data; |
Peter Zijlstra | ac9721f | 2010-05-27 12:54:41 +0200 | [diff] [blame] | 11281 | if (flags & PERF_FLAG_FD_OUTPUT) |
| 11282 | output_event = group_leader; |
| 11283 | if (flags & PERF_FLAG_FD_NO_GROUP) |
| 11284 | group_leader = NULL; |
| 11285 | } |
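
/*
 * Userspace builds an event group by passing the leader's fd as
 * @group_fd for every subsequent sibling.  A minimal sketch, assuming
 * leader_attr and sibling_attr are already-initialized perf_event_attr
 * structures and omitting error handling:
 *
 *	int leader, sibling;
 *
 *	leader  = syscall(__NR_perf_event_open, &leader_attr,
 *			  0, -1, -1, 0);
 *	sibling = syscall(__NR_perf_event_open, &sibling_attr,
 *			  0, -1, leader, 0);
 *
 * The whole group is then scheduled onto the PMU as a unit, which is
 * what the group and context compatibility checks further down enforce.
 */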
| 11286 | |
Stephane Eranian | e5d1367 | 2011-02-14 11:20:01 +0200 | [diff] [blame] | 11287 | if (pid != -1 && !(flags & PERF_FLAG_PID_CGROUP)) { |
Peter Zijlstra | c6be5a5 | 2010-10-14 16:59:46 +0200 | [diff] [blame] | 11288 | task = find_lively_task_by_vpid(pid); |
| 11289 | if (IS_ERR(task)) { |
| 11290 | err = PTR_ERR(task); |
| 11291 | goto err_group_fd; |
| 11292 | } |
| 11293 | } |
| 11294 | |
Peter Zijlstra | 1f4ee50 | 2014-05-06 09:59:34 +0200 | [diff] [blame] | 11295 | if (task && group_leader && |
| 11296 | group_leader->attr.inherit != attr.inherit) { |
| 11297 | err = -EINVAL; |
| 11298 | goto err_task; |
| 11299 | } |
| 11300 | |
Peter Zijlstra | 79c9ce5 | 2016-04-26 11:36:53 +0200 | [diff] [blame] | 11301 | if (task) { |
| 11302 | err = mutex_lock_interruptible(&task->signal->cred_guard_mutex); |
| 11303 | if (err) |
Alexander Levin | e5aeee5 | 2017-06-03 03:39:13 +0000 | [diff] [blame] | 11304 | goto err_task; |
Peter Zijlstra | 79c9ce5 | 2016-04-26 11:36:53 +0200 | [diff] [blame] | 11305 | |
| 11306 | /* |
| 11307 | * Reuse ptrace permission checks for now. |
| 11308 | * |
| 11309 | * We must hold cred_guard_mutex across this and any potential |
| 11310 | * perf_install_in_context() call for this new event to |
| 11311 | * serialize against exec() altering our credentials (and the |
| 11312 | * perf_event_exit_task() that could imply). |
| 11313 | */ |
| 11314 | err = -EACCES; |
| 11315 | if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) |
| 11316 | goto err_cred; |
| 11317 | } |
| 11318 | |
Matt Fleming | 79dff51 | 2015-01-23 18:45:42 +0000 | [diff] [blame] | 11319 | if (flags & PERF_FLAG_PID_CGROUP) |
| 11320 | cgroup_fd = pid; |
| 11321 | |
Avi Kivity | 4dc0da8 | 2011-06-29 18:42:35 +0300 | [diff] [blame] | 11322 | event = perf_event_alloc(&attr, cpu, task, group_leader, NULL, |
Matt Fleming | 79dff51 | 2015-01-23 18:45:42 +0000 | [diff] [blame] | 11323 | NULL, NULL, cgroup_fd); |
Stephane Eranian | d14b12d | 2010-09-17 11:28:47 +0200 | [diff] [blame] | 11324 | if (IS_ERR(event)) { |
| 11325 | err = PTR_ERR(event); |
Peter Zijlstra | 79c9ce5 | 2016-04-26 11:36:53 +0200 | [diff] [blame] | 11326 | goto err_cred; |
Stephane Eranian | d14b12d | 2010-09-17 11:28:47 +0200 | [diff] [blame] | 11327 | } |
| 11328 | |
Vince Weaver | 53b2533 | 2014-05-16 17:12:12 -0400 | [diff] [blame] | 11329 | if (is_sampling_event(event)) { |
| 11330 | if (event->pmu->capabilities & PERF_PMU_CAP_NO_INTERRUPT) { |
Vineet Gupta | a139655 | 2016-05-09 15:07:40 +0530 | [diff] [blame] | 11331 | err = -EOPNOTSUPP; |
Vince Weaver | 53b2533 | 2014-05-16 17:12:12 -0400 | [diff] [blame] | 11332 | goto err_alloc; |
| 11333 | } |
| 11334 | } |
| 11335 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 11336 | /* |
Peter Zijlstra | 89a1e18 | 2010-09-07 17:34:50 +0200 | [diff] [blame] | 11337 | * Special case software events and allow them to be part of |
| 11338 | * any hardware group. |
| 11339 | */ |
| 11340 | pmu = event->pmu; |
Peter Zijlstra | b04243e | 2010-09-17 11:28:48 +0200 | [diff] [blame] | 11341 | |
Peter Zijlstra | 34f4392 | 2015-02-20 14:05:38 +0100 | [diff] [blame] | 11342 | if (attr.use_clockid) { |
| 11343 | err = perf_event_set_clock(event, attr.clockid); |
| 11344 | if (err) |
| 11345 | goto err_alloc; |
| 11346 | } |
| 11347 | |
David Carrillo-Cisneros | 4ff6a8d | 2016-08-17 13:55:05 -0700 | [diff] [blame] | 11348 | if (pmu->task_ctx_nr == perf_sw_context) |
| 11349 | event->event_caps |= PERF_EV_CAP_SOFTWARE; |
| 11350 | |
Song Liu | a1150c2 | 2018-05-03 12:47:16 -0700 | [diff] [blame] | 11351 | if (group_leader) { |
| 11352 | if (is_software_event(event) && |
| 11353 | !in_software_context(group_leader)) { |
Peter Zijlstra | b04243e | 2010-09-17 11:28:48 +0200 | [diff] [blame] | 11354 | /* |
Song Liu | a1150c2 | 2018-05-03 12:47:16 -0700 | [diff] [blame] | 11355 | * The event is a sw event, but the group_leader |
| 11356 | * is in a hw context. |
Peter Zijlstra | b04243e | 2010-09-17 11:28:48 +0200 | [diff] [blame] | 11357 | * |
Song Liu | a1150c2 | 2018-05-03 12:47:16 -0700 | [diff] [blame] | 11358 | * Allow the addition of software events to hw |
| 11359 | * groups; this is safe because software events |
| 11360 | * never fail to schedule. |
Peter Zijlstra | b04243e | 2010-09-17 11:28:48 +0200 | [diff] [blame] | 11361 | */ |
Song Liu | a1150c2 | 2018-05-03 12:47:16 -0700 | [diff] [blame] | 11362 | pmu = group_leader->ctx->pmu; |
| 11363 | } else if (!is_software_event(event) && |
| 11364 | is_software_event(group_leader) && |
David Carrillo-Cisneros | 4ff6a8d | 2016-08-17 13:55:05 -0700 | [diff] [blame] | 11365 | (group_leader->group_caps & PERF_EV_CAP_SOFTWARE)) { |
Peter Zijlstra | b04243e | 2010-09-17 11:28:48 +0200 | [diff] [blame] | 11366 | /* |
| 11367 | * In case the group is a pure software group, and we |
| 11368 | * try to add a hardware event, move the whole group to |
| 11369 | * the hardware context. |
| 11370 | */ |
| 11371 | move_group = 1; |
| 11372 | } |
| 11373 | } |
Peter Zijlstra | 89a1e18 | 2010-09-07 17:34:50 +0200 | [diff] [blame] | 11374 | |
| 11375 | /* |
| 11376 | * Get the target context (task or percpu): |
| 11377 | */ |
Yan, Zheng | 4af57ef | 2014-11-04 21:56:01 -0500 | [diff] [blame] | 11378 | ctx = find_get_context(pmu, task, event); |
Peter Zijlstra | 89a1e18 | 2010-09-07 17:34:50 +0200 | [diff] [blame] | 11379 | if (IS_ERR(ctx)) { |
| 11380 | err = PTR_ERR(ctx); |
Peter Zijlstra | c6be5a5 | 2010-10-14 16:59:46 +0200 | [diff] [blame] | 11381 | goto err_alloc; |
Peter Zijlstra | 89a1e18 | 2010-09-07 17:34:50 +0200 | [diff] [blame] | 11382 | } |
| 11383 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 11384 | /* |
| 11385 | * Look up the group leader (we will attach this event to it): |
| 11386 | */ |
Peter Zijlstra | ac9721f | 2010-05-27 12:54:41 +0200 | [diff] [blame] | 11387 | if (group_leader) { |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 11388 | err = -EINVAL; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 11389 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 11390 | /* |
| 11391 | * Do not allow a recursive hierarchy (this new sibling |
| 11392 | * becoming part of another group-sibling): |
| 11393 | */ |
| 11394 | if (group_leader->group_leader != group_leader) |
Peter Zijlstra | c3f00c7 | 2010-08-18 14:37:15 +0200 | [diff] [blame] | 11395 | goto err_context; |
Peter Zijlstra | 34f4392 | 2015-02-20 14:05:38 +0100 | [diff] [blame] | 11396 | |
| 11397 | /* All events in a group should have the same clock */ |
| 11398 | if (group_leader->clock != event->clock) |
| 11399 | goto err_context; |
| 11400 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 11401 | /* |
Mark Rutland | 64aee2a | 2017-06-22 15:41:38 +0100 | [diff] [blame] | 11402 | * Make sure both events are for the same CPU; |
| 11403 | * grouping events for different CPUs is broken, since |
| 11404 | * you can never concurrently schedule them anyhow. |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 11405 | */ |
Mark Rutland | 64aee2a | 2017-06-22 15:41:38 +0100 | [diff] [blame] | 11406 | if (group_leader->cpu != event->cpu) |
| 11407 | goto err_context; |
Peter Zijlstra | c3c87e7 | 2015-01-23 11:19:48 +0100 | [diff] [blame] | 11408 | |
Mark Rutland | 64aee2a | 2017-06-22 15:41:38 +0100 | [diff] [blame] | 11409 | /* |
| 11410 | * Make sure both events are on the same task, or both |
| 11411 | * per-CPU events. |
| 11412 | */ |
| 11413 | if (group_leader->ctx->task != ctx->task) |
| 11414 | goto err_context; |
| 11415 | |
| 11416 | /* |
| 11417 | * Do not allow to attach to a group in a different task |
| 11418 | * or CPU context. If we're moving SW events, we'll fix |
| 11419 | * this up later, so allow that. |
| 11420 | */ |
| 11421 | if (!move_group && group_leader->ctx != ctx) |
| 11422 | goto err_context; |
Peter Zijlstra | b04243e | 2010-09-17 11:28:48 +0200 | [diff] [blame] | 11423 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 11424 | /* |
| 11425 | * Only a group leader can be exclusive or pinned |
| 11426 | */ |
| 11427 | if (attr.exclusive || attr.pinned) |
Peter Zijlstra | c3f00c7 | 2010-08-18 14:37:15 +0200 | [diff] [blame] | 11428 | goto err_context; |
Peter Zijlstra | ac9721f | 2010-05-27 12:54:41 +0200 | [diff] [blame] | 11429 | } |
| 11430 | |
| 11431 | if (output_event) { |
| 11432 | err = perf_event_set_output(event, output_event); |
| 11433 | if (err) |
Peter Zijlstra | c3f00c7 | 2010-08-18 14:37:15 +0200 | [diff] [blame] | 11434 | goto err_context; |
Peter Zijlstra | ac9721f | 2010-05-27 12:54:41 +0200 | [diff] [blame] | 11435 | } |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 11436 | |
Yann Droneaud | a21b0b3 | 2014-01-05 21:36:33 +0100 | [diff] [blame] | 11437 | event_file = anon_inode_getfile("[perf_event]", &perf_fops, event, |
| 11438 | f_flags); |
Al Viro | ea635c6 | 2010-05-26 17:40:29 -0400 | [diff] [blame] | 11439 | if (IS_ERR(event_file)) { |
| 11440 | err = PTR_ERR(event_file); |
Alexander Shishkin | 201c2f8 | 2016-03-21 10:02:42 +0200 | [diff] [blame] | 11441 | event_file = NULL; |
Peter Zijlstra | c3f00c7 | 2010-08-18 14:37:15 +0200 | [diff] [blame] | 11442 | goto err_context; |
Al Viro | ea635c6 | 2010-05-26 17:40:29 -0400 | [diff] [blame] | 11443 | } |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 11444 | |
Peter Zijlstra | b04243e | 2010-09-17 11:28:48 +0200 | [diff] [blame] | 11445 | if (move_group) { |
Peter Zijlstra | 321027c | 2017-01-11 21:09:50 +0100 | [diff] [blame] | 11446 | gctx = __perf_event_ctx_lock_double(group_leader, ctx); |
| 11447 | |
Peter Zijlstra | 84c4e62 | 2016-02-24 18:45:40 +0100 | [diff] [blame] | 11448 | if (gctx->task == TASK_TOMBSTONE) { |
| 11449 | err = -ESRCH; |
| 11450 | goto err_locked; |
| 11451 | } |
Peter Zijlstra | 321027c | 2017-01-11 21:09:50 +0100 | [diff] [blame] | 11452 | |
| 11453 | /* |
| 11454 | * Check if we raced against another sys_perf_event_open() call |
| 11455 | * moving the software group underneath us. |
| 11456 | */ |
| 11457 | if (!(group_leader->group_caps & PERF_EV_CAP_SOFTWARE)) { |
| 11458 | /* |
| 11459 | * If someone moved the group out from under us, check |
| 11460 | * whether this new event wound up on the same ctx; if so, |
| 11461 | * it's the regular !move_group case, otherwise fail. |
| 11462 | */ |
| 11463 | if (gctx != ctx) { |
| 11464 | err = -EINVAL; |
| 11465 | goto err_locked; |
| 11466 | } else { |
| 11467 | perf_event_ctx_unlock(group_leader, gctx); |
| 11468 | move_group = 0; |
| 11469 | } |
| 11470 | } |
Alexander Shishkin | 8a58dda | 2019-07-01 14:07:55 +0300 | [diff] [blame] | 11471 | |
| 11472 | /* |
| 11473 | * Failure to create exclusive events returns -EBUSY. |
| 11474 | */ |
| 11475 | err = -EBUSY; |
| 11476 | if (!exclusive_event_installable(group_leader, ctx)) |
| 11477 | goto err_locked; |
| 11478 | |
| 11479 | for_each_sibling_event(sibling, group_leader) { |
| 11480 | if (!exclusive_event_installable(sibling, ctx)) |
| 11481 | goto err_locked; |
| 11482 | } |
Peter Zijlstra | f55fc2a | 2015-09-09 19:06:33 +0200 | [diff] [blame] | 11483 | } else { |
| 11484 | mutex_lock(&ctx->mutex); |
| 11485 | } |
Peter Zijlstra | b04243e | 2010-09-17 11:28:48 +0200 | [diff] [blame] | 11486 | |
Peter Zijlstra | 84c4e62 | 2016-02-24 18:45:40 +0100 | [diff] [blame] | 11487 | if (ctx->task == TASK_TOMBSTONE) { |
| 11488 | err = -ESRCH; |
| 11489 | goto err_locked; |
| 11490 | } |
| 11491 | |
Peter Zijlstra | a723968 | 2015-09-09 19:06:33 +0200 | [diff] [blame] | 11492 | if (!perf_event_validate_size(event)) { |
| 11493 | err = -E2BIG; |
| 11494 | goto err_locked; |
| 11495 | } |
| 11496 | |
Thomas Gleixner | a63fbed | 2017-05-24 10:15:34 +0200 | [diff] [blame] | 11497 | if (!task) { |
| 11498 | /* |
| 11499 | * Check if the @cpu we're creating an event for is online. |
| 11500 | * |
| 11501 | * We use the perf_cpu_context::ctx::mutex to serialize against |
| 11502 | * the hotplug notifiers. See perf_event_{init,exit}_cpu(). |
| 11503 | */ |
| 11504 | struct perf_cpu_context *cpuctx = |
| 11505 | container_of(ctx, struct perf_cpu_context, ctx); |
| 11506 | |
| 11507 | if (!cpuctx->online) { |
| 11508 | err = -ENODEV; |
| 11509 | goto err_locked; |
| 11510 | } |
| 11511 | } |
| 11512 | |
Mark Rutland | da9ec3d | 2020-01-06 12:03:39 +0000 | [diff] [blame] | 11513 | if (perf_need_aux_event(event) && !perf_get_aux_event(event, group_leader)) { |
| 11514 | err = -EINVAL; |
Alexander Shishkin | ab43762 | 2019-08-06 11:46:00 +0300 | [diff] [blame] | 11515 | goto err_locked; |
Mark Rutland | da9ec3d | 2020-01-06 12:03:39 +0000 | [diff] [blame] | 11516 | } |
Thomas Gleixner | a63fbed | 2017-05-24 10:15:34 +0200 | [diff] [blame] | 11517 | |
Peter Zijlstra | f55fc2a | 2015-09-09 19:06:33 +0200 | [diff] [blame] | 11518 | /* |
| 11519 | * Must be under the same ctx::mutex as perf_install_in_context(), |
| 11520 | * because we need to serialize with concurrent event creation. |
| 11521 | */ |
| 11522 | if (!exclusive_event_installable(event, ctx)) { |
Peter Zijlstra | f55fc2a | 2015-09-09 19:06:33 +0200 | [diff] [blame] | 11523 | err = -EBUSY; |
| 11524 | goto err_locked; |
| 11525 | } |
| 11526 | |
| 11527 | WARN_ON_ONCE(ctx->parent_ctx); |
| 11528 | |
Peter Zijlstra | 79c9ce5 | 2016-04-26 11:36:53 +0200 | [diff] [blame] | 11529 | /* |
| 11530 | * This is the point of no return; we cannot fail hereafter. This is |
| 11531 | * where we start modifying current state. |
| 11532 | */ |
| 11533 | |
Peter Zijlstra | f55fc2a | 2015-09-09 19:06:33 +0200 | [diff] [blame] | 11534 | if (move_group) { |
Peter Zijlstra | f63a8da | 2015-01-23 12:24:14 +0100 | [diff] [blame] | 11535 | /* |
| 11536 | * See perf_event_ctx_lock() for comments on the details |
| 11537 | * of swizzling perf_event::ctx. |
| 11538 | */ |
Peter Zijlstra | 45a0e07 | 2016-01-26 13:09:48 +0100 | [diff] [blame] | 11539 | perf_remove_from_context(group_leader, 0); |
Peter Zijlstra | 279b516 | 2017-02-16 10:28:37 +0100 | [diff] [blame] | 11540 | put_ctx(gctx); |
Jiri Olsa | 0231bb5 | 2013-02-01 11:23:45 +0100 | [diff] [blame] | 11541 | |
Peter Zijlstra | edb3959 | 2018-03-15 17:36:56 +0100 | [diff] [blame] | 11542 | for_each_sibling_event(sibling, group_leader) { |
Peter Zijlstra | 45a0e07 | 2016-01-26 13:09:48 +0100 | [diff] [blame] | 11543 | perf_remove_from_context(sibling, 0); |
Peter Zijlstra | b04243e | 2010-09-17 11:28:48 +0200 | [diff] [blame] | 11544 | put_ctx(gctx); |
| 11545 | } |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 11546 | |
Peter Zijlstra | f63a8da | 2015-01-23 12:24:14 +0100 | [diff] [blame] | 11547 | /* |
| 11548 | * Wait for everybody to stop referencing the events through |
| 11549 | * the old lists, before installing them on the new lists. |
| 11550 | */ |
Yan, Zheng | 0cda4c0 | 2012-06-15 14:31:33 +0800 | [diff] [blame] | 11551 | synchronize_rcu(); |
Peter Zijlstra | f63a8da | 2015-01-23 12:24:14 +0100 | [diff] [blame] | 11552 | |
Peter Zijlstra (Intel) | 8f95b43 | 2015-01-27 11:53:12 +0100 | [diff] [blame] | 11553 | /* |
| 11554 | * Install the group siblings before the group leader. |
| 11555 | * |
| 11556 | * Because a group leader will try to install the entire group |
| 11557 | * (through the sibling list, which is still intact), we can |
| 11558 | * end up with siblings installed in the wrong context. |
| 11559 | * |
| 11560 | * By installing siblings first we NO-OP because they're not |
| 11561 | * reachable through the group lists. |
| 11562 | */ |
Peter Zijlstra | edb3959 | 2018-03-15 17:36:56 +0100 | [diff] [blame] | 11563 | for_each_sibling_event(sibling, group_leader) { |
Peter Zijlstra (Intel) | 8f95b43 | 2015-01-27 11:53:12 +0100 | [diff] [blame] | 11564 | perf_event__state_init(sibling); |
Jiri Olsa | 9fc81d8 | 2014-12-10 21:23:51 +0100 | [diff] [blame] | 11565 | perf_install_in_context(ctx, sibling, sibling->cpu); |
Peter Zijlstra | b04243e | 2010-09-17 11:28:48 +0200 | [diff] [blame] | 11566 | get_ctx(ctx); |
| 11567 | } |
Peter Zijlstra (Intel) | 8f95b43 | 2015-01-27 11:53:12 +0100 | [diff] [blame] | 11568 | |
| 11569 | /* |
| 11570 | * Removing from the context ends up with a disabled |
| 11571 | * event. What we want here is an event in the initial |
| 11572 | * startup state, ready to be added into a new context. |
| 11573 | */ |
| 11574 | perf_event__state_init(group_leader); |
| 11575 | perf_install_in_context(ctx, group_leader, group_leader->cpu); |
| 11576 | get_ctx(ctx); |
Peter Zijlstra | b04243e | 2010-09-17 11:28:48 +0200 | [diff] [blame] | 11577 | } |
| 11578 | |
Peter Zijlstra | f73e22a | 2015-09-09 20:48:22 +0200 | [diff] [blame] | 11579 | /* |
| 11580 | * Precalculate sample_data sizes; do so while holding ctx::mutex such |
| 11581 | * that we're serialized against further additions and before |
| 11582 | * perf_install_in_context() which is the point the event is active and |
| 11583 | * can use these values. |
| 11584 | */ |
| 11585 | perf_event__header_size(event); |
| 11586 | perf_event__id_header_size(event); |
Alexander Shishkin | bed5b25 | 2015-01-30 12:31:06 +0200 | [diff] [blame] | 11587 | |
Peter Zijlstra | 78cd2c7 | 2016-01-25 14:08:45 +0100 | [diff] [blame] | 11588 | event->owner = current; |
| 11589 | |
Yan, Zheng | e2d37cd | 2012-06-15 14:31:32 +0800 | [diff] [blame] | 11590 | perf_install_in_context(ctx, event, event->cpu); |
Peter Zijlstra | fe4b04f | 2011-02-02 13:19:09 +0100 | [diff] [blame] | 11591 | perf_unpin_context(ctx); |
Peter Zijlstra | f63a8da | 2015-01-23 12:24:14 +0100 | [diff] [blame] | 11592 | |
Peter Zijlstra | f55fc2a | 2015-09-09 19:06:33 +0200 | [diff] [blame] | 11593 | if (move_group) |
Peter Zijlstra | 321027c | 2017-01-11 21:09:50 +0100 | [diff] [blame] | 11594 | perf_event_ctx_unlock(group_leader, gctx); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 11595 | mutex_unlock(&ctx->mutex); |
| 11596 | |
Peter Zijlstra | 79c9ce5 | 2016-04-26 11:36:53 +0200 | [diff] [blame] | 11597 | if (task) { |
| 11598 | mutex_unlock(&task->signal->cred_guard_mutex); |
| 11599 | put_task_struct(task); |
| 11600 | } |
| 11601 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 11602 | mutex_lock(¤t->perf_event_mutex); |
| 11603 | list_add_tail(&event->owner_entry, ¤t->perf_event_list); |
| 11604 | mutex_unlock(¤t->perf_event_mutex); |
| 11605 | |
Peter Zijlstra | 8a49542 | 2010-05-27 15:47:49 +0200 | [diff] [blame] | 11606 | /* |
| 11607 | * Drop the reference on the group_event after placing the |
| 11608 | * new event on the sibling_list. This ensures destruction |
| 11609 | * of the group leader will find the pointer to itself in |
| 11610 | * perf_group_detach(). |
| 11611 | */ |
Al Viro | 2903ff0 | 2012-08-28 12:52:22 -0400 | [diff] [blame] | 11612 | fdput(group); |
Al Viro | ea635c6 | 2010-05-26 17:40:29 -0400 | [diff] [blame] | 11613 | fd_install(event_fd, event_file); |
| 11614 | return event_fd; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 11615 | |
Peter Zijlstra | f55fc2a | 2015-09-09 19:06:33 +0200 | [diff] [blame] | 11616 | err_locked: |
| 11617 | if (move_group) |
Peter Zijlstra | 321027c | 2017-01-11 21:09:50 +0100 | [diff] [blame] | 11618 | perf_event_ctx_unlock(group_leader, gctx); |
Peter Zijlstra | f55fc2a | 2015-09-09 19:06:33 +0200 | [diff] [blame] | 11619 | mutex_unlock(&ctx->mutex); |
| 11620 | /* err_file: */ |
| 11621 | fput(event_file); |
Peter Zijlstra | c3f00c7 | 2010-08-18 14:37:15 +0200 | [diff] [blame] | 11622 | err_context: |
Peter Zijlstra | fe4b04f | 2011-02-02 13:19:09 +0100 | [diff] [blame] | 11623 | perf_unpin_context(ctx); |
Al Viro | ea635c6 | 2010-05-26 17:40:29 -0400 | [diff] [blame] | 11624 | put_ctx(ctx); |
Peter Zijlstra | c6be5a5 | 2010-10-14 16:59:46 +0200 | [diff] [blame] | 11625 | err_alloc: |
Peter Zijlstra | 1300562 | 2016-02-24 18:45:41 +0100 | [diff] [blame] | 11626 | /* |
| 11627 | * If event_file is set, the fput() above will have called ->release() |
| 11628 | * and that will take care of freeing the event. |
| 11629 | */ |
| 11630 | if (!event_file) |
| 11631 | free_event(event); |
Peter Zijlstra | 79c9ce5 | 2016-04-26 11:36:53 +0200 | [diff] [blame] | 11632 | err_cred: |
| 11633 | if (task) |
| 11634 | mutex_unlock(&task->signal->cred_guard_mutex); |
Peter Zijlstra | 1f4ee50 | 2014-05-06 09:59:34 +0200 | [diff] [blame] | 11635 | err_task: |
Peter Zijlstra | e7d0bc0 | 2010-10-14 16:54:51 +0200 | [diff] [blame] | 11636 | if (task) |
| 11637 | put_task_struct(task); |
Peter Zijlstra | 89a1e18 | 2010-09-07 17:34:50 +0200 | [diff] [blame] | 11638 | err_group_fd: |
Al Viro | 2903ff0 | 2012-08-28 12:52:22 -0400 | [diff] [blame] | 11639 | fdput(group); |
Al Viro | ea635c6 | 2010-05-26 17:40:29 -0400 | [diff] [blame] | 11640 | err_fd: |
| 11641 | put_unused_fd(event_fd); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 11642 | return err; |
| 11643 | } |
| 11644 | |
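/*
 * End-to-end userspace sketch of the syscall above: count retired
 * instructions around a dummy workload for the calling thread.  Error
 * handling is minimal and glibc provides no wrapper, hence the raw
 * syscall().
 *
 *	#include <linux/perf_event.h>
 *	#include <sys/syscall.h>
 *	#include <sys/ioctl.h>
 *	#include <stdint.h>
 *	#include <string.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		struct perf_event_attr attr;
 *		volatile uint64_t sink = 0;
 *		uint64_t count = 0, i;
 *		int fd;
 *
 *		memset(&attr, 0, sizeof(attr));
 *		attr.size           = sizeof(attr);
 *		attr.type           = PERF_TYPE_HARDWARE;
 *		attr.config         = PERF_COUNT_HW_INSTRUCTIONS;
 *		attr.disabled       = 1;
 *		attr.exclude_kernel = 1;
 *
 *		fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1,
 *			     PERF_FLAG_FD_CLOEXEC);
 *		if (fd < 0)
 *			return 1;
 *
 *		ioctl(fd, PERF_EVENT_IOC_RESET, 0);
 *		ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
 *		for (i = 0; i < 1000000; i++)
 *			sink += i;
 *		ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
 *
 *		if (read(fd, &count, sizeof(count)) == sizeof(count))
 *			printf("instructions: %llu\n",
 *			       (unsigned long long)count);
 *		close(fd);
 *		return 0;
 *	}
 *
 * The loop only stands in for whatever code is actually being measured.
 */
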
Arjan van de Ven | fb0459d | 2009-09-25 12:25:56 +0200 | [diff] [blame] | 11645 | /** |
| 11646 | * perf_event_create_kernel_counter |
| 11647 | * perf_event_create_kernel_counter - create a counter for kernel-internal use |
| 11648 | * @attr: attributes of the counter to create |
| 11649 | * @cpu: cpu on which the counter is bound |
Matt Helsley | 38a81da | 2010-09-13 13:01:20 -0700 | [diff] [blame] | 11650 | * @task: task to profile (NULL for percpu) |
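 * @overflow_handler: callback invoked when the counter overflows (may be NULL)
 * @context: opaque data passed back to @overflow_handler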
Arjan van de Ven | fb0459d | 2009-09-25 12:25:56 +0200 | [diff] [blame] | 11651 | */ |
| 11652 | struct perf_event * |
| 11653 | perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu, |
Matt Helsley | 38a81da | 2010-09-13 13:01:20 -0700 | [diff] [blame] | 11654 | struct task_struct *task, |
Avi Kivity | 4dc0da8 | 2011-06-29 18:42:35 +0300 | [diff] [blame] | 11655 | perf_overflow_handler_t overflow_handler, |
| 11656 | void *context) |
Arjan van de Ven | fb0459d | 2009-09-25 12:25:56 +0200 | [diff] [blame] | 11657 | { |
Arjan van de Ven | fb0459d | 2009-09-25 12:25:56 +0200 | [diff] [blame] | 11658 | struct perf_event_context *ctx; |
Peter Zijlstra | c3f00c7 | 2010-08-18 14:37:15 +0200 | [diff] [blame] | 11659 | struct perf_event *event; |
Arjan van de Ven | fb0459d | 2009-09-25 12:25:56 +0200 | [diff] [blame] | 11660 | int err; |
| 11661 | |
Alexander Shishkin | dce5aff | 2019-10-30 15:47:31 +0200 | [diff] [blame] | 11662 | /* |
| 11663 | * Grouping is not supported for kernel events, and neither is 'AUX'; |
| 11664 | * make sure the caller's intentions are adjusted. |
| 11665 | */ |
| 11666 | if (attr->aux_output) |
| 11667 | return ERR_PTR(-EINVAL); |
| 11668 | |
Avi Kivity | 4dc0da8 | 2011-06-29 18:42:35 +0300 | [diff] [blame] | 11669 | event = perf_event_alloc(attr, cpu, task, NULL, NULL, |
Matt Fleming | 79dff51 | 2015-01-23 18:45:42 +0000 | [diff] [blame] | 11670 | overflow_handler, context, -1); |
Frederic Weisbecker | c6567f6 | 2009-11-26 05:35:41 +0100 | [diff] [blame] | 11671 | if (IS_ERR(event)) { |
| 11672 | err = PTR_ERR(event); |
Peter Zijlstra | c3f00c7 | 2010-08-18 14:37:15 +0200 | [diff] [blame] | 11673 | goto err; |
| 11674 | } |
| 11675 | |
Jiri Olsa | f869776 | 2014-08-01 14:33:01 +0200 | [diff] [blame] | 11676 | /* Mark owner so we can distinguish it from user events. */ |
Peter Zijlstra | 63b6da3 | 2016-01-14 16:05:37 +0100 | [diff] [blame] | 11677 | event->owner = TASK_TOMBSTONE; |
Jiri Olsa | f869776 | 2014-08-01 14:33:01 +0200 | [diff] [blame] | 11678 | |
Alexander Shishkin | f25d8ba | 2019-10-30 15:47:30 +0200 | [diff] [blame] | 11679 | /* |
| 11680 | * Get the target context (task or percpu): |
| 11681 | */ |
Yan, Zheng | 4af57ef | 2014-11-04 21:56:01 -0500 | [diff] [blame] | 11682 | ctx = find_get_context(event->pmu, task, event); |
Arjan van de Ven | fb0459d | 2009-09-25 12:25:56 +0200 | [diff] [blame] | 11683 | if (IS_ERR(ctx)) { |
| 11684 | err = PTR_ERR(ctx); |
Peter Zijlstra | c3f00c7 | 2010-08-18 14:37:15 +0200 | [diff] [blame] | 11685 | goto err_free; |
Frederic Weisbecker | c6567f6 | 2009-11-26 05:35:41 +0100 | [diff] [blame] | 11686 | } |
Arjan van de Ven | fb0459d | 2009-09-25 12:25:56 +0200 | [diff] [blame] | 11687 | |
Arjan van de Ven | fb0459d | 2009-09-25 12:25:56 +0200 | [diff] [blame] | 11688 | WARN_ON_ONCE(ctx->parent_ctx); |
| 11689 | mutex_lock(&ctx->mutex); |
Peter Zijlstra | 84c4e62 | 2016-02-24 18:45:40 +0100 | [diff] [blame] | 11690 | if (ctx->task == TASK_TOMBSTONE) { |
| 11691 | err = -ESRCH; |
| 11692 | goto err_unlock; |
| 11693 | } |
| 11694 | |
Thomas Gleixner | a63fbed | 2017-05-24 10:15:34 +0200 | [diff] [blame] | 11695 | if (!task) { |
| 11696 | /* |
| 11697 | * Check if the @cpu we're creating an event for is online. |
| 11698 | * |
| 11699 | * We use the perf_cpu_context::ctx::mutex to serialize against |
| 11700 | * the hotplug notifiers. See perf_event_{init,exit}_cpu(). |
| 11701 | */ |
| 11702 | struct perf_cpu_context *cpuctx = |
| 11703 | container_of(ctx, struct perf_cpu_context, ctx); |
| 11704 | if (!cpuctx->online) { |
| 11705 | err = -ENODEV; |
| 11706 | goto err_unlock; |
| 11707 | } |
| 11708 | } |
| 11709 | |
Alexander Shishkin | bed5b25 | 2015-01-30 12:31:06 +0200 | [diff] [blame] | 11710 | if (!exclusive_event_installable(event, ctx)) { |
Alexander Shishkin | bed5b25 | 2015-01-30 12:31:06 +0200 | [diff] [blame] | 11711 | err = -EBUSY; |
Peter Zijlstra | 84c4e62 | 2016-02-24 18:45:40 +0100 | [diff] [blame] | 11712 | goto err_unlock; |
Alexander Shishkin | bed5b25 | 2015-01-30 12:31:06 +0200 | [diff] [blame] | 11713 | } |
| 11714 | |
Leonard Crestez | 4ce54af | 2019-07-24 15:53:24 +0300 | [diff] [blame] | 11715 | perf_install_in_context(ctx, event, event->cpu); |
Peter Zijlstra | fe4b04f | 2011-02-02 13:19:09 +0100 | [diff] [blame] | 11716 | perf_unpin_context(ctx); |
Arjan van de Ven | fb0459d | 2009-09-25 12:25:56 +0200 | [diff] [blame] | 11717 | mutex_unlock(&ctx->mutex); |
| 11718 | |
Arjan van de Ven | fb0459d | 2009-09-25 12:25:56 +0200 | [diff] [blame] | 11719 | return event; |
| 11720 | |
Peter Zijlstra | 84c4e62 | 2016-02-24 18:45:40 +0100 | [diff] [blame] | 11721 | err_unlock: |
| 11722 | mutex_unlock(&ctx->mutex); |
| 11723 | perf_unpin_context(ctx); |
| 11724 | put_ctx(ctx); |
Peter Zijlstra | c3f00c7 | 2010-08-18 14:37:15 +0200 | [diff] [blame] | 11725 | err_free: |
| 11726 | free_event(event); |
| 11727 | err: |
Frederic Weisbecker | c6567f6 | 2009-11-26 05:35:41 +0100 | [diff] [blame] | 11728 | return ERR_PTR(err); |
Arjan van de Ven | fb0459d | 2009-09-25 12:25:56 +0200 | [diff] [blame] | 11729 | } |
| 11730 | EXPORT_SYMBOL_GPL(perf_event_create_kernel_counter); |
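
/*
 * In-kernel users (the hard-lockup detector is the classic one) reach
 * perf through this interface rather than the syscall.  A condensed
 * sketch with a made-up overflow callback; the attribute values are only
 * illustrative:
 *
 *	static struct perf_event_attr cycles_attr = {
 *		.type		= PERF_TYPE_HARDWARE,
 *		.config		= PERF_COUNT_HW_CPU_CYCLES,
 *		.size		= sizeof(struct perf_event_attr),
 *		.sample_period	= 1000000000ULL,
 *		.pinned		= 1,
 *		.disabled	= 1,
 *	};
 *
 *	static void cycles_overflow(struct perf_event *event,
 *				    struct perf_sample_data *data,
 *				    struct pt_regs *regs)
 *	{
 *		... react to the overflow, e.g. check for a lockup ...
 *	}
 *
 *	event = perf_event_create_kernel_counter(&cycles_attr, cpu, NULL,
 *						 cycles_overflow, NULL);
 *	if (IS_ERR(event))
 *		return PTR_ERR(event);
 *	perf_event_enable(event);
 *	...
 *	perf_event_disable(event);
 *	perf_event_release_kernel(event);
 */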
| 11731 | |
Yan, Zheng | 0cda4c0 | 2012-06-15 14:31:33 +0800 | [diff] [blame] | 11732 | void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu) |
| 11733 | { |
| 11734 | struct perf_event_context *src_ctx; |
| 11735 | struct perf_event_context *dst_ctx; |
| 11736 | struct perf_event *event, *tmp; |
| 11737 | LIST_HEAD(events); |
| 11738 | |
| 11739 | src_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, src_cpu)->ctx; |
| 11740 | dst_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, dst_cpu)->ctx; |
| 11741 | |
Peter Zijlstra | f63a8da | 2015-01-23 12:24:14 +0100 | [diff] [blame] | 11742 | /* |
| 11743 | * See perf_event_ctx_lock() for comments on the details |
| 11744 | * of swizzling perf_event::ctx. |
| 11745 | */ |
| 11746 | mutex_lock_double(&src_ctx->mutex, &dst_ctx->mutex); |
Yan, Zheng | 0cda4c0 | 2012-06-15 14:31:33 +0800 | [diff] [blame] | 11747 | list_for_each_entry_safe(event, tmp, &src_ctx->event_list, |
| 11748 | event_entry) { |
Peter Zijlstra | 45a0e07 | 2016-01-26 13:09:48 +0100 | [diff] [blame] | 11749 | perf_remove_from_context(event, 0); |
Frederic Weisbecker | 9a545de | 2013-07-23 02:31:03 +0200 | [diff] [blame] | 11750 | unaccount_event_cpu(event, src_cpu); |
Yan, Zheng | 0cda4c0 | 2012-06-15 14:31:33 +0800 | [diff] [blame] | 11751 | put_ctx(src_ctx); |
Peter Zijlstra | 9886167 | 2013-10-03 16:02:23 +0200 | [diff] [blame] | 11752 | list_add(&event->migrate_entry, &events); |
Yan, Zheng | 0cda4c0 | 2012-06-15 14:31:33 +0800 | [diff] [blame] | 11753 | } |
Yan, Zheng | 0cda4c0 | 2012-06-15 14:31:33 +0800 | [diff] [blame] | 11754 | |
Peter Zijlstra (Intel) | 8f95b43 | 2015-01-27 11:53:12 +0100 | [diff] [blame] | 11755 | /* |
| 11756 | * Wait for the events to quiesce before re-instating them. |
| 11757 | */ |
Yan, Zheng | 0cda4c0 | 2012-06-15 14:31:33 +0800 | [diff] [blame] | 11758 | synchronize_rcu(); |
| 11759 | |
Peter Zijlstra (Intel) | 8f95b43 | 2015-01-27 11:53:12 +0100 | [diff] [blame] | 11760 | /* |
| 11761 | * Re-instate events in 2 passes. |
| 11762 | * |
| 11763 | * Skip over group leaders and only install siblings on this first |
| 11764 | * pass; siblings will not get enabled without a leader. However, a |
| 11765 | * leader will enable its siblings, even if those are still on the old |
| 11766 | * context. |
| 11767 | */ |
| 11768 | list_for_each_entry_safe(event, tmp, &events, migrate_entry) { |
| 11769 | if (event->group_leader == event) |
| 11770 | continue; |
| 11771 | |
| 11772 | list_del(&event->migrate_entry); |
| 11773 | if (event->state >= PERF_EVENT_STATE_OFF) |
| 11774 | event->state = PERF_EVENT_STATE_INACTIVE; |
| 11775 | account_event_cpu(event, dst_cpu); |
| 11776 | perf_install_in_context(dst_ctx, event, dst_cpu); |
| 11777 | get_ctx(dst_ctx); |
| 11778 | } |
| 11779 | |
| 11780 | /* |
| 11781 | * Once all the siblings are set up properly, install the group leaders |
| 11782 | * to make it go. |
| 11783 | */ |
Peter Zijlstra | 9886167 | 2013-10-03 16:02:23 +0200 | [diff] [blame] | 11784 | list_for_each_entry_safe(event, tmp, &events, migrate_entry) { |
| 11785 | list_del(&event->migrate_entry); |
Yan, Zheng | 0cda4c0 | 2012-06-15 14:31:33 +0800 | [diff] [blame] | 11786 | if (event->state >= PERF_EVENT_STATE_OFF) |
| 11787 | event->state = PERF_EVENT_STATE_INACTIVE; |
Frederic Weisbecker | 9a545de | 2013-07-23 02:31:03 +0200 | [diff] [blame] | 11788 | account_event_cpu(event, dst_cpu); |
Yan, Zheng | 0cda4c0 | 2012-06-15 14:31:33 +0800 | [diff] [blame] | 11789 | perf_install_in_context(dst_ctx, event, dst_cpu); |
| 11790 | get_ctx(dst_ctx); |
| 11791 | } |
| 11792 | mutex_unlock(&dst_ctx->mutex); |
Peter Zijlstra | f63a8da | 2015-01-23 12:24:14 +0100 | [diff] [blame] | 11793 | mutex_unlock(&src_ctx->mutex); |
Yan, Zheng | 0cda4c0 | 2012-06-15 14:31:33 +0800 | [diff] [blame] | 11794 | } |
| 11795 | EXPORT_SYMBOL_GPL(perf_pmu_migrate_context); |
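
/*
 * The typical caller is an uncore-style driver whose events are all tied
 * to one CPU of a package: when that CPU goes offline, its events are
 * moved wholesale to a surviving CPU from a hotplug callback.  A sketch,
 * where my_uncore_pmu and my_uncore_cpu_mask are assumptions about the
 * calling driver:
 *
 *	static int my_uncore_cpu_offline(unsigned int cpu)
 *	{
 *		int target;
 *
 *		if (!cpumask_test_and_clear_cpu(cpu, &my_uncore_cpu_mask))
 *			return 0;
 *
 *		target = cpumask_any_but(cpu_online_mask, cpu);
 *		if (target >= nr_cpu_ids)
 *			return 0;
 *
 *		perf_pmu_migrate_context(&my_uncore_pmu, cpu, target);
 *		cpumask_set_cpu(target, &my_uncore_cpu_mask);
 *		return 0;
 *	}
 */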
| 11796 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 11797 | static void sync_child_event(struct perf_event *child_event, |
| 11798 | struct task_struct *child) |
| 11799 | { |
| 11800 | struct perf_event *parent_event = child_event->parent; |
| 11801 | u64 child_val; |
| 11802 | |
| 11803 | if (child_event->attr.inherit_stat) |
| 11804 | perf_event_read_event(child_event, child); |
| 11805 | |
Peter Zijlstra | b5e5879 | 2010-05-21 14:43:12 +0200 | [diff] [blame] | 11806 | child_val = perf_event_count(child_event); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 11807 | |
| 11808 | /* |
| 11809 | * Add back the child's count to the parent's count: |
| 11810 | */ |
Peter Zijlstra | a6e6dea | 2010-05-21 14:27:58 +0200 | [diff] [blame] | 11811 | atomic64_add(child_val, &parent_event->child_count); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 11812 | atomic64_add(child_event->total_time_enabled, |
| 11813 | &parent_event->child_total_time_enabled); |
| 11814 | atomic64_add(child_event->total_time_running, |
| 11815 | &parent_event->child_total_time_running); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 11816 | } |
| 11817 | |
| 11818 | static void |
Peter Zijlstra | 8ba289b | 2016-01-26 13:06:56 +0100 | [diff] [blame] | 11819 | perf_event_exit_event(struct perf_event *child_event, |
| 11820 | struct perf_event_context *child_ctx, |
| 11821 | struct task_struct *child) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 11822 | { |
Peter Zijlstra | 8ba289b | 2016-01-26 13:06:56 +0100 | [diff] [blame] | 11823 | struct perf_event *parent_event = child_event->parent; |
| 11824 | |
Peter Zijlstra | 1903d50 | 2014-07-15 17:27:27 +0200 | [diff] [blame] | 11825 | /* |
| 11826 | * Do not destroy the 'original' grouping; because of the context |
| 11827 | * switch optimization the original events could've ended up in a |
| 11828 | * random child task. |
| 11829 | * |
| 11830 | * If we were to destroy the original group, all group related |
| 11831 | * operations would cease to function properly after this random |
| 11832 | * child dies. |
| 11833 | * |
| 11834 | * Do destroy all inherited groups; we don't care about those |
| 11835 | * and being thorough is better. |
| 11836 | */ |
Peter Zijlstra | 32132a3 | 2016-01-11 15:40:59 +0100 | [diff] [blame] | 11837 | raw_spin_lock_irq(&child_ctx->lock); |
| 11838 | WARN_ON_ONCE(child_ctx->is_active); |
| 11839 | |
Peter Zijlstra | 8ba289b | 2016-01-26 13:06:56 +0100 | [diff] [blame] | 11840 | if (parent_event) |
Peter Zijlstra | 32132a3 | 2016-01-11 15:40:59 +0100 | [diff] [blame] | 11841 | perf_group_detach(child_event); |
| 11842 | list_del_event(child_event, child_ctx); |
Peter Zijlstra | 0d3d73a | 2017-09-05 14:16:28 +0200 | [diff] [blame] | 11843 | perf_event_set_state(child_event, PERF_EVENT_STATE_EXIT); /* is_event_hup() */ |
Peter Zijlstra | 32132a3 | 2016-01-11 15:40:59 +0100 | [diff] [blame] | 11844 | raw_spin_unlock_irq(&child_ctx->lock); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 11845 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 11846 | /* |
Peter Zijlstra | 8ba289b | 2016-01-26 13:06:56 +0100 | [diff] [blame] | 11847 | * Parent events are governed by their filedesc, retain them. |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 11848 | */ |
Peter Zijlstra | 8ba289b | 2016-01-26 13:06:56 +0100 | [diff] [blame] | 11849 | if (!parent_event) { |
Jiri Olsa | 179033b | 2014-08-07 11:48:26 -0400 | [diff] [blame] | 11850 | perf_event_wakeup(child_event); |
Peter Zijlstra | 8ba289b | 2016-01-26 13:06:56 +0100 | [diff] [blame] | 11851 | return; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 11852 | } |
Peter Zijlstra | 8ba289b | 2016-01-26 13:06:56 +0100 | [diff] [blame] | 11853 | /* |
| 11854 | * Child events can be cleaned up. |
| 11855 | */ |
| 11856 | |
| 11857 | sync_child_event(child_event, child); |
| 11858 | |
| 11859 | /* |
| 11860 | * Remove this event from the parent's list |
| 11861 | */ |
| 11862 | WARN_ON_ONCE(parent_event->ctx->parent_ctx); |
| 11863 | mutex_lock(&parent_event->child_mutex); |
| 11864 | list_del_init(&child_event->child_list); |
| 11865 | mutex_unlock(&parent_event->child_mutex); |
| 11866 | |
| 11867 | /* |
| 11868 | * Kick perf_poll() for is_event_hup(). |
| 11869 | */ |
| 11870 | perf_event_wakeup(parent_event); |
| 11871 | free_event(child_event); |
| 11872 | put_event(parent_event); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 11873 | } |
| 11874 | |
Peter Zijlstra | 8dc85d547 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 11875 | static void perf_event_exit_task_context(struct task_struct *child, int ctxn) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 11876 | { |
Peter Zijlstra | 211de6e | 2014-09-30 19:23:08 +0200 | [diff] [blame] | 11877 | struct perf_event_context *child_ctx, *clone_ctx = NULL; |
Peter Zijlstra | 63b6da3 | 2016-01-14 16:05:37 +0100 | [diff] [blame] | 11878 | struct perf_event *child_event, *next; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 11879 | |
Peter Zijlstra | 63b6da3 | 2016-01-14 16:05:37 +0100 | [diff] [blame] | 11880 | WARN_ON_ONCE(child != current); |
| 11881 | |
Peter Zijlstra | 6a3351b | 2016-01-25 14:09:54 +0100 | [diff] [blame] | 11882 | child_ctx = perf_pin_task_context(child, ctxn); |
Peter Zijlstra | 63b6da3 | 2016-01-14 16:05:37 +0100 | [diff] [blame] | 11883 | if (!child_ctx) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 11884 | return; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 11885 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 11886 | /* |
Peter Zijlstra | 6a3351b | 2016-01-25 14:09:54 +0100 | [diff] [blame] | 11887 | * In order to reduce the amount of tricky in ctx tear-down, we hold |
| 11888 | * ctx::mutex over the entire thing. This serializes against almost |
| 11889 | * everything that wants to access the ctx. |
| 11890 | * |
| 11891 | * The exception is sys_perf_event_open() / |
| 11892 | * perf_event_create_kernel_counter() which does find_get_context() |
| 11893 | * without ctx::mutex (it cannot because of the move_group double mutex |
| 11894 | * lock thing). See the comments in perf_install_in_context(). |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 11895 | */ |
Peter Zijlstra | 6a3351b | 2016-01-25 14:09:54 +0100 | [diff] [blame] | 11896 | mutex_lock(&child_ctx->mutex); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 11897 | |
| 11898 | /* |
Peter Zijlstra | 6a3351b | 2016-01-25 14:09:54 +0100 | [diff] [blame] | 11899 | * In a single ctx::lock section, de-schedule the events and detach the |
| 11900 | * context from the task such that we cannot ever get it scheduled back |
| 11901 | * in. |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 11902 | */ |
Peter Zijlstra | 6a3351b | 2016-01-25 14:09:54 +0100 | [diff] [blame] | 11903 | raw_spin_lock_irq(&child_ctx->lock); |
Alexander Shishkin | 487f05e | 2017-01-19 18:43:30 +0200 | [diff] [blame] | 11904 | task_ctx_sched_out(__get_cpu_context(child_ctx), child_ctx, EVENT_ALL); |
Peter Zijlstra | 4a1c0f2 | 2014-06-23 16:12:42 +0200 | [diff] [blame] | 11905 | |
| 11906 | /* |
Peter Zijlstra | 63b6da3 | 2016-01-14 16:05:37 +0100 | [diff] [blame] | 11907 | * Now that the context is inactive, destroy the task <-> ctx relation |
| 11908 | * and mark the context dead. |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 11909 | */ |
Peter Zijlstra | 63b6da3 | 2016-01-14 16:05:37 +0100 | [diff] [blame] | 11910 | RCU_INIT_POINTER(child->perf_event_ctxp[ctxn], NULL); |
| 11911 | put_ctx(child_ctx); /* cannot be last */ |
| 11912 | WRITE_ONCE(child_ctx->task, TASK_TOMBSTONE); |
| 11913 | put_task_struct(current); /* cannot be last */ |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 11914 | |
Peter Zijlstra | 211de6e | 2014-09-30 19:23:08 +0200 | [diff] [blame] | 11915 | clone_ctx = unclone_ctx(child_ctx); |
Peter Zijlstra | 6a3351b | 2016-01-25 14:09:54 +0100 | [diff] [blame] | 11916 | raw_spin_unlock_irq(&child_ctx->lock); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 11917 | |
Peter Zijlstra | 211de6e | 2014-09-30 19:23:08 +0200 | [diff] [blame] | 11918 | if (clone_ctx) |
| 11919 | put_ctx(clone_ctx); |
Peter Zijlstra | 4a1c0f2 | 2014-06-23 16:12:42 +0200 | [diff] [blame] | 11920 | |
| 11921 | /* |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 11922 | * Report the task dead after unscheduling the events so that we |
| 11923 | * won't get any samples after PERF_RECORD_EXIT. We can however still |
| 11924 | * get a few PERF_RECORD_READ events. |
| 11925 | */ |
| 11926 | perf_event_task(child, child_ctx, 0); |
| 11927 | |
Peter Zijlstra | ebf905f | 2014-05-29 19:00:24 +0200 | [diff] [blame] | 11928 | list_for_each_entry_safe(child_event, next, &child_ctx->event_list, event_entry) |
Peter Zijlstra | 8ba289b | 2016-01-26 13:06:56 +0100 | [diff] [blame] | 11929 | perf_event_exit_event(child_event, child_ctx, child); |
Frederic Weisbecker | 889ff01 | 2010-01-09 20:04:47 +0100 | [diff] [blame] | 11930 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 11931 | mutex_unlock(&child_ctx->mutex); |
| 11932 | |
| 11933 | put_ctx(child_ctx); |
| 11934 | } |
| 11935 | |
Peter Zijlstra | 8dc85d547 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 11936 | /* |
| 11937 | * When a child task exits, feed back event values to parent events. |
Peter Zijlstra | 79c9ce5 | 2016-04-26 11:36:53 +0200 | [diff] [blame] | 11938 | * |
| 11939 | * Can be called with cred_guard_mutex held when called from |
| 11940 | * install_exec_creds(). |
Peter Zijlstra | 8dc85d547 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 11941 | */ |
| 11942 | void perf_event_exit_task(struct task_struct *child) |
| 11943 | { |
Peter Zijlstra | 8882135 | 2010-11-09 19:01:43 +0100 | [diff] [blame] | 11944 | struct perf_event *event, *tmp; |
Peter Zijlstra | 8dc85d547 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 11945 | int ctxn; |
| 11946 | |
Peter Zijlstra | 8882135 | 2010-11-09 19:01:43 +0100 | [diff] [blame] | 11947 | mutex_lock(&child->perf_event_mutex); |
| 11948 | list_for_each_entry_safe(event, tmp, &child->perf_event_list, |
| 11949 | owner_entry) { |
| 11950 | list_del_init(&event->owner_entry); |
| 11951 | |
| 11952 | /* |
| 11953 | * Ensure the list deletion is visible before we clear |
| 11954 | * the owner; this closes a race against perf_release() where |
| 11955 | * we need to serialize on the owner->perf_event_mutex. |
| 11956 | */ |
Peter Zijlstra | f47c02c | 2016-01-26 12:30:14 +0100 | [diff] [blame] | 11957 | smp_store_release(&event->owner, NULL); |
Peter Zijlstra | 8882135 | 2010-11-09 19:01:43 +0100 | [diff] [blame] | 11958 | } |
| 11959 | mutex_unlock(&child->perf_event_mutex); |
| 11960 | |
Peter Zijlstra | 8dc85d547 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 11961 | for_each_task_context_nr(ctxn) |
| 11962 | perf_event_exit_task_context(child, ctxn); |
Jiri Olsa | 4e93ad6 | 2015-11-04 16:00:05 +0100 | [diff] [blame] | 11963 | |
| 11964 | /* |
| 11965 | * The perf_event_exit_task_context calls perf_event_task |
| 11966 | * with child's task_ctx, which generates EXIT events for |
| 11967 | * child contexts and sets child->perf_event_ctxp[] to NULL. |
| 11968 | * At this point we need to send EXIT events to cpu contexts. |
| 11969 | */ |
| 11970 | perf_event_task(child, NULL, 0); |
Peter Zijlstra | 8dc85d547 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 11971 | } |
| 11972 | |
Frederic Weisbecker | 889ff01 | 2010-01-09 20:04:47 +0100 | [diff] [blame] | 11973 | static void perf_free_event(struct perf_event *event, |
| 11974 | struct perf_event_context *ctx) |
| 11975 | { |
| 11976 | struct perf_event *parent = event->parent; |
| 11977 | |
| 11978 | if (WARN_ON_ONCE(!parent)) |
| 11979 | return; |
| 11980 | |
| 11981 | mutex_lock(&parent->child_mutex); |
| 11982 | list_del_init(&event->child_list); |
| 11983 | mutex_unlock(&parent->child_mutex); |
| 11984 | |
Al Viro | a6fa941 | 2012-08-20 14:59:25 +0100 | [diff] [blame] | 11985 | put_event(parent); |
Frederic Weisbecker | 889ff01 | 2010-01-09 20:04:47 +0100 | [diff] [blame] | 11986 | |
Peter Zijlstra | 652884f | 2015-01-23 11:20:10 +0100 | [diff] [blame] | 11987 | raw_spin_lock_irq(&ctx->lock); |
Peter Zijlstra | 8a49542 | 2010-05-27 15:47:49 +0200 | [diff] [blame] | 11988 | perf_group_detach(event); |
Frederic Weisbecker | 889ff01 | 2010-01-09 20:04:47 +0100 | [diff] [blame] | 11989 | list_del_event(event, ctx); |
Peter Zijlstra | 652884f | 2015-01-23 11:20:10 +0100 | [diff] [blame] | 11990 | raw_spin_unlock_irq(&ctx->lock); |
Frederic Weisbecker | 889ff01 | 2010-01-09 20:04:47 +0100 | [diff] [blame] | 11991 | free_event(event); |
| 11992 | } |
| 11993 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 11994 | /* |
Peter Zijlstra | 1cf8dfe | 2019-07-13 11:21:25 +0200 | [diff] [blame] | 11995 | * Free a context as created by inheritance by perf_event_init_task() below, |
| 11996 | * used by fork() in case of failure. |
Peter Zijlstra | 652884f | 2015-01-23 11:20:10 +0100 | [diff] [blame] | 11997 | * |
Peter Zijlstra | 1cf8dfe | 2019-07-13 11:21:25 +0200 | [diff] [blame] | 11998 | * Even though the task has never lived, the context and events have been |
| 11999 | * exposed through the child_list, so we must take care tearing it all down. |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 12000 | */ |
| 12001 | void perf_event_free_task(struct task_struct *task) |
| 12002 | { |
Peter Zijlstra | 8dc85d547 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 12003 | struct perf_event_context *ctx; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 12004 | struct perf_event *event, *tmp; |
Peter Zijlstra | 8dc85d547 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 12005 | int ctxn; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 12006 | |
Peter Zijlstra | 8dc85d547 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 12007 | for_each_task_context_nr(ctxn) { |
| 12008 | ctx = task->perf_event_ctxp[ctxn]; |
| 12009 | if (!ctx) |
| 12010 | continue; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 12011 | |
Peter Zijlstra | 8dc85d547 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 12012 | mutex_lock(&ctx->mutex); |
Peter Zijlstra | e552a83 | 2017-03-16 13:47:48 +0100 | [diff] [blame] | 12013 | raw_spin_lock_irq(&ctx->lock); |
| 12014 | /* |
| 12015 | * Destroy the task <-> ctx relation and mark the context dead. |
| 12016 | * |
| 12017 | * This is important because even though the task hasn't been |
| 12018 | * exposed yet the context has been (through child_list). |
| 12019 | */ |
| 12020 | RCU_INIT_POINTER(task->perf_event_ctxp[ctxn], NULL); |
| 12021 | WRITE_ONCE(ctx->task, TASK_TOMBSTONE); |
| 12022 | put_task_struct(task); /* cannot be last */ |
| 12023 | raw_spin_unlock_irq(&ctx->lock); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 12024 | |
Peter Zijlstra | 15121c7 | 2017-03-16 13:47:50 +0100 | [diff] [blame] | 12025 | list_for_each_entry_safe(event, tmp, &ctx->event_list, event_entry) |
Peter Zijlstra | 8dc85d547 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 12026 | perf_free_event(event, ctx); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 12027 | |
Peter Zijlstra | 8dc85d547 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 12028 | mutex_unlock(&ctx->mutex); |
Peter Zijlstra | 1cf8dfe | 2019-07-13 11:21:25 +0200 | [diff] [blame] | 12029 | |
| 12030 | /* |
| 12031 | * perf_event_release_kernel() could've stolen some of our |
| 12032 | * child events and still have them on its free_list. In that |
| 12033 | * case we must wait for these events to have been freed (in |
| 12034 | * particular all their references to this task must've been |
| 12035 | * dropped). |
| 12036 | * |
| 12037 | * Without this copy_process() will unconditionally free this |
| 12038 | * task (irrespective of its reference count) and |
| 12039 | * _free_event()'s put_task_struct(event->hw.target) will be a |
| 12040 | * use-after-free. |
| 12041 | * |
| 12042 | * Wait for all events to drop their context reference. |
| 12043 | */ |
| 12044 | wait_var_event(&ctx->refcount, refcount_read(&ctx->refcount) == 1); |
| 12045 | put_ctx(ctx); /* must be last */ |
Peter Zijlstra | 8dc85d547 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 12046 | } |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 12047 | } |
| 12048 | |
Peter Zijlstra | 4e231c7 | 2010-09-09 21:01:59 +0200 | [diff] [blame] | 12049 | void perf_event_delayed_put(struct task_struct *task) |
| 12050 | { |
| 12051 | int ctxn; |
| 12052 | |
| 12053 | for_each_task_context_nr(ctxn) |
| 12054 | WARN_ON_ONCE(task->perf_event_ctxp[ctxn]); |
| 12055 | } |
| 12056 | |
Alexei Starovoitov | e03e7ee | 2016-01-25 20:59:49 -0800 | [diff] [blame] | 12057 | struct file *perf_event_get(unsigned int fd) |
Kaixu Xia | ffe8690 | 2015-08-06 07:02:32 +0000 | [diff] [blame] | 12058 | { |
Al Viro | 02e5ad9 | 2019-06-26 20:43:53 -0400 | [diff] [blame] | 12059 | struct file *file = fget(fd); |
Alexei Starovoitov | e03e7ee | 2016-01-25 20:59:49 -0800 | [diff] [blame] | 12060 | if (!file) |
| 12061 | return ERR_PTR(-EBADF); |
Kaixu Xia | ffe8690 | 2015-08-06 07:02:32 +0000 | [diff] [blame] | 12062 | |
Alexei Starovoitov | e03e7ee | 2016-01-25 20:59:49 -0800 | [diff] [blame] | 12063 | if (file->f_op != &perf_fops) { |
| 12064 | fput(file); |
| 12065 | return ERR_PTR(-EBADF); |
| 12066 | } |
Kaixu Xia | ffe8690 | 2015-08-06 07:02:32 +0000 | [diff] [blame] | 12067 | |
Alexei Starovoitov | e03e7ee | 2016-01-25 20:59:49 -0800 | [diff] [blame] | 12068 | return file; |
Kaixu Xia | ffe8690 | 2015-08-06 07:02:32 +0000 | [diff] [blame] | 12069 | } |
| 12070 | |
Yonghong Song | f8d959a | 2018-05-24 11:21:08 -0700 | [diff] [blame] | 12071 | const struct perf_event *perf_get_event(struct file *file) |
| 12072 | { |
| 12073 | if (file->f_op != &perf_fops) |
| 12074 | return ERR_PTR(-EINVAL); |
| 12075 | |
| 12076 | return file->private_data; |
| 12077 | } |
| 12078 | |
Kaixu Xia | ffe8690 | 2015-08-06 07:02:32 +0000 | [diff] [blame] | 12079 | const struct perf_event_attr *perf_event_attrs(struct perf_event *event) |
| 12080 | { |
| 12081 | if (!event) |
| 12082 | return ERR_PTR(-EINVAL); |
| 12083 | |
| 12084 | return &event->attr; |
| 12085 | } |
| 12086 | |
Peter Zijlstra | 97dee4f | 2010-09-07 15:35:33 +0200 | [diff] [blame] | 12087 | /* |
Tobias Tefke | 788faab | 2018-07-09 12:57:15 +0200 | [diff] [blame] | 12088 | * Inherit an event from parent task to child task. |
Peter Zijlstra | d8a8cfc | 2017-03-16 13:47:51 +0100 | [diff] [blame] | 12089 | * |
| 12090 | * Returns: |
| 12091 | * - valid pointer on success |
| 12092 | * - NULL for orphaned events |
| 12093 | * - IS_ERR() on error |
Peter Zijlstra | 97dee4f | 2010-09-07 15:35:33 +0200 | [diff] [blame] | 12094 | */ |
| 12095 | static struct perf_event * |
| 12096 | inherit_event(struct perf_event *parent_event, |
| 12097 | struct task_struct *parent, |
| 12098 | struct perf_event_context *parent_ctx, |
| 12099 | struct task_struct *child, |
| 12100 | struct perf_event *group_leader, |
| 12101 | struct perf_event_context *child_ctx) |
| 12102 | { |
Peter Zijlstra | 8ca2bd4 | 2017-09-05 14:12:35 +0200 | [diff] [blame] | 12103 | enum perf_event_state parent_state = parent_event->state; |
Peter Zijlstra | 97dee4f | 2010-09-07 15:35:33 +0200 | [diff] [blame] | 12104 | struct perf_event *child_event; |
Peter Zijlstra | cee010e | 2010-09-10 12:51:54 +0200 | [diff] [blame] | 12105 | unsigned long flags; |
Peter Zijlstra | 97dee4f | 2010-09-07 15:35:33 +0200 | [diff] [blame] | 12106 | |
| 12107 | /* |
| 12108 | * Instead of creating recursive hierarchies of events, |
| 12109 | * we link inherited events back to the original parent, |
| 12110 | * which is guaranteed to have a filp, and which we use as the reference |
| 12111 | * count: |
| 12112 | */ |
| 12113 | if (parent_event->parent) |
| 12114 | parent_event = parent_event->parent; |
| 12115 | |
| 12116 | child_event = perf_event_alloc(&parent_event->attr, |
| 12117 | parent_event->cpu, |
Peter Zijlstra | d580ff8 | 2010-10-14 17:43:23 +0200 | [diff] [blame] | 12118 | child, |
Peter Zijlstra | 97dee4f | 2010-09-07 15:35:33 +0200 | [diff] [blame] | 12119 | group_leader, parent_event, |
Matt Fleming | 79dff51 | 2015-01-23 18:45:42 +0000 | [diff] [blame] | 12120 | NULL, NULL, -1); |
Peter Zijlstra | 97dee4f | 2010-09-07 15:35:33 +0200 | [diff] [blame] | 12121 | if (IS_ERR(child_event)) |
| 12122 | return child_event; |
Al Viro | a6fa941 | 2012-08-20 14:59:25 +0100 | [diff] [blame] | 12123 | |
Jiri Olsa | 313ccb9 | 2018-01-07 17:03:47 +0100 | [diff] [blame] | 12124 | |
| 12125 | if ((child_event->attach_state & PERF_ATTACH_TASK_DATA) && |
| 12126 | !child_ctx->task_ctx_data) { |
| 12127 | struct pmu *pmu = child_event->pmu; |
| 12128 | |
| 12129 | child_ctx->task_ctx_data = kzalloc(pmu->task_ctx_size, |
| 12130 | GFP_KERNEL); |
| 12131 | if (!child_ctx->task_ctx_data) { |
| 12132 | free_event(child_event); |
Alexander Shishkin | 697d877 | 2019-11-05 09:57:02 +0200 | [diff] [blame] | 12133 | return ERR_PTR(-ENOMEM); |
Jiri Olsa | 313ccb9 | 2018-01-07 17:03:47 +0100 | [diff] [blame] | 12134 | } |
| 12135 | } |
| 12136 | |
Peter Zijlstra | c6e5b73 | 2016-01-15 16:07:41 +0200 | [diff] [blame] | 12137 | /* |
| 12138 | * is_orphaned_event() and list_add_tail(&parent_event->child_list) |
| 12139 | * must be under the same lock in order to serialize against |
| 12140 | * perf_event_release_kernel(), such that either we must observe |
| 12141 | * is_orphaned_event() or they will observe us on the child_list. |
| 12142 | */ |
| 12143 | mutex_lock(&parent_event->child_mutex); |
Jiri Olsa | fadfe7b | 2014-08-01 14:33:02 +0200 | [diff] [blame] | 12144 | if (is_orphaned_event(parent_event) || |
| 12145 | !atomic_long_inc_not_zero(&parent_event->refcount)) { |
Peter Zijlstra | c6e5b73 | 2016-01-15 16:07:41 +0200 | [diff] [blame] | 12146 | mutex_unlock(&parent_event->child_mutex); |
Jiri Olsa | 313ccb9 | 2018-01-07 17:03:47 +0100 | [diff] [blame] | 12147 | /* task_ctx_data is freed with child_ctx */ |
Al Viro | a6fa941 | 2012-08-20 14:59:25 +0100 | [diff] [blame] | 12148 | free_event(child_event); |
| 12149 | return NULL; |
| 12150 | } |
| 12151 | |
Peter Zijlstra | 97dee4f | 2010-09-07 15:35:33 +0200 | [diff] [blame] | 12152 | get_ctx(child_ctx); |
| 12153 | |
| 12154 | /* |
| 12155 | * Make the child state follow the state of the parent event, |
| 12156 | * not its attr.disabled bit. We hold the parent's mutex, |
| 12157 | * so we won't race with perf_event_{en, dis}able_family. |
| 12158 | */ |
Jiri Olsa | 1929def | 2014-09-12 13:18:27 +0200 | [diff] [blame] | 12159 | if (parent_state >= PERF_EVENT_STATE_INACTIVE) |
Peter Zijlstra | 97dee4f | 2010-09-07 15:35:33 +0200 | [diff] [blame] | 12160 | child_event->state = PERF_EVENT_STATE_INACTIVE; |
| 12161 | else |
| 12162 | child_event->state = PERF_EVENT_STATE_OFF; |
| 12163 | |
| 12164 | if (parent_event->attr.freq) { |
| 12165 | u64 sample_period = parent_event->hw.sample_period; |
| 12166 | struct hw_perf_event *hwc = &child_event->hw; |
| 12167 | |
| 12168 | hwc->sample_period = sample_period; |
| 12169 | hwc->last_period = sample_period; |
| 12170 | |
| 12171 | local64_set(&hwc->period_left, sample_period); |
| 12172 | } |
| 12173 | |
| 12174 | child_event->ctx = child_ctx; |
| 12175 | child_event->overflow_handler = parent_event->overflow_handler; |
Avi Kivity | 4dc0da8 | 2011-06-29 18:42:35 +0300 | [diff] [blame] | 12176 | child_event->overflow_handler_context |
| 12177 | = parent_event->overflow_handler_context; |
Peter Zijlstra | 97dee4f | 2010-09-07 15:35:33 +0200 | [diff] [blame] | 12178 | |
| 12179 | /* |
Thomas Gleixner | 614b678 | 2010-12-03 16:24:32 -0200 | [diff] [blame] | 12180 | * Precalculate sample_data sizes |
| 12181 | */ |
| 12182 | perf_event__header_size(child_event); |
Arnaldo Carvalho de Melo | 6844c09 | 2010-12-03 16:36:35 -0200 | [diff] [blame] | 12183 | perf_event__id_header_size(child_event); |
Thomas Gleixner | 614b678 | 2010-12-03 16:24:32 -0200 | [diff] [blame] | 12184 | |
| 12185 | /* |
Peter Zijlstra | 97dee4f | 2010-09-07 15:35:33 +0200 | [diff] [blame] | 12186 | * Link it up in the child's context: |
| 12187 | */ |
Peter Zijlstra | cee010e | 2010-09-10 12:51:54 +0200 | [diff] [blame] | 12188 | raw_spin_lock_irqsave(&child_ctx->lock, flags); |
Peter Zijlstra | 97dee4f | 2010-09-07 15:35:33 +0200 | [diff] [blame] | 12189 | add_event_to_ctx(child_event, child_ctx); |
Peter Zijlstra | cee010e | 2010-09-10 12:51:54 +0200 | [diff] [blame] | 12190 | raw_spin_unlock_irqrestore(&child_ctx->lock, flags); |
Peter Zijlstra | 97dee4f | 2010-09-07 15:35:33 +0200 | [diff] [blame] | 12191 | |
| 12192 | /* |
Peter Zijlstra | 97dee4f | 2010-09-07 15:35:33 +0200 | [diff] [blame] | 12193 | * Link this into the parent event's child list |
| 12194 | */ |
Peter Zijlstra | 97dee4f | 2010-09-07 15:35:33 +0200 | [diff] [blame] | 12195 | list_add_tail(&child_event->child_list, &parent_event->child_list); |
| 12196 | mutex_unlock(&parent_event->child_mutex); |
| 12197 | |
| 12198 | return child_event; |
| 12199 | } |
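/*
 * Illustrative userspace sketch (not part of this file), assuming the
 * usual <linux/perf_event.h>, <sys/syscall.h>, <sys/ioctl.h> and
 * <unistd.h> headers: because the child state above follows the parent's
 * current state rather than attr.disabled, a counter opened disabled but
 * enabled before fork() is inherited already enabled by the child.
 */
static int sketch_inherited_counter(struct perf_event_attr *attr)
{
	int fd;

	attr->inherit  = 1;
	attr->disabled = 1;

	fd = syscall(__NR_perf_event_open, attr, 0 /* self */, -1, -1, 0);
	if (fd < 0)
		return -1;

	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);	/* parent is now enabled */

	if (fork() == 0) {
		/* the inherited child event starts out INACTIVE, not OFF */
		_exit(0);
	}
	return fd;
}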
| 12200 | |
Peter Zijlstra | d8a8cfc | 2017-03-16 13:47:51 +0100 | [diff] [blame] | 12201 | /* |
| 12202 | * Inherits an event group. |
| 12203 | * |
| 12204 | * This will quietly suppress orphaned events; !inherit_event() is not an error. |
| 12205 | * This matches with perf_event_release_kernel() removing all child events. |
| 12206 | * |
| 12207 | * Returns: |
| 12208 | * - 0 on success |
| 12209 | * - <0 on error |
| 12210 | */ |
Peter Zijlstra | 97dee4f | 2010-09-07 15:35:33 +0200 | [diff] [blame] | 12211 | static int inherit_group(struct perf_event *parent_event, |
| 12212 | struct task_struct *parent, |
| 12213 | struct perf_event_context *parent_ctx, |
| 12214 | struct task_struct *child, |
| 12215 | struct perf_event_context *child_ctx) |
| 12216 | { |
| 12217 | struct perf_event *leader; |
| 12218 | struct perf_event *sub; |
| 12219 | struct perf_event *child_ctr; |
| 12220 | |
| 12221 | leader = inherit_event(parent_event, parent, parent_ctx, |
| 12222 | child, NULL, child_ctx); |
| 12223 | if (IS_ERR(leader)) |
| 12224 | return PTR_ERR(leader); |
Peter Zijlstra | d8a8cfc | 2017-03-16 13:47:51 +0100 | [diff] [blame] | 12225 | /* |
| 12226 | * @leader can be NULL here because of is_orphaned_event(). In this |
| 12227 | * case inherit_event() will create individual events, similar to what |
| 12228 | * perf_group_detach() would do anyway. |
| 12229 | */ |
Peter Zijlstra | edb3959 | 2018-03-15 17:36:56 +0100 | [diff] [blame] | 12230 | for_each_sibling_event(sub, parent_event) { |
Peter Zijlstra | 97dee4f | 2010-09-07 15:35:33 +0200 | [diff] [blame] | 12231 | child_ctr = inherit_event(sub, parent, parent_ctx, |
| 12232 | child, leader, child_ctx); |
| 12233 | if (IS_ERR(child_ctr)) |
| 12234 | return PTR_ERR(child_ctr); |
Alexander Shishkin | f733c6b | 2019-10-04 15:57:29 +0300 | [diff] [blame] | 12235 | |
Alexander Shishkin | 00496fe | 2019-11-01 17:12:48 +0200 | [diff] [blame] | 12236 | if (sub->aux_event == parent_event && child_ctr && |
Alexander Shishkin | f733c6b | 2019-10-04 15:57:29 +0300 | [diff] [blame] | 12237 | !perf_get_aux_event(child_ctr, leader)) |
| 12238 | return -EINVAL; |
Peter Zijlstra | 97dee4f | 2010-09-07 15:35:33 +0200 | [diff] [blame] | 12239 | } |
| 12240 | return 0; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 12241 | } |
| 12242 | |
Peter Zijlstra | d8a8cfc | 2017-03-16 13:47:51 +0100 | [diff] [blame] | 12243 | /* |
| 12244 | * Creates the child task context and tries to inherit the event-group. |
| 12245 | * |
 | 12246 | * Clears @inherited_all on !attr.inherit or error. Note that we'll leave |
| 12247 | * inherited_all set when we 'fail' to inherit an orphaned event; this is |
| 12248 | * consistent with perf_event_release_kernel() removing all child events. |
| 12249 | * |
| 12250 | * Returns: |
| 12251 | * - 0 on success |
| 12252 | * - <0 on error |
| 12253 | */ |
Frederic Weisbecker | 889ff01 | 2010-01-09 20:04:47 +0100 | [diff] [blame] | 12254 | static int |
| 12255 | inherit_task_group(struct perf_event *event, struct task_struct *parent, |
| 12256 | struct perf_event_context *parent_ctx, |
Peter Zijlstra | 8dc85d547 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 12257 | struct task_struct *child, int ctxn, |
Frederic Weisbecker | 889ff01 | 2010-01-09 20:04:47 +0100 | [diff] [blame] | 12258 | int *inherited_all) |
| 12259 | { |
| 12260 | int ret; |
Peter Zijlstra | 8dc85d547 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 12261 | struct perf_event_context *child_ctx; |
Frederic Weisbecker | 889ff01 | 2010-01-09 20:04:47 +0100 | [diff] [blame] | 12262 | |
| 12263 | if (!event->attr.inherit) { |
| 12264 | *inherited_all = 0; |
| 12265 | return 0; |
| 12266 | } |
| 12267 | |
Peter Zijlstra | fe4b04f | 2011-02-02 13:19:09 +0100 | [diff] [blame] | 12268 | child_ctx = child->perf_event_ctxp[ctxn]; |
Frederic Weisbecker | 889ff01 | 2010-01-09 20:04:47 +0100 | [diff] [blame] | 12269 | if (!child_ctx) { |
| 12270 | /* |
| 12271 | * This is executed from the parent task context, so |
| 12272 | * inherit events that have been marked for cloning. |
| 12273 | * First allocate and initialize a context for the |
| 12274 | * child. |
| 12275 | */ |
Jiri Olsa | 734df5a | 2013-07-09 17:44:10 +0200 | [diff] [blame] | 12276 | child_ctx = alloc_perf_context(parent_ctx->pmu, child); |
Frederic Weisbecker | 889ff01 | 2010-01-09 20:04:47 +0100 | [diff] [blame] | 12277 | if (!child_ctx) |
| 12278 | return -ENOMEM; |
| 12279 | |
Peter Zijlstra | 8dc85d547 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 12280 | child->perf_event_ctxp[ctxn] = child_ctx; |
Frederic Weisbecker | 889ff01 | 2010-01-09 20:04:47 +0100 | [diff] [blame] | 12281 | } |
| 12282 | |
| 12283 | ret = inherit_group(event, parent, parent_ctx, |
| 12284 | child, child_ctx); |
| 12285 | |
| 12286 | if (ret) |
| 12287 | *inherited_all = 0; |
| 12288 | |
| 12289 | return ret; |
| 12290 | } |
| 12291 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 12292 | /* |
| 12293 | * Initialize the perf_event context in task_struct |
| 12294 | */ |
Jiri Olsa | 985c8dc | 2014-06-24 10:20:24 +0200 | [diff] [blame] | 12295 | static int perf_event_init_context(struct task_struct *child, int ctxn) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 12296 | { |
Frederic Weisbecker | 889ff01 | 2010-01-09 20:04:47 +0100 | [diff] [blame] | 12297 | struct perf_event_context *child_ctx, *parent_ctx; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 12298 | struct perf_event_context *cloned_ctx; |
| 12299 | struct perf_event *event; |
| 12300 | struct task_struct *parent = current; |
| 12301 | int inherited_all = 1; |
Thomas Gleixner | dddd337 | 2010-11-24 10:05:55 +0100 | [diff] [blame] | 12302 | unsigned long flags; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 12303 | int ret = 0; |
| 12304 | |
Peter Zijlstra | 8dc85d547 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 12305 | if (likely(!parent->perf_event_ctxp[ctxn])) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 12306 | return 0; |
| 12307 | |
| 12308 | /* |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 12309 | * If the parent's context is a clone, pin it so it won't get |
| 12310 | * swapped under us. |
| 12311 | */ |
Peter Zijlstra | 8dc85d547 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 12312 | parent_ctx = perf_pin_task_context(parent, ctxn); |
Peter Zijlstra | ffb4ef2 | 2014-05-05 19:12:20 +0200 | [diff] [blame] | 12313 | if (!parent_ctx) |
| 12314 | return 0; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 12315 | |
| 12316 | /* |
| 12317 | * No need to check if parent_ctx != NULL here; since we saw |
| 12318 | * it non-NULL earlier, the only reason for it to become NULL |
| 12319 | * is if we exit, and since we're currently in the middle of |
| 12320 | * a fork we can't be exiting at the same time. |
| 12321 | */ |
| 12322 | |
| 12323 | /* |
| 12324 | * Lock the parent list. No need to lock the child - not PID |
| 12325 | * hashed yet and not running, so nobody can access it. |
| 12326 | */ |
| 12327 | mutex_lock(&parent_ctx->mutex); |
| 12328 | |
| 12329 | /* |
 | 12330 | * We don't have to disable NMIs - we are only looking at |
| 12331 | * the list, not manipulating it: |
| 12332 | */ |
Peter Zijlstra | 6e6804d | 2017-11-13 14:28:41 +0100 | [diff] [blame] | 12333 | perf_event_groups_for_each(event, &parent_ctx->pinned_groups) { |
Peter Zijlstra | 8dc85d547 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 12334 | ret = inherit_task_group(event, parent, parent_ctx, |
| 12335 | child, ctxn, &inherited_all); |
Frederic Weisbecker | 889ff01 | 2010-01-09 20:04:47 +0100 | [diff] [blame] | 12336 | if (ret) |
Peter Zijlstra | e7cc486 | 2017-03-16 13:47:49 +0100 | [diff] [blame] | 12337 | goto out_unlock; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 12338 | } |
| 12339 | |
Thomas Gleixner | dddd337 | 2010-11-24 10:05:55 +0100 | [diff] [blame] | 12340 | /* |
 | 12341 | * We can't hold ctx->lock when iterating the ->flexible_groups list due |
| 12342 | * to allocations, but we need to prevent rotation because |
| 12343 | * rotate_ctx() will change the list from interrupt context. |
| 12344 | */ |
| 12345 | raw_spin_lock_irqsave(&parent_ctx->lock, flags); |
| 12346 | parent_ctx->rotate_disable = 1; |
| 12347 | raw_spin_unlock_irqrestore(&parent_ctx->lock, flags); |
| 12348 | |
Peter Zijlstra | 6e6804d | 2017-11-13 14:28:41 +0100 | [diff] [blame] | 12349 | perf_event_groups_for_each(event, &parent_ctx->flexible_groups) { |
Peter Zijlstra | 8dc85d547 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 12350 | ret = inherit_task_group(event, parent, parent_ctx, |
| 12351 | child, ctxn, &inherited_all); |
Frederic Weisbecker | 889ff01 | 2010-01-09 20:04:47 +0100 | [diff] [blame] | 12352 | if (ret) |
Peter Zijlstra | e7cc486 | 2017-03-16 13:47:49 +0100 | [diff] [blame] | 12353 | goto out_unlock; |
Frederic Weisbecker | 889ff01 | 2010-01-09 20:04:47 +0100 | [diff] [blame] | 12354 | } |
| 12355 | |
Thomas Gleixner | dddd337 | 2010-11-24 10:05:55 +0100 | [diff] [blame] | 12356 | raw_spin_lock_irqsave(&parent_ctx->lock, flags); |
| 12357 | parent_ctx->rotate_disable = 0; |
Thomas Gleixner | dddd337 | 2010-11-24 10:05:55 +0100 | [diff] [blame] | 12358 | |
Peter Zijlstra | 8dc85d547 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 12359 | child_ctx = child->perf_event_ctxp[ctxn]; |
Frederic Weisbecker | 889ff01 | 2010-01-09 20:04:47 +0100 | [diff] [blame] | 12360 | |
Peter Zijlstra | 05cbaa2 | 2009-12-30 16:00:35 +0100 | [diff] [blame] | 12361 | if (child_ctx && inherited_all) { |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 12362 | /* |
| 12363 | * Mark the child context as a clone of the parent |
| 12364 | * context, or of whatever the parent is a clone of. |
Peter Zijlstra | c5ed514 | 2011-01-17 13:45:37 +0100 | [diff] [blame] | 12365 | * |
| 12366 | * Note that if the parent is a clone, the holding of |
 | 12367 | * parent_ctx->lock prevents it from being uncloned. |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 12368 | */ |
Peter Zijlstra | c5ed514 | 2011-01-17 13:45:37 +0100 | [diff] [blame] | 12369 | cloned_ctx = parent_ctx->parent_ctx; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 12370 | if (cloned_ctx) { |
| 12371 | child_ctx->parent_ctx = cloned_ctx; |
| 12372 | child_ctx->parent_gen = parent_ctx->parent_gen; |
| 12373 | } else { |
| 12374 | child_ctx->parent_ctx = parent_ctx; |
| 12375 | child_ctx->parent_gen = parent_ctx->generation; |
| 12376 | } |
| 12377 | get_ctx(child_ctx->parent_ctx); |
| 12378 | } |
| 12379 | |
Peter Zijlstra | c5ed514 | 2011-01-17 13:45:37 +0100 | [diff] [blame] | 12380 | raw_spin_unlock_irqrestore(&parent_ctx->lock, flags); |
Peter Zijlstra | e7cc486 | 2017-03-16 13:47:49 +0100 | [diff] [blame] | 12381 | out_unlock: |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 12382 | mutex_unlock(&parent_ctx->mutex); |
| 12383 | |
| 12384 | perf_unpin_context(parent_ctx); |
Peter Zijlstra | fe4b04f | 2011-02-02 13:19:09 +0100 | [diff] [blame] | 12385 | put_ctx(parent_ctx); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 12386 | |
| 12387 | return ret; |
| 12388 | } |
| 12389 | |
Peter Zijlstra | 8dc85d547 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 12390 | /* |
| 12391 | * Initialize the perf_event context in task_struct |
| 12392 | */ |
| 12393 | int perf_event_init_task(struct task_struct *child) |
| 12394 | { |
| 12395 | int ctxn, ret; |
| 12396 | |
Oleg Nesterov | 8550d7c | 2011-01-19 19:22:28 +0100 | [diff] [blame] | 12397 | memset(child->perf_event_ctxp, 0, sizeof(child->perf_event_ctxp)); |
| 12398 | mutex_init(&child->perf_event_mutex); |
| 12399 | INIT_LIST_HEAD(&child->perf_event_list); |
| 12400 | |
Peter Zijlstra | 8dc85d547 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 12401 | for_each_task_context_nr(ctxn) { |
| 12402 | ret = perf_event_init_context(child, ctxn); |
Peter Zijlstra | 6c72e350 | 2014-10-02 16:17:02 -0700 | [diff] [blame] | 12403 | if (ret) { |
| 12404 | perf_event_free_task(child); |
Peter Zijlstra | 8dc85d547 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 12405 | return ret; |
Peter Zijlstra | 6c72e350 | 2014-10-02 16:17:02 -0700 | [diff] [blame] | 12406 | } |
Peter Zijlstra | 8dc85d547 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 12407 | } |
| 12408 | |
| 12409 | return 0; |
| 12410 | } |
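/*
 * Illustrative sketch (not part of this file): perf_event_init_task() is
 * called from the fork path (copy_process() in kernel/fork.c), with the
 * matching teardown done by perf_event_exit_task() at exit time.  The
 * fragment below only paraphrases the fork-side flow; the error handling
 * is not a verbatim quote of kernel/fork.c.
 */
static int sketch_fork_side(struct task_struct *child)
{
	int retval = perf_event_init_task(child);

	if (retval)
		return retval;	/* copy_process() aborts the fork */

	return 0;
}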
| 12411 | |
Paul Mackerras | 220b140 | 2010-03-10 20:45:52 +1100 | [diff] [blame] | 12412 | static void __init perf_event_init_all_cpus(void) |
| 12413 | { |
Peter Zijlstra | b28ab83 | 2010-09-06 14:48:15 +0200 | [diff] [blame] | 12414 | struct swevent_htable *swhash; |
Paul Mackerras | 220b140 | 2010-03-10 20:45:52 +1100 | [diff] [blame] | 12415 | int cpu; |
Paul Mackerras | 220b140 | 2010-03-10 20:45:52 +1100 | [diff] [blame] | 12416 | |
Thomas Gleixner | a63fbed | 2017-05-24 10:15:34 +0200 | [diff] [blame] | 12417 | zalloc_cpumask_var(&perf_online_mask, GFP_KERNEL); |
| 12418 | |
Paul Mackerras | 220b140 | 2010-03-10 20:45:52 +1100 | [diff] [blame] | 12419 | for_each_possible_cpu(cpu) { |
Peter Zijlstra | b28ab83 | 2010-09-06 14:48:15 +0200 | [diff] [blame] | 12420 | swhash = &per_cpu(swevent_htable, cpu); |
| 12421 | mutex_init(&swhash->hlist_mutex); |
Mark Rutland | 2fde4f9 | 2015-01-07 15:01:54 +0000 | [diff] [blame] | 12422 | INIT_LIST_HEAD(&per_cpu(active_ctx_list, cpu)); |
Kan Liang | f2fb6be | 2016-03-23 11:24:37 -0700 | [diff] [blame] | 12423 | |
| 12424 | INIT_LIST_HEAD(&per_cpu(pmu_sb_events.list, cpu)); |
| 12425 | raw_spin_lock_init(&per_cpu(pmu_sb_events.lock, cpu)); |
Peter Zijlstra | e48c178 | 2016-07-06 09:18:30 +0200 | [diff] [blame] | 12426 | |
David Carrillo-Cisneros | 058fe1c | 2017-01-18 11:24:53 -0800 | [diff] [blame] | 12427 | #ifdef CONFIG_CGROUP_PERF |
| 12428 | INIT_LIST_HEAD(&per_cpu(cgrp_cpuctx_list, cpu)); |
| 12429 | #endif |
Peter Zijlstra | e48c178 | 2016-07-06 09:18:30 +0200 | [diff] [blame] | 12430 | INIT_LIST_HEAD(&per_cpu(sched_cb_list, cpu)); |
Paul Mackerras | 220b140 | 2010-03-10 20:45:52 +1100 | [diff] [blame] | 12431 | } |
| 12432 | } |
| 12433 | |
Valdis Kletnieks | d18bf42 | 2019-03-12 04:06:37 -0400 | [diff] [blame] | 12434 | static void perf_swevent_init_cpu(unsigned int cpu) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 12435 | { |
Peter Zijlstra | 108b02c | 2010-09-06 14:32:03 +0200 | [diff] [blame] | 12436 | struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 12437 | |
Peter Zijlstra | b28ab83 | 2010-09-06 14:48:15 +0200 | [diff] [blame] | 12438 | mutex_lock(&swhash->hlist_mutex); |
Thomas Gleixner | 059fcd8 | 2016-02-09 20:11:34 +0000 | [diff] [blame] | 12439 | if (swhash->hlist_refcount > 0 && !swevent_hlist_deref(swhash)) { |
Frederic Weisbecker | 76e1d90 | 2010-04-05 15:35:57 +0200 | [diff] [blame] | 12440 | struct swevent_hlist *hlist; |
| 12441 | |
Peter Zijlstra | b28ab83 | 2010-09-06 14:48:15 +0200 | [diff] [blame] | 12442 | hlist = kzalloc_node(sizeof(*hlist), GFP_KERNEL, cpu_to_node(cpu)); |
| 12443 | WARN_ON(!hlist); |
| 12444 | rcu_assign_pointer(swhash->swevent_hlist, hlist); |
Frederic Weisbecker | 76e1d90 | 2010-04-05 15:35:57 +0200 | [diff] [blame] | 12445 | } |
Peter Zijlstra | b28ab83 | 2010-09-06 14:48:15 +0200 | [diff] [blame] | 12446 | mutex_unlock(&swhash->hlist_mutex); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 12447 | } |
| 12448 | |
Dave Young | 2965faa | 2015-09-09 15:38:55 -0700 | [diff] [blame] | 12449 | #if defined CONFIG_HOTPLUG_CPU || defined CONFIG_KEXEC_CORE |
Peter Zijlstra | 108b02c | 2010-09-06 14:32:03 +0200 | [diff] [blame] | 12450 | static void __perf_event_exit_context(void *__info) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 12451 | { |
Peter Zijlstra | 108b02c | 2010-09-06 14:32:03 +0200 | [diff] [blame] | 12452 | struct perf_event_context *ctx = __info; |
Peter Zijlstra | fae3fde | 2016-01-11 15:00:50 +0100 | [diff] [blame] | 12453 | struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); |
| 12454 | struct perf_event *event; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 12455 | |
Peter Zijlstra | fae3fde | 2016-01-11 15:00:50 +0100 | [diff] [blame] | 12456 | raw_spin_lock(&ctx->lock); |
Peter Zijlstra | 0ee098c | 2017-09-05 13:24:28 +0200 | [diff] [blame] | 12457 | ctx_sched_out(ctx, cpuctx, EVENT_TIME); |
Peter Zijlstra | fae3fde | 2016-01-11 15:00:50 +0100 | [diff] [blame] | 12458 | list_for_each_entry(event, &ctx->event_list, event_entry) |
Peter Zijlstra | 45a0e07 | 2016-01-26 13:09:48 +0100 | [diff] [blame] | 12459 | __perf_remove_from_context(event, cpuctx, ctx, (void *)DETACH_GROUP); |
Peter Zijlstra | fae3fde | 2016-01-11 15:00:50 +0100 | [diff] [blame] | 12460 | raw_spin_unlock(&ctx->lock); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 12461 | } |
Peter Zijlstra | 108b02c | 2010-09-06 14:32:03 +0200 | [diff] [blame] | 12462 | |
| 12463 | static void perf_event_exit_cpu_context(int cpu) |
| 12464 | { |
Thomas Gleixner | a63fbed | 2017-05-24 10:15:34 +0200 | [diff] [blame] | 12465 | struct perf_cpu_context *cpuctx; |
Peter Zijlstra | 108b02c | 2010-09-06 14:32:03 +0200 | [diff] [blame] | 12466 | struct perf_event_context *ctx; |
| 12467 | struct pmu *pmu; |
Peter Zijlstra | 108b02c | 2010-09-06 14:32:03 +0200 | [diff] [blame] | 12468 | |
Thomas Gleixner | a63fbed | 2017-05-24 10:15:34 +0200 | [diff] [blame] | 12469 | mutex_lock(&pmus_lock); |
| 12470 | list_for_each_entry(pmu, &pmus, entry) { |
| 12471 | cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu); |
| 12472 | ctx = &cpuctx->ctx; |
Peter Zijlstra | 108b02c | 2010-09-06 14:32:03 +0200 | [diff] [blame] | 12473 | |
| 12474 | mutex_lock(&ctx->mutex); |
| 12475 | smp_call_function_single(cpu, __perf_event_exit_context, ctx, 1); |
Thomas Gleixner | a63fbed | 2017-05-24 10:15:34 +0200 | [diff] [blame] | 12476 | cpuctx->online = 0; |
Peter Zijlstra | 108b02c | 2010-09-06 14:32:03 +0200 | [diff] [blame] | 12477 | mutex_unlock(&ctx->mutex); |
| 12478 | } |
Thomas Gleixner | a63fbed | 2017-05-24 10:15:34 +0200 | [diff] [blame] | 12479 | cpumask_clear_cpu(cpu, perf_online_mask); |
| 12480 | mutex_unlock(&pmus_lock); |
Peter Zijlstra | 108b02c | 2010-09-06 14:32:03 +0200 | [diff] [blame] | 12481 | } |
Thomas Gleixner | 00e16c3 | 2016-07-13 17:16:09 +0000 | [diff] [blame] | 12482 | #else |
Peter Zijlstra | 108b02c | 2010-09-06 14:32:03 +0200 | [diff] [blame] | 12483 | |
Thomas Gleixner | 00e16c3 | 2016-07-13 17:16:09 +0000 | [diff] [blame] | 12484 | static void perf_event_exit_cpu_context(int cpu) { } |
| 12485 | |
| 12486 | #endif |
| 12487 | |
Thomas Gleixner | a63fbed | 2017-05-24 10:15:34 +0200 | [diff] [blame] | 12488 | int perf_event_init_cpu(unsigned int cpu) |
| 12489 | { |
| 12490 | struct perf_cpu_context *cpuctx; |
| 12491 | struct perf_event_context *ctx; |
| 12492 | struct pmu *pmu; |
| 12493 | |
| 12494 | perf_swevent_init_cpu(cpu); |
| 12495 | |
| 12496 | mutex_lock(&pmus_lock); |
| 12497 | cpumask_set_cpu(cpu, perf_online_mask); |
| 12498 | list_for_each_entry(pmu, &pmus, entry) { |
| 12499 | cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu); |
| 12500 | ctx = &cpuctx->ctx; |
| 12501 | |
| 12502 | mutex_lock(&ctx->mutex); |
| 12503 | cpuctx->online = 1; |
| 12504 | mutex_unlock(&ctx->mutex); |
| 12505 | } |
| 12506 | mutex_unlock(&pmus_lock); |
| 12507 | |
| 12508 | return 0; |
| 12509 | } |
| 12510 | |
Thomas Gleixner | 00e16c3 | 2016-07-13 17:16:09 +0000 | [diff] [blame] | 12511 | int perf_event_exit_cpu(unsigned int cpu) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 12512 | { |
Peter Zijlstra | e3703f8 | 2014-02-24 12:06:12 +0100 | [diff] [blame] | 12513 | perf_event_exit_cpu_context(cpu); |
Thomas Gleixner | 00e16c3 | 2016-07-13 17:16:09 +0000 | [diff] [blame] | 12514 | return 0; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 12515 | } |
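/*
 * Illustrative sketch (not part of this file): the two hotplug callbacks
 * above are not invoked directly; they are wired into the CPU hotplug
 * state machine via a static entry in kernel/cpu.c.  Registering them
 * dynamically would look roughly like this, with CPUHP_AP_PERF_ONLINE
 * being the state used for the online/offline notifications.
 */
static int __init sketch_register_perf_hotplug(void)
{
	return cpuhp_setup_state(CPUHP_AP_PERF_ONLINE, "perf:online",
				 perf_event_init_cpu, perf_event_exit_cpu);
}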
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 12516 | |
Peter Zijlstra | c277443 | 2010-12-08 15:29:02 +0100 | [diff] [blame] | 12517 | static int |
| 12518 | perf_reboot(struct notifier_block *notifier, unsigned long val, void *v) |
| 12519 | { |
| 12520 | int cpu; |
| 12521 | |
| 12522 | for_each_online_cpu(cpu) |
| 12523 | perf_event_exit_cpu(cpu); |
| 12524 | |
| 12525 | return NOTIFY_OK; |
| 12526 | } |
| 12527 | |
| 12528 | /* |
| 12529 | * Run the perf reboot notifier at the very last possible moment so that |
| 12530 | * the generic watchdog code runs as long as possible. |
| 12531 | */ |
| 12532 | static struct notifier_block perf_reboot_notifier = { |
| 12533 | .notifier_call = perf_reboot, |
| 12534 | .priority = INT_MIN, |
| 12535 | }; |
| 12536 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 12537 | void __init perf_event_init(void) |
| 12538 | { |
Jason Wessel | 3c502e7 | 2010-11-04 17:33:01 -0500 | [diff] [blame] | 12539 | int ret; |
| 12540 | |
Peter Zijlstra | 2e80a82 | 2010-11-17 23:17:36 +0100 | [diff] [blame] | 12541 | idr_init(&pmu_idr); |
| 12542 | |
Paul Mackerras | 220b140 | 2010-03-10 20:45:52 +1100 | [diff] [blame] | 12543 | perf_event_init_all_cpus(); |
Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 12544 | init_srcu_struct(&pmus_srcu); |
Peter Zijlstra | 2e80a82 | 2010-11-17 23:17:36 +0100 | [diff] [blame] | 12545 | perf_pmu_register(&perf_swevent, "software", PERF_TYPE_SOFTWARE); |
| 12546 | perf_pmu_register(&perf_cpu_clock, NULL, -1); |
| 12547 | perf_pmu_register(&perf_task_clock, NULL, -1); |
Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 12548 | perf_tp_register(); |
Thomas Gleixner | 00e16c3 | 2016-07-13 17:16:09 +0000 | [diff] [blame] | 12549 | perf_event_init_cpu(smp_processor_id()); |
Peter Zijlstra | c277443 | 2010-12-08 15:29:02 +0100 | [diff] [blame] | 12550 | register_reboot_notifier(&perf_reboot_notifier); |
Jason Wessel | 3c502e7 | 2010-11-04 17:33:01 -0500 | [diff] [blame] | 12551 | |
| 12552 | ret = init_hw_breakpoint(); |
| 12553 | WARN(ret, "hw_breakpoint initialization failed with: %d", ret); |
Gleb Natapov | b202952 | 2011-11-27 17:59:09 +0200 | [diff] [blame] | 12554 | |
Jiri Olsa | b01c3a0 | 2012-03-23 15:41:20 +0100 | [diff] [blame] | 12555 | /* |
 | 12556 | * Build-time assertion that we keep the data_head at the intended |
 | 12557 | * location. IOW, validation that we got the __reserved[] size right. |
| 12558 | */ |
| 12559 | BUILD_BUG_ON((offsetof(struct perf_event_mmap_page, data_head)) |
| 12560 | != 1024); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 12561 | } |
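/*
 * Illustrative userspace sketch (not part of this file): the BUILD_BUG_ON
 * above pins data_head at byte offset 1024 of the mmap()'d control page,
 * so a ring-buffer consumer can rely on reading it like this.  The
 * barrier is a stand-in for whatever rmb() the consumer's toolchain
 * provides; the uapi <linux/perf_event.h> header is assumed.
 */
static inline __u64 sketch_read_data_head(volatile struct perf_event_mmap_page *pc)
{
	__u64 head = pc->data_head;

	/* order the head read before reading the ring-buffer payload */
	__sync_synchronize();
	return head;
}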
Peter Zijlstra | abe4340 | 2010-11-17 23:17:37 +0100 | [diff] [blame] | 12562 | |
Cody P Schafer | fd979c0 | 2015-01-30 13:45:57 -0800 | [diff] [blame] | 12563 | ssize_t perf_event_sysfs_show(struct device *dev, struct device_attribute *attr, |
| 12564 | char *page) |
| 12565 | { |
| 12566 | struct perf_pmu_events_attr *pmu_attr = |
| 12567 | container_of(attr, struct perf_pmu_events_attr, attr); |
| 12568 | |
| 12569 | if (pmu_attr->event_str) |
| 12570 | return sprintf(page, "%s\n", pmu_attr->event_str); |
| 12571 | |
| 12572 | return 0; |
| 12573 | } |
Thomas Gleixner | 675965b | 2016-02-22 22:19:27 +0000 | [diff] [blame] | 12574 | EXPORT_SYMBOL_GPL(perf_event_sysfs_show); |
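/*
 * Illustrative sketch (not part of this file): how a PMU driver typically
 * feeds this show routine.  PMU_EVENT_ATTR_STRING() (from
 * <linux/perf_event.h>) builds a perf_pmu_events_attr whose ->event_str
 * is what perf_event_sysfs_show() prints; the event name and encoding
 * below are made up.
 */
PMU_EVENT_ATTR_STRING(example_cycles, sketch_attr_cycles, "event=0x3c");

static struct attribute *sketch_events_attrs[] = {
	&sketch_attr_cycles.attr.attr,
	NULL,
};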
Cody P Schafer | fd979c0 | 2015-01-30 13:45:57 -0800 | [diff] [blame] | 12575 | |
Peter Zijlstra | abe4340 | 2010-11-17 23:17:37 +0100 | [diff] [blame] | 12576 | static int __init perf_event_sysfs_init(void) |
| 12577 | { |
| 12578 | struct pmu *pmu; |
| 12579 | int ret; |
| 12580 | |
| 12581 | mutex_lock(&pmus_lock); |
| 12582 | |
| 12583 | ret = bus_register(&pmu_bus); |
| 12584 | if (ret) |
| 12585 | goto unlock; |
| 12586 | |
| 12587 | list_for_each_entry(pmu, &pmus, entry) { |
| 12588 | if (!pmu->name || pmu->type < 0) |
| 12589 | continue; |
| 12590 | |
| 12591 | ret = pmu_dev_alloc(pmu); |
| 12592 | WARN(ret, "Failed to register pmu: %s, reason %d\n", pmu->name, ret); |
| 12593 | } |
| 12594 | pmu_bus_running = 1; |
| 12595 | ret = 0; |
| 12596 | |
| 12597 | unlock: |
| 12598 | mutex_unlock(&pmus_lock); |
| 12599 | |
| 12600 | return ret; |
| 12601 | } |
| 12602 | device_initcall(perf_event_sysfs_init); |
Stephane Eranian | e5d1367 | 2011-02-14 11:20:01 +0200 | [diff] [blame] | 12603 | |
| 12604 | #ifdef CONFIG_CGROUP_PERF |
Tejun Heo | eb95419 | 2013-08-08 20:11:23 -0400 | [diff] [blame] | 12605 | static struct cgroup_subsys_state * |
| 12606 | perf_cgroup_css_alloc(struct cgroup_subsys_state *parent_css) |
Stephane Eranian | e5d1367 | 2011-02-14 11:20:01 +0200 | [diff] [blame] | 12607 | { |
| 12608 | struct perf_cgroup *jc; |
Stephane Eranian | e5d1367 | 2011-02-14 11:20:01 +0200 | [diff] [blame] | 12609 | |
Li Zefan | 1b15d05 | 2011-03-03 14:26:06 +0800 | [diff] [blame] | 12610 | jc = kzalloc(sizeof(*jc), GFP_KERNEL); |
Stephane Eranian | e5d1367 | 2011-02-14 11:20:01 +0200 | [diff] [blame] | 12611 | if (!jc) |
| 12612 | return ERR_PTR(-ENOMEM); |
| 12613 | |
Stephane Eranian | e5d1367 | 2011-02-14 11:20:01 +0200 | [diff] [blame] | 12614 | jc->info = alloc_percpu(struct perf_cgroup_info); |
| 12615 | if (!jc->info) { |
| 12616 | kfree(jc); |
| 12617 | return ERR_PTR(-ENOMEM); |
| 12618 | } |
| 12619 | |
Stephane Eranian | e5d1367 | 2011-02-14 11:20:01 +0200 | [diff] [blame] | 12620 | return &jc->css; |
| 12621 | } |
| 12622 | |
Tejun Heo | eb95419 | 2013-08-08 20:11:23 -0400 | [diff] [blame] | 12623 | static void perf_cgroup_css_free(struct cgroup_subsys_state *css) |
Stephane Eranian | e5d1367 | 2011-02-14 11:20:01 +0200 | [diff] [blame] | 12624 | { |
Tejun Heo | eb95419 | 2013-08-08 20:11:23 -0400 | [diff] [blame] | 12625 | struct perf_cgroup *jc = container_of(css, struct perf_cgroup, css); |
| 12626 | |
Stephane Eranian | e5d1367 | 2011-02-14 11:20:01 +0200 | [diff] [blame] | 12627 | free_percpu(jc->info); |
| 12628 | kfree(jc); |
| 12629 | } |
| 12630 | |
| 12631 | static int __perf_cgroup_move(void *info) |
| 12632 | { |
| 12633 | struct task_struct *task = info; |
Stephane Eranian | ddaaf4e | 2015-11-12 11:00:03 +0100 | [diff] [blame] | 12634 | rcu_read_lock(); |
Stephane Eranian | e5d1367 | 2011-02-14 11:20:01 +0200 | [diff] [blame] | 12635 | perf_cgroup_switch(task, PERF_CGROUP_SWOUT | PERF_CGROUP_SWIN); |
Stephane Eranian | ddaaf4e | 2015-11-12 11:00:03 +0100 | [diff] [blame] | 12636 | rcu_read_unlock(); |
Stephane Eranian | e5d1367 | 2011-02-14 11:20:01 +0200 | [diff] [blame] | 12637 | return 0; |
| 12638 | } |
| 12639 | |
Tejun Heo | 1f7dd3e5 | 2015-12-03 10:18:21 -0500 | [diff] [blame] | 12640 | static void perf_cgroup_attach(struct cgroup_taskset *tset) |
Stephane Eranian | e5d1367 | 2011-02-14 11:20:01 +0200 | [diff] [blame] | 12641 | { |
Tejun Heo | bb9d97b | 2011-12-12 18:12:21 -0800 | [diff] [blame] | 12642 | struct task_struct *task; |
Tejun Heo | 1f7dd3e5 | 2015-12-03 10:18:21 -0500 | [diff] [blame] | 12643 | struct cgroup_subsys_state *css; |
Tejun Heo | bb9d97b | 2011-12-12 18:12:21 -0800 | [diff] [blame] | 12644 | |
Tejun Heo | 1f7dd3e5 | 2015-12-03 10:18:21 -0500 | [diff] [blame] | 12645 | cgroup_taskset_for_each(task, css, tset) |
Tejun Heo | bb9d97b | 2011-12-12 18:12:21 -0800 | [diff] [blame] | 12646 | task_function_call(task, __perf_cgroup_move, task); |
Stephane Eranian | e5d1367 | 2011-02-14 11:20:01 +0200 | [diff] [blame] | 12647 | } |
| 12648 | |
Tejun Heo | 073219e | 2014-02-08 10:36:58 -0500 | [diff] [blame] | 12649 | struct cgroup_subsys perf_event_cgrp_subsys = { |
Tejun Heo | 92fb974 | 2012-11-19 08:13:38 -0800 | [diff] [blame] | 12650 | .css_alloc = perf_cgroup_css_alloc, |
| 12651 | .css_free = perf_cgroup_css_free, |
Tejun Heo | bb9d97b | 2011-12-12 18:12:21 -0800 | [diff] [blame] | 12652 | .attach = perf_cgroup_attach, |
Tejun Heo | 968ebff | 2017-01-29 14:35:20 -0500 | [diff] [blame] | 12653 | /* |
| 12654 | * Implicitly enable on dfl hierarchy so that perf events can |
 | 12655 | * always be filtered by cgroup2 path as long as the perf_event |
| 12656 | * controller is not mounted on a legacy hierarchy. |
| 12657 | */ |
| 12658 | .implicit_on_dfl = true, |
Tejun Heo | 8cfd814 | 2017-07-21 11:14:51 -0400 | [diff] [blame] | 12659 | .threaded = true, |
Stephane Eranian | e5d1367 | 2011-02-14 11:20:01 +0200 | [diff] [blame] | 12660 | }; |
| 12661 | #endif /* CONFIG_CGROUP_PERF */ |
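/*
 * Illustrative userspace sketch (not part of this file), assuming
 * <fcntl.h>, <sys/syscall.h>, <unistd.h> and <linux/perf_event.h>: with
 * the controller implicitly enabled on the default hierarchy, an event
 * can be scoped to a cgroup by passing an fd open on the cgroup
 * directory as the "pid" argument together with PERF_FLAG_PID_CGROUP.
 * The path below is made up.
 */
static int sketch_cgroup_scoped_counter(struct perf_event_attr *attr, int cpu)
{
	int cgrp_fd = open("/sys/fs/cgroup/example.slice", O_RDONLY);

	if (cgrp_fd < 0)
		return -1;

	return syscall(__NR_perf_event_open, attr, cgrp_fd, cpu,
		       -1, PERF_FLAG_PID_CGROUP);
}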