// SPDX-License-Identifier: GPL-2.0
/*
 * Performance events core code:
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra
 *  Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 */

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/idr.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/tick.h>
#include <linux/sysfs.h>
#include <linux/dcache.h>
#include <linux/percpu.h>
#include <linux/ptrace.h>
#include <linux/reboot.h>
#include <linux/vmstat.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/vmalloc.h>
#include <linux/hardirq.h>
#include <linux/hugetlb.h>
#include <linux/rculist.h>
#include <linux/uaccess.h>
#include <linux/syscalls.h>
#include <linux/anon_inodes.h>
#include <linux/kernel_stat.h>
#include <linux/cgroup.h>
#include <linux/perf_event.h>
#include <linux/trace_events.h>
#include <linux/hw_breakpoint.h>
#include <linux/mm_types.h>
#include <linux/module.h>
#include <linux/mman.h>
#include <linux/compat.h>
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/namei.h>
#include <linux/parser.h>
#include <linux/sched/clock.h>
#include <linux/sched/mm.h>
#include <linux/proc_ns.h>
#include <linux/mount.h>
#include <linux/min_heap.h>
#include <linux/highmem.h>
#include <linux/pgtable.h>
#include <linux/buildid.h>

#include "internal.h"

#include <asm/irq_regs.h>

typedef int (*remote_function_f)(void *);

struct remote_function_call {
        struct task_struct      *p;
        remote_function_f       func;
        void                    *info;
        int                     ret;
};

static void remote_function(void *data)
{
        struct remote_function_call *tfc = data;
        struct task_struct *p = tfc->p;

        if (p) {
                /* -EAGAIN */
                if (task_cpu(p) != smp_processor_id())
                        return;

                /*
                 * Now that we're on the right CPU with IRQs disabled, we can
                 * test if we hit the right task without races.
                 */

                tfc->ret = -ESRCH; /* No such (running) process */
                if (p != current)
                        return;
        }

        tfc->ret = tfc->func(tfc->info);
}

/**
 * task_function_call - call a function on the cpu on which a task runs
 * @p:          the task to evaluate
 * @func:       the function to be called
 * @info:       the function call argument
 *
 * Calls the function @func when the task is currently running. This might
 * be on the current CPU, in which case the function is called directly. This
 * will retry due to any failures in smp_call_function_single(), such as if
 * the task_cpu() goes offline concurrently.
 *
 * Returns @func's return value, or -ESRCH / -ENXIO when the process isn't
 * running.
 */
static int
task_function_call(struct task_struct *p, remote_function_f func, void *info)
{
        struct remote_function_call data = {
                .p      = p,
                .func   = func,
                .info   = info,
                .ret    = -EAGAIN,
        };
        int ret;

        for (;;) {
                ret = smp_call_function_single(task_cpu(p), remote_function,
                                               &data, 1);
                if (!ret)
                        ret = data.ret;

                if (ret != -EAGAIN)
                        break;

                cond_resched();
        }

        return ret;
}

/**
 * cpu_function_call - call a function on the cpu
 * @cpu:        target cpu to queue this function
 * @func:       the function to be called
 * @info:       the function call argument
 *
 * Calls the function @func on the remote cpu.
 *
 * Returns @func's return value, or -ENXIO when the cpu is offline.
 */
static int cpu_function_call(int cpu, remote_function_f func, void *info)
{
        struct remote_function_call data = {
                .p      = NULL,
                .func   = func,
                .info   = info,
                .ret    = -ENXIO, /* No such CPU */
        };

        smp_call_function_single(cpu, remote_function, &data, 1);

        return data.ret;
}
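
/*
 * Illustrative sketch (hypothetical helper and argument type, not part of
 * this file): callers wrap the work they want done in a small int-returning
 * function and let the helpers above pick the CPU and handle retries, e.g.:
 *
 *      static int __toggle_feature(void *info)
 *      {
 *              struct my_args *args = info;    // hypothetical argument type
 *              ...                             // runs with IRQs disabled
 *              return 0;
 *      }
 *
 *      err = task_function_call(p, __toggle_feature, &args);  // on p's CPU
 *      err = cpu_function_call(cpu, __toggle_feature, &args); // on a given CPU
 *
 * A negative return (-ESRCH / -ENXIO) means the target task/CPU was not
 * available; otherwise the callback's own return value is passed through.
 */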

static inline struct perf_cpu_context *
__get_cpu_context(struct perf_event_context *ctx)
{
        return this_cpu_ptr(ctx->pmu->pmu_cpu_context);
}

static void perf_ctx_lock(struct perf_cpu_context *cpuctx,
                          struct perf_event_context *ctx)
{
        raw_spin_lock(&cpuctx->ctx.lock);
        if (ctx)
                raw_spin_lock(&ctx->lock);
}

static void perf_ctx_unlock(struct perf_cpu_context *cpuctx,
                            struct perf_event_context *ctx)
{
        if (ctx)
                raw_spin_unlock(&ctx->lock);
        raw_spin_unlock(&cpuctx->ctx.lock);
}

#define TASK_TOMBSTONE ((void *)-1L)

static bool is_kernel_event(struct perf_event *event)
{
        return READ_ONCE(event->owner) == TASK_TOMBSTONE;
}

/*
 * On task ctx scheduling...
 *
 * When !ctx->nr_events a task context will not be scheduled. This means
 * we can disable the scheduler hooks (for performance) without leaving
 * pending task ctx state.
 *
 * This however results in two special cases:
 *
 *  - removing the last event from a task ctx; this is relatively
 *    straightforward and is done in __perf_remove_from_context.
 *
 *  - adding the first event to a task ctx; this is tricky because we cannot
 *    rely on ctx->is_active and therefore cannot use event_function_call().
 *    See perf_install_in_context().
 *
 * If ctx->nr_events, then ctx->is_active and cpuctx->task_ctx are set.
 */

typedef void (*event_f)(struct perf_event *, struct perf_cpu_context *,
                        struct perf_event_context *, void *);

struct event_function_struct {
        struct perf_event *event;
        event_f func;
        void *data;
};

static int event_function(void *info)
{
        struct event_function_struct *efs = info;
        struct perf_event *event = efs->event;
        struct perf_event_context *ctx = event->ctx;
        struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
        struct perf_event_context *task_ctx = cpuctx->task_ctx;
        int ret = 0;

        lockdep_assert_irqs_disabled();

        perf_ctx_lock(cpuctx, task_ctx);
        /*
         * Since we do the IPI call without holding ctx->lock things can have
         * changed, double check we hit the task we set out to hit.
         */
        if (ctx->task) {
                if (ctx->task != current) {
                        ret = -ESRCH;
                        goto unlock;
                }

                /*
                 * We only use event_function_call() on established contexts,
                 * and event_function() is only ever called when active (or
                 * rather, we'll have bailed in task_function_call() or the
                 * above ctx->task != current test), therefore we must have
                 * ctx->is_active here.
                 */
                WARN_ON_ONCE(!ctx->is_active);
                /*
                 * And since we have ctx->is_active, cpuctx->task_ctx must
                 * match.
                 */
                WARN_ON_ONCE(task_ctx != ctx);
        } else {
                WARN_ON_ONCE(&cpuctx->ctx != ctx);
        }

        efs->func(event, cpuctx, ctx, efs->data);
unlock:
        perf_ctx_unlock(cpuctx, task_ctx);

        return ret;
}

static void event_function_call(struct perf_event *event, event_f func, void *data)
{
        struct perf_event_context *ctx = event->ctx;
        struct task_struct *task = READ_ONCE(ctx->task); /* verified in event_function */
        struct event_function_struct efs = {
                .event = event,
                .func = func,
                .data = data,
        };

        if (!event->parent) {
                /*
                 * If this is a !child event, we must hold ctx::mutex to
                 * stabilize the event->ctx relation. See
                 * perf_event_ctx_lock().
                 */
                lockdep_assert_held(&ctx->mutex);
        }

        if (!task) {
                cpu_function_call(event->cpu, event_function, &efs);
                return;
        }

        if (task == TASK_TOMBSTONE)
                return;

again:
        if (!task_function_call(task, event_function, &efs))
                return;

        raw_spin_lock_irq(&ctx->lock);
        /*
         * Reload the task pointer, it might have been changed by
         * a concurrent perf_event_context_sched_out().
         */
        task = ctx->task;
        if (task == TASK_TOMBSTONE) {
                raw_spin_unlock_irq(&ctx->lock);
                return;
        }
        if (ctx->is_active) {
                raw_spin_unlock_irq(&ctx->lock);
                goto again;
        }
        func(event, NULL, ctx, data);
        raw_spin_unlock_irq(&ctx->lock);
}
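
/*
 * Illustrative sketch (hypothetical callback, not part of this file): a
 * typical user of event_function_call() supplies a callback matching event_f
 * and lets the helper decide whether to run it via IPI (active context) or
 * directly under ctx->lock (inactive context), e.g.:
 *
 *      static void __my_event_op(struct perf_event *event,
 *                                struct perf_cpu_context *cpuctx,
 *                                struct perf_event_context *ctx, void *info)
 *      {
 *              ...     // runs with the relevant ctx lock(s) held
 *      }
 *
 *      event_function_call(event, __my_event_op, NULL);
 *
 * The retry loop above handles the race where the context becomes active
 * again between dropping ctx->lock and the IPI landing.
 */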

/*
 * Similar to event_function_call() + event_function(), but hard assumes IRQs
 * are already disabled and we're on the right CPU.
 */
static void event_function_local(struct perf_event *event, event_f func, void *data)
{
        struct perf_event_context *ctx = event->ctx;
        struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
        struct task_struct *task = READ_ONCE(ctx->task);
        struct perf_event_context *task_ctx = NULL;

        lockdep_assert_irqs_disabled();

        if (task) {
                if (task == TASK_TOMBSTONE)
                        return;

                task_ctx = ctx;
        }

        perf_ctx_lock(cpuctx, task_ctx);

        task = ctx->task;
        if (task == TASK_TOMBSTONE)
                goto unlock;

        if (task) {
                /*
                 * We must be either inactive or active and the right task,
                 * otherwise we're screwed, since we cannot IPI to somewhere
                 * else.
                 */
                if (ctx->is_active) {
                        if (WARN_ON_ONCE(task != current))
                                goto unlock;

                        if (WARN_ON_ONCE(cpuctx->task_ctx != ctx))
                                goto unlock;
                }
        } else {
                WARN_ON_ONCE(&cpuctx->ctx != ctx);
        }

        func(event, cpuctx, ctx, data);
unlock:
        perf_ctx_unlock(cpuctx, task_ctx);
}

#define PERF_FLAG_ALL (PERF_FLAG_FD_NO_GROUP |\
                       PERF_FLAG_FD_OUTPUT  |\
                       PERF_FLAG_PID_CGROUP |\
                       PERF_FLAG_FD_CLOEXEC)

/*
 * branch priv levels that need permission checks
 */
#define PERF_SAMPLE_BRANCH_PERM_PLM \
        (PERF_SAMPLE_BRANCH_KERNEL |\
         PERF_SAMPLE_BRANCH_HV)

enum event_type_t {
        EVENT_FLEXIBLE = 0x1,
        EVENT_PINNED = 0x2,
        EVENT_TIME = 0x4,
        /* see ctx_resched() for details */
        EVENT_CPU = 0x8,
        EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED,
};

/*
 * perf_sched_events : >0 events exist
 * perf_cgroup_events: >0 per-cpu cgroup events exist on this cpu
 */

static void perf_sched_delayed(struct work_struct *work);
DEFINE_STATIC_KEY_FALSE(perf_sched_events);
static DECLARE_DELAYED_WORK(perf_sched_work, perf_sched_delayed);
static DEFINE_MUTEX(perf_sched_mutex);
static atomic_t perf_sched_count;

static DEFINE_PER_CPU(atomic_t, perf_cgroup_events);
static DEFINE_PER_CPU(int, perf_sched_cb_usages);
static DEFINE_PER_CPU(struct pmu_event_list, pmu_sb_events);

static atomic_t nr_mmap_events __read_mostly;
static atomic_t nr_comm_events __read_mostly;
static atomic_t nr_namespaces_events __read_mostly;
static atomic_t nr_task_events __read_mostly;
static atomic_t nr_freq_events __read_mostly;
static atomic_t nr_switch_events __read_mostly;
static atomic_t nr_ksymbol_events __read_mostly;
static atomic_t nr_bpf_events __read_mostly;
static atomic_t nr_cgroup_events __read_mostly;
static atomic_t nr_text_poke_events __read_mostly;
static atomic_t nr_build_id_events __read_mostly;

static LIST_HEAD(pmus);
static DEFINE_MUTEX(pmus_lock);
static struct srcu_struct pmus_srcu;
static cpumask_var_t perf_online_mask;
static struct kmem_cache *perf_event_cache;

/*
 * perf event paranoia level:
 *  -1 - not paranoid at all
 *   0 - disallow raw tracepoint access for unpriv
 *   1 - disallow cpu events for unpriv
 *   2 - disallow kernel profiling for unpriv
 */
int sysctl_perf_event_paranoid __read_mostly = 2;

/* Minimum for 512 kiB + 1 user control page */
int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024); /* 'free' kiB per user */

/*
 * max perf event sample rate
 */
#define DEFAULT_MAX_SAMPLE_RATE         100000
#define DEFAULT_SAMPLE_PERIOD_NS        (NSEC_PER_SEC / DEFAULT_MAX_SAMPLE_RATE)
#define DEFAULT_CPU_TIME_MAX_PERCENT    25

int sysctl_perf_event_sample_rate __read_mostly = DEFAULT_MAX_SAMPLE_RATE;

static int max_samples_per_tick __read_mostly = DIV_ROUND_UP(DEFAULT_MAX_SAMPLE_RATE, HZ);
static int perf_sample_period_ns __read_mostly = DEFAULT_SAMPLE_PERIOD_NS;

static int perf_sample_allowed_ns __read_mostly =
        DEFAULT_SAMPLE_PERIOD_NS * DEFAULT_CPU_TIME_MAX_PERCENT / 100;

static void update_perf_cpu_limits(void)
{
        u64 tmp = perf_sample_period_ns;

        tmp *= sysctl_perf_cpu_time_max_percent;
        tmp = div_u64(tmp, 100);
        if (!tmp)
                tmp = 1;

        WRITE_ONCE(perf_sample_allowed_ns, tmp);
}

static bool perf_rotate_context(struct perf_cpu_context *cpuctx);

int perf_proc_update_handler(struct ctl_table *table, int write,
                             void *buffer, size_t *lenp, loff_t *ppos)
{
        int ret;
        int perf_cpu = sysctl_perf_cpu_time_max_percent;
        /*
         * If throttling is disabled don't allow the write:
         */
        if (write && (perf_cpu == 100 || perf_cpu == 0))
                return -EINVAL;

        ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
        if (ret || !write)
                return ret;

        max_samples_per_tick = DIV_ROUND_UP(sysctl_perf_event_sample_rate, HZ);
        perf_sample_period_ns = NSEC_PER_SEC / sysctl_perf_event_sample_rate;
        update_perf_cpu_limits();

        return 0;
}
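
/*
 * Usage note (illustrative): this handler backs the
 * kernel.perf_event_max_sample_rate sysctl (the same name referenced by the
 * warning in perf_duration_warn() below), so e.g.
 *
 *      sysctl -w kernel.perf_event_max_sample_rate=50000
 *
 * recomputes max_samples_per_tick and perf_sample_period_ns and refreshes the
 * CPU-time budget via update_perf_cpu_limits(). The write is rejected with
 * -EINVAL while the CPU-time-percent knob is 0 or 100, i.e. while dynamic
 * throttling is disabled.
 */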

int sysctl_perf_cpu_time_max_percent __read_mostly = DEFAULT_CPU_TIME_MAX_PERCENT;

int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
                                      void *buffer, size_t *lenp, loff_t *ppos)
{
        int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);

        if (ret || !write)
                return ret;

        if (sysctl_perf_cpu_time_max_percent == 100 ||
            sysctl_perf_cpu_time_max_percent == 0) {
                printk(KERN_WARNING
                       "perf: Dynamic interrupt throttling disabled, can hang your system!\n");
                WRITE_ONCE(perf_sample_allowed_ns, 0);
        } else {
                update_perf_cpu_limits();
        }

        return 0;
}

/*
 * perf samples are done in some very critical code paths (NMIs).
 * If they take too much CPU time, the system can lock up and not
 * get any real work done.  This will drop the sample rate when
 * we detect that events are taking too long.
 */
#define NR_ACCUMULATED_SAMPLES 128
static DEFINE_PER_CPU(u64, running_sample_length);

static u64 __report_avg;
static u64 __report_allowed;

static void perf_duration_warn(struct irq_work *w)
{
        printk_ratelimited(KERN_INFO
                "perf: interrupt took too long (%lld > %lld), lowering "
                "kernel.perf_event_max_sample_rate to %d\n",
                __report_avg, __report_allowed,
                sysctl_perf_event_sample_rate);
}

static DEFINE_IRQ_WORK(perf_duration_work, perf_duration_warn);

void perf_sample_event_took(u64 sample_len_ns)
{
        u64 max_len = READ_ONCE(perf_sample_allowed_ns);
        u64 running_len;
        u64 avg_len;
        u32 max;

        if (max_len == 0)
                return;

        /* Decay the counter by 1 average sample. */
        running_len = __this_cpu_read(running_sample_length);
        running_len -= running_len/NR_ACCUMULATED_SAMPLES;
        running_len += sample_len_ns;
        __this_cpu_write(running_sample_length, running_len);

        /*
         * Note: this will be biased artificially low until we have
         * seen NR_ACCUMULATED_SAMPLES. Doing it this way keeps us
         * from having to maintain a count.
         */
        avg_len = running_len/NR_ACCUMULATED_SAMPLES;
        if (avg_len <= max_len)
                return;

        __report_avg = avg_len;
        __report_allowed = max_len;

        /*
         * Compute a throttle threshold 25% below the current duration.
         */
        avg_len += avg_len / 4;
        max = (TICK_NSEC / 100) * sysctl_perf_cpu_time_max_percent;
        if (avg_len < max)
                max /= (u32)avg_len;
        else
                max = 1;

        WRITE_ONCE(perf_sample_allowed_ns, avg_len);
        WRITE_ONCE(max_samples_per_tick, max);

        sysctl_perf_event_sample_rate = max * HZ;
        perf_sample_period_ns = NSEC_PER_SEC / sysctl_perf_event_sample_rate;

        if (!irq_work_queue(&perf_duration_work)) {
                early_printk("perf: interrupt took too long (%lld > %lld), lowering "
                             "kernel.perf_event_max_sample_rate to %d\n",
                             __report_avg, __report_allowed,
                             sysctl_perf_event_sample_rate);
        }
}
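
/*
 * Worked example of the adjustment above (assuming HZ=1000, so TICK_NSEC is
 * roughly 1,000,000 ns, and the default 25% CPU-time budget):
 *
 *      measured avg_len = 16,000 ns  ->  avg_len += avg_len/4   = 20,000 ns
 *      max = (1,000,000 / 100) * 25                             = 250,000
 *      avg_len < max                 ->  max = 250,000 / 20,000 = 12
 *
 * so perf_sample_allowed_ns becomes 20,000 ns, max_samples_per_tick becomes
 * 12 and the effective sample rate drops to 12 * HZ = 12,000 samples/sec.
 */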

static atomic64_t perf_event_id;

static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
                              enum event_type_t event_type);

static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
                             enum event_type_t event_type,
                             struct task_struct *task);

static void update_context_time(struct perf_event_context *ctx);
static u64 perf_event_time(struct perf_event *event);

void __weak perf_event_print_debug(void) { }

static inline u64 perf_clock(void)
{
        return local_clock();
}

static inline u64 perf_event_clock(struct perf_event *event)
{
        return event->clock();
}

/*
 * State based event timekeeping...
 *
 * The basic idea is to use event->state to determine which (if any) time
 * fields to increment with the current delta. This means we only need to
 * update timestamps when we change state or when they are explicitly requested
 * (read).
 *
 * Event groups make things a little more complicated, but not terribly so. The
 * rules for a group are that if the group leader is OFF the entire group is
 * OFF, irrespective of what the group member states are. This results in
 * __perf_effective_state().
 *
 * A further ramification is that when a group leader flips between OFF and
 * !OFF, we need to update all group member times.
 *
 *
 * NOTE: perf_event_time() is based on the (cgroup) context time, and thus we
 * need to make sure the relevant context time is updated before we try and
 * update our timestamps.
 */
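
/*
 * Illustrative timeline (hypothetical numbers): an event that is enabled
 * (-> INACTIVE) at t=2ms, scheduled onto the PMU (-> ACTIVE) at t=5ms and
 * read at t=9ms accumulates
 *
 *      total_time_enabled = 9 - 2 = 7ms    (state >= INACTIVE)
 *      total_time_running = 9 - 5 = 4ms    (state >= ACTIVE)
 *
 * because __perf_update_times() below only adds the delta since event->tstamp
 * to the fields permitted by the (effective) state at that time.
 */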

static __always_inline enum perf_event_state
__perf_effective_state(struct perf_event *event)
{
        struct perf_event *leader = event->group_leader;

        if (leader->state <= PERF_EVENT_STATE_OFF)
                return leader->state;

        return event->state;
}

static __always_inline void
__perf_update_times(struct perf_event *event, u64 now, u64 *enabled, u64 *running)
{
        enum perf_event_state state = __perf_effective_state(event);
        u64 delta = now - event->tstamp;

        *enabled = event->total_time_enabled;
        if (state >= PERF_EVENT_STATE_INACTIVE)
                *enabled += delta;

        *running = event->total_time_running;
        if (state >= PERF_EVENT_STATE_ACTIVE)
                *running += delta;
}

static void perf_event_update_time(struct perf_event *event)
{
        u64 now = perf_event_time(event);

        __perf_update_times(event, now, &event->total_time_enabled,
                            &event->total_time_running);
        event->tstamp = now;
}

static void perf_event_update_sibling_time(struct perf_event *leader)
{
        struct perf_event *sibling;

        for_each_sibling_event(sibling, leader)
                perf_event_update_time(sibling);
}

static void
perf_event_set_state(struct perf_event *event, enum perf_event_state state)
{
        if (event->state == state)
                return;

        perf_event_update_time(event);
        /*
         * If a group leader gets enabled/disabled all its siblings
         * are affected too.
         */
        if ((event->state < 0) ^ (state < 0))
                perf_event_update_sibling_time(event);

        WRITE_ONCE(event->state, state);
}

/*
 * UP store-release, load-acquire
 */

#define __store_release(ptr, val)                                       \
do {                                                                    \
        barrier();                                                      \
        WRITE_ONCE(*(ptr), (val));                                      \
} while (0)

#define __load_acquire(ptr)                                             \
({                                                                      \
        __unqual_scalar_typeof(*(ptr)) ___p = READ_ONCE(*(ptr));       \
        barrier();                                                      \
        ___p;                                                           \
})

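/*
 * Pairing sketch (as used by the cgroup time code below): the writer
 * publishes its time fields before flipping the flag, and the reader only
 * trusts those fields after observing the flag, e.g.:
 *
 *      info->timestamp/timeoffset = ...;
 *      __store_release(&info->active, 1);
 *
 *      if (__load_acquire(&t->active))
 *              now += READ_ONCE(t->timeoffset);
 *
 * Only compiler barriers are used (hence the "UP" naming above); that is
 * sufficient here because, as noted below, cgroup events are per-cpu, so
 * writer and reader run on the same CPU.
 */
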
#ifdef CONFIG_CGROUP_PERF

static inline bool
perf_cgroup_match(struct perf_event *event)
{
        struct perf_event_context *ctx = event->ctx;
        struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);

        /* @event doesn't care about cgroup */
        if (!event->cgrp)
                return true;

        /* wants specific cgroup scope but @cpuctx isn't associated with any */
        if (!cpuctx->cgrp)
                return false;

        /*
         * Cgroup scoping is recursive.  An event enabled for a cgroup is
         * also enabled for all its descendant cgroups.  If @cpuctx's
         * cgroup is a descendant of @event's (the test covers identity
         * case), it's a match.
         */
        return cgroup_is_descendant(cpuctx->cgrp->css.cgroup,
                                    event->cgrp->css.cgroup);
}

static inline void perf_detach_cgroup(struct perf_event *event)
{
        css_put(&event->cgrp->css);
        event->cgrp = NULL;
}

static inline int is_cgroup_event(struct perf_event *event)
{
        return event->cgrp != NULL;
}

static inline u64 perf_cgroup_event_time(struct perf_event *event)
{
        struct perf_cgroup_info *t;

        t = per_cpu_ptr(event->cgrp->info, event->cpu);
        return t->time;
}

static inline u64 perf_cgroup_event_time_now(struct perf_event *event, u64 now)
{
        struct perf_cgroup_info *t;

        t = per_cpu_ptr(event->cgrp->info, event->cpu);
        if (!__load_acquire(&t->active))
                return t->time;
        now += READ_ONCE(t->timeoffset);
        return now;
}

static inline void __update_cgrp_time(struct perf_cgroup_info *info, u64 now, bool adv)
{
        if (adv)
                info->time += now - info->timestamp;
        info->timestamp = now;
        /*
         * see update_context_time()
         */
        WRITE_ONCE(info->timeoffset, info->time - info->timestamp);
}

static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx, bool final)
{
        struct perf_cgroup *cgrp = cpuctx->cgrp;
        struct cgroup_subsys_state *css;
        struct perf_cgroup_info *info;

        if (cgrp) {
                u64 now = perf_clock();

                for (css = &cgrp->css; css; css = css->parent) {
                        cgrp = container_of(css, struct perf_cgroup, css);
                        info = this_cpu_ptr(cgrp->info);

                        __update_cgrp_time(info, now, true);
                        if (final)
                                __store_release(&info->active, 0);
                }
        }
}

static inline void update_cgrp_time_from_event(struct perf_event *event)
{
        struct perf_cgroup_info *info;
        struct perf_cgroup *cgrp;

        /*
         * ensure we access cgroup data only when needed and
         * when we know the cgroup is pinned (css_get)
         */
        if (!is_cgroup_event(event))
                return;

        cgrp = perf_cgroup_from_task(current, event->ctx);
        /*
         * Do not update time when cgroup is not active
         */
        if (cgroup_is_descendant(cgrp->css.cgroup, event->cgrp->css.cgroup)) {
                info = this_cpu_ptr(event->cgrp->info);
                __update_cgrp_time(info, perf_clock(), true);
        }
}

static inline void
perf_cgroup_set_timestamp(struct task_struct *task,
                          struct perf_event_context *ctx)
{
        struct perf_cgroup *cgrp;
        struct perf_cgroup_info *info;
        struct cgroup_subsys_state *css;

        /*
         * ctx->lock held by caller
         * ensure we do not access cgroup data
         * unless we have the cgroup pinned (css_get)
         */
        if (!task || !ctx->nr_cgroups)
                return;

        cgrp = perf_cgroup_from_task(task, ctx);

        for (css = &cgrp->css; css; css = css->parent) {
                cgrp = container_of(css, struct perf_cgroup, css);
                info = this_cpu_ptr(cgrp->info);
                __update_cgrp_time(info, ctx->timestamp, false);
                __store_release(&info->active, 1);
        }
}

static DEFINE_PER_CPU(struct list_head, cgrp_cpuctx_list);

#define PERF_CGROUP_SWOUT       0x1 /* cgroup switch out every event */
#define PERF_CGROUP_SWIN        0x2 /* cgroup switch in events based on task */

/*
 * reschedule events based on the cgroup constraint of task.
 *
 * mode SWOUT : schedule out everything
 * mode SWIN : schedule in based on cgroup for next
 */
static void perf_cgroup_switch(struct task_struct *task, int mode)
{
        struct perf_cpu_context *cpuctx;
        struct list_head *list;
        unsigned long flags;

        /*
         * Disable interrupts and preemption to avoid this CPU's
         * cgrp_cpuctx_entry to change under us.
         */
        local_irq_save(flags);

        list = this_cpu_ptr(&cgrp_cpuctx_list);
        list_for_each_entry(cpuctx, list, cgrp_cpuctx_entry) {
                WARN_ON_ONCE(cpuctx->ctx.nr_cgroups == 0);

                perf_ctx_lock(cpuctx, cpuctx->task_ctx);
                perf_pmu_disable(cpuctx->ctx.pmu);

                if (mode & PERF_CGROUP_SWOUT) {
                        cpu_ctx_sched_out(cpuctx, EVENT_ALL);
                        /*
                         * must not be done before ctxswout due
                         * to event_filter_match() in event_sched_out()
                         */
                        cpuctx->cgrp = NULL;
                }

                if (mode & PERF_CGROUP_SWIN) {
                        WARN_ON_ONCE(cpuctx->cgrp);
                        /*
                         * set cgrp before ctxsw in to allow
                         * event_filter_match() to not have to pass
                         * task around.  We pass cpuctx->ctx to
                         * perf_cgroup_from_task() because cgroup
                         * events are only per-cpu.
                         */
                        cpuctx->cgrp = perf_cgroup_from_task(task,
                                                             &cpuctx->ctx);
                        cpu_ctx_sched_in(cpuctx, EVENT_ALL, task);
                }
                perf_pmu_enable(cpuctx->ctx.pmu);
                perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
        }

        local_irq_restore(flags);
}

static inline void perf_cgroup_sched_out(struct task_struct *task,
                                         struct task_struct *next)
{
        struct perf_cgroup *cgrp1;
        struct perf_cgroup *cgrp2 = NULL;

        rcu_read_lock();
        /*
         * we come here when we know perf_cgroup_events > 0
         * we do not need to pass the ctx here because we know
         * we are holding the rcu lock
         */
        cgrp1 = perf_cgroup_from_task(task, NULL);
        cgrp2 = perf_cgroup_from_task(next, NULL);

        /*
         * only schedule out current cgroup events if we know
         * that we are switching to a different cgroup. Otherwise,
         * do not touch the cgroup events.
         */
        if (cgrp1 != cgrp2)
                perf_cgroup_switch(task, PERF_CGROUP_SWOUT);

        rcu_read_unlock();
}

static inline void perf_cgroup_sched_in(struct task_struct *prev,
                                        struct task_struct *task)
{
        struct perf_cgroup *cgrp1;
        struct perf_cgroup *cgrp2 = NULL;

        rcu_read_lock();
        /*
         * we come here when we know perf_cgroup_events > 0
         * we do not need to pass the ctx here because we know
         * we are holding the rcu lock
         */
        cgrp1 = perf_cgroup_from_task(task, NULL);
        cgrp2 = perf_cgroup_from_task(prev, NULL);

        /*
         * only need to schedule in cgroup events if we are changing
         * cgroup during ctxsw. Cgroup events were not scheduled out
         * during the previous context switch if that was not the case.
         */
        if (cgrp1 != cgrp2)
                perf_cgroup_switch(task, PERF_CGROUP_SWIN);

        rcu_read_unlock();
}

static int perf_cgroup_ensure_storage(struct perf_event *event,
                                      struct cgroup_subsys_state *css)
{
        struct perf_cpu_context *cpuctx;
        struct perf_event **storage;
        int cpu, heap_size, ret = 0;

        /*
         * Allow storage to have sufficient space for an iterator for each
         * possibly nested cgroup plus an iterator for events with no cgroup.
         */
        for (heap_size = 1; css; css = css->parent)
                heap_size++;

        for_each_possible_cpu(cpu) {
                cpuctx = per_cpu_ptr(event->pmu->pmu_cpu_context, cpu);
                if (heap_size <= cpuctx->heap_size)
                        continue;

                storage = kmalloc_node(heap_size * sizeof(struct perf_event *),
                                       GFP_KERNEL, cpu_to_node(cpu));
                if (!storage) {
                        ret = -ENOMEM;
                        break;
                }

                raw_spin_lock_irq(&cpuctx->ctx.lock);
                if (cpuctx->heap_size < heap_size) {
                        swap(cpuctx->heap, storage);
                        if (storage == cpuctx->heap_default)
                                storage = NULL;
                        cpuctx->heap_size = heap_size;
                }
                raw_spin_unlock_irq(&cpuctx->ctx.lock);

                kfree(storage);
        }

        return ret;
}
| 980 | |
Stephane Eranian | e5d1367 | 2011-02-14 11:20:01 +0200 | [diff] [blame] | 981 | static inline int perf_cgroup_connect(int fd, struct perf_event *event, |
| 982 | struct perf_event_attr *attr, |
| 983 | struct perf_event *group_leader) |
| 984 | { |
| 985 | struct perf_cgroup *cgrp; |
| 986 | struct cgroup_subsys_state *css; |
Al Viro | 2903ff0 | 2012-08-28 12:52:22 -0400 | [diff] [blame] | 987 | struct fd f = fdget(fd); |
| 988 | int ret = 0; |
Stephane Eranian | e5d1367 | 2011-02-14 11:20:01 +0200 | [diff] [blame] | 989 | |
Al Viro | 2903ff0 | 2012-08-28 12:52:22 -0400 | [diff] [blame] | 990 | if (!f.file) |
Stephane Eranian | e5d1367 | 2011-02-14 11:20:01 +0200 | [diff] [blame] | 991 | return -EBADF; |
| 992 | |
Al Viro | b583043 | 2014-10-31 01:22:04 -0400 | [diff] [blame] | 993 | css = css_tryget_online_from_dir(f.file->f_path.dentry, |
Tejun Heo | ec903c0 | 2014-05-13 12:11:01 -0400 | [diff] [blame] | 994 | &perf_event_cgrp_subsys); |
Li Zefan | 3db272c | 2011-03-03 14:25:37 +0800 | [diff] [blame] | 995 | if (IS_ERR(css)) { |
| 996 | ret = PTR_ERR(css); |
| 997 | goto out; |
| 998 | } |
Stephane Eranian | e5d1367 | 2011-02-14 11:20:01 +0200 | [diff] [blame] | 999 | |
Ian Rogers | c2283c9 | 2020-02-13 23:51:32 -0800 | [diff] [blame] | 1000 | ret = perf_cgroup_ensure_storage(event, css); |
| 1001 | if (ret) |
| 1002 | goto out; |
| 1003 | |
Stephane Eranian | e5d1367 | 2011-02-14 11:20:01 +0200 | [diff] [blame] | 1004 | cgrp = container_of(css, struct perf_cgroup, css); |
| 1005 | event->cgrp = cgrp; |
| 1006 | |
| 1007 | /* |
| 1008 | * all events in a group must monitor |
| 1009 | * the same cgroup because a task belongs |
| 1010 | * to only one perf cgroup at a time |
| 1011 | */ |
| 1012 | if (group_leader && group_leader->cgrp != cgrp) { |
| 1013 | perf_detach_cgroup(event); |
| 1014 | ret = -EINVAL; |
Stephane Eranian | e5d1367 | 2011-02-14 11:20:01 +0200 | [diff] [blame] | 1015 | } |
Li Zefan | 3db272c | 2011-03-03 14:25:37 +0800 | [diff] [blame] | 1016 | out: |
Al Viro | 2903ff0 | 2012-08-28 12:52:22 -0400 | [diff] [blame] | 1017 | fdput(f); |
Stephane Eranian | e5d1367 | 2011-02-14 11:20:01 +0200 | [diff] [blame] | 1018 | return ret; |
| 1019 | } |
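/*
 * Usage sketch (illustrative, not part of the original source): the @fd
 * passed in here is believed to come from userspace as the "pid" argument
 * of perf_event_open() when PERF_FLAG_PID_CGROUP is set, i.e. an open fd
 * on a cgroup directory:
 *
 *	int cgrp_fd = open("/sys/fs/cgroup/mygroup", O_RDONLY);
 *	int ev_fd = syscall(__NR_perf_event_open, &attr, cgrp_fd, cpu,
 *			    -1, PERF_FLAG_PID_CGROUP);
 *
 * "mygroup" is a hypothetical cgroup path; @cpu must be >= 0 because
 * cgroup events are always per-CPU events.
 */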
| 1020 | |
| 1021 | static inline void |
Peter Zijlstra | 33238c5 | 2020-03-18 20:33:37 +0100 | [diff] [blame] | 1022 | perf_cgroup_event_enable(struct perf_event *event, struct perf_event_context *ctx) |
David Carrillo-Cisneros | db4a835 | 2016-08-02 00:48:12 -0700 | [diff] [blame] | 1023 | { |
| 1024 | struct perf_cpu_context *cpuctx; |
| 1025 | |
| 1026 | if (!is_cgroup_event(event)) |
| 1027 | return; |
| 1028 | |
David Carrillo-Cisneros | db4a835 | 2016-08-02 00:48:12 -0700 | [diff] [blame] | 1029 | /* |
| 1030 | * Because cgroup events are always per-cpu events, |
Song Liu | 07c5972 | 2020-01-22 11:50:27 -0800 | [diff] [blame] | 1031 | * @ctx == &cpuctx->ctx. |
David Carrillo-Cisneros | db4a835 | 2016-08-02 00:48:12 -0700 | [diff] [blame] | 1032 | */ |
Song Liu | 07c5972 | 2020-01-22 11:50:27 -0800 | [diff] [blame] | 1033 | cpuctx = container_of(ctx, struct perf_cpu_context, ctx); |
leilei.lin | 33801b9 | 2018-03-06 17:36:37 +0800 | [diff] [blame] | 1034 | |
| 1035 | /* |
| 1036 | * Since setting cpuctx->cgrp is conditional on the current @cgrp |
| 1037 | * matching the event's cgroup, we must do this for every new event, |
| 1038 | * because if the first would mismatch, the second would not try again |
| 1039 | * and we would leave cpuctx->cgrp unset. |
| 1040 | */ |
Peter Zijlstra | 33238c5 | 2020-03-18 20:33:37 +0100 | [diff] [blame] | 1041 | if (ctx->is_active && !cpuctx->cgrp) { |
Tejun Heo | be96b31 | 2017-10-28 09:49:37 -0700 | [diff] [blame] | 1042 | struct perf_cgroup *cgrp = perf_cgroup_from_task(current, ctx); |
| 1043 | |
Tejun Heo | be96b31 | 2017-10-28 09:49:37 -0700 | [diff] [blame] | 1044 | if (cgroup_is_descendant(cgrp->css.cgroup, event->cgrp->css.cgroup)) |
| 1045 | cpuctx->cgrp = cgrp; |
David Carrillo-Cisneros | 058fe1c | 2017-01-18 11:24:53 -0800 | [diff] [blame] | 1046 | } |
leilei.lin | 33801b9 | 2018-03-06 17:36:37 +0800 | [diff] [blame] | 1047 | |
Peter Zijlstra | 33238c5 | 2020-03-18 20:33:37 +0100 | [diff] [blame] | 1048 | if (ctx->nr_cgroups++) |
leilei.lin | 33801b9 | 2018-03-06 17:36:37 +0800 | [diff] [blame] | 1049 | return; |
| 1050 | |
Peter Zijlstra | 33238c5 | 2020-03-18 20:33:37 +0100 | [diff] [blame] | 1051 | list_add(&cpuctx->cgrp_cpuctx_entry, |
| 1052 | per_cpu_ptr(&cgrp_cpuctx_list, event->cpu)); |
| 1053 | } |
| 1054 | |
| 1055 | static inline void |
| 1056 | perf_cgroup_event_disable(struct perf_event *event, struct perf_event_context *ctx) |
| 1057 | { |
| 1058 | struct perf_cpu_context *cpuctx; |
| 1059 | |
| 1060 | if (!is_cgroup_event(event)) |
| 1061 | return; |
| 1062 | |
| 1063 | /* |
| 1064 | * Because cgroup events are always per-cpu events, |
| 1065 | * @ctx == &cpuctx->ctx. |
| 1066 | */ |
| 1067 | cpuctx = container_of(ctx, struct perf_cpu_context, ctx); |
| 1068 | |
| 1069 | if (--ctx->nr_cgroups) |
| 1070 | return; |
| 1071 | |
| 1072 | if (ctx->is_active && cpuctx->cgrp) |
leilei.lin | 33801b9 | 2018-03-06 17:36:37 +0800 | [diff] [blame] | 1073 | cpuctx->cgrp = NULL; |
| 1074 | |
Peter Zijlstra | 33238c5 | 2020-03-18 20:33:37 +0100 | [diff] [blame] | 1075 | list_del(&cpuctx->cgrp_cpuctx_entry); |
David Carrillo-Cisneros | db4a835 | 2016-08-02 00:48:12 -0700 | [diff] [blame] | 1076 | } |
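/*
 * Illustrative note (not part of the original source):
 * perf_cgroup_event_enable() and perf_cgroup_event_disable() pair up via
 * ctx->nr_cgroups -- only the first enable adds the cpuctx to
 * cgrp_cpuctx_list and only the last disable removes it again, so the
 * list contains exactly those CPU contexts that currently have at least
 * one cgroup event.
 */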
| 1077 | |
Stephane Eranian | e5d1367 | 2011-02-14 11:20:01 +0200 | [diff] [blame] | 1078 | #else /* !CONFIG_CGROUP_PERF */ |
| 1079 | |
| 1080 | static inline bool |
| 1081 | perf_cgroup_match(struct perf_event *event) |
| 1082 | { |
| 1083 | return true; |
| 1084 | } |
| 1085 | |
| 1086 | static inline void perf_detach_cgroup(struct perf_event *event) |
| 1087 | {} |
| 1088 | |
| 1089 | static inline int is_cgroup_event(struct perf_event *event) |
| 1090 | { |
| 1091 | return 0; |
| 1092 | } |
| 1093 | |
Stephane Eranian | e5d1367 | 2011-02-14 11:20:01 +0200 | [diff] [blame] | 1094 | static inline void update_cgrp_time_from_event(struct perf_event *event) |
| 1095 | { |
| 1096 | } |
| 1097 | |
Peter Zijlstra | 09f5e7d | 2021-12-20 13:19:52 +0100 | [diff] [blame] | 1098 | static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx, |
| 1099 | bool final) |
Stephane Eranian | e5d1367 | 2011-02-14 11:20:01 +0200 | [diff] [blame] | 1100 | { |
| 1101 | } |
| 1102 | |
Stephane Eranian | a8d757e | 2011-08-25 15:58:03 +0200 | [diff] [blame] | 1103 | static inline void perf_cgroup_sched_out(struct task_struct *task, |
| 1104 | struct task_struct *next) |
Stephane Eranian | e5d1367 | 2011-02-14 11:20:01 +0200 | [diff] [blame] | 1105 | { |
| 1106 | } |
| 1107 | |
Stephane Eranian | a8d757e | 2011-08-25 15:58:03 +0200 | [diff] [blame] | 1108 | static inline void perf_cgroup_sched_in(struct task_struct *prev, |
| 1109 | struct task_struct *task) |
Stephane Eranian | e5d1367 | 2011-02-14 11:20:01 +0200 | [diff] [blame] | 1110 | { |
| 1111 | } |
| 1112 | |
| 1113 | static inline int perf_cgroup_connect(pid_t pid, struct perf_event *event, |
| 1114 | struct perf_event_attr *attr, |
| 1115 | struct perf_event *group_leader) |
| 1116 | { |
| 1117 | return -EINVAL; |
| 1118 | } |
| 1119 | |
| 1120 | static inline void |
Stephane Eranian | 3f7cce3 | 2011-02-18 14:40:01 +0200 | [diff] [blame] | 1121 | perf_cgroup_set_timestamp(struct task_struct *task, |
| 1122 | struct perf_event_context *ctx) |
Stephane Eranian | e5d1367 | 2011-02-14 11:20:01 +0200 | [diff] [blame] | 1123 | { |
| 1124 | } |
| 1125 | |
Ben Dooks (Codethink) | d00dbd29 | 2019-11-06 13:25:27 +0000 | [diff] [blame] | 1126 | static inline void |
Stephane Eranian | e5d1367 | 2011-02-14 11:20:01 +0200 | [diff] [blame] | 1127 | perf_cgroup_switch(struct task_struct *task, struct task_struct *next) |
| 1128 | { |
| 1129 | } |
| 1130 | |
Peter Zijlstra | 09f5e7d | 2021-12-20 13:19:52 +0100 | [diff] [blame] | 1131 | static inline u64 perf_cgroup_event_time(struct perf_event *event) |
Stephane Eranian | e5d1367 | 2011-02-14 11:20:01 +0200 | [diff] [blame] | 1132 | { |
Peter Zijlstra | 09f5e7d | 2021-12-20 13:19:52 +0100 | [diff] [blame] | 1133 | return 0; |
Stephane Eranian | e5d1367 | 2011-02-14 11:20:01 +0200 | [diff] [blame] | 1134 | } |
| 1135 | |
Peter Zijlstra | 09f5e7d | 2021-12-20 13:19:52 +0100 | [diff] [blame] | 1136 | static inline u64 perf_cgroup_event_time_now(struct perf_event *event, u64 now) |
Stephane Eranian | e5d1367 | 2011-02-14 11:20:01 +0200 | [diff] [blame] | 1137 | { |
| 1138 | return 0; |
| 1139 | } |
| 1140 | |
| 1141 | static inline void |
Peter Zijlstra | 33238c5 | 2020-03-18 20:33:37 +0100 | [diff] [blame] | 1142 | perf_cgroup_event_enable(struct perf_event *event, struct perf_event_context *ctx) |
David Carrillo-Cisneros | db4a835 | 2016-08-02 00:48:12 -0700 | [diff] [blame] | 1143 | { |
| 1144 | } |
| 1145 | |
Peter Zijlstra | 33238c5 | 2020-03-18 20:33:37 +0100 | [diff] [blame] | 1146 | static inline void |
| 1147 | perf_cgroup_event_disable(struct perf_event *event, struct perf_event_context *ctx) |
| 1148 | { |
| 1149 | } |
Stephane Eranian | e5d1367 | 2011-02-14 11:20:01 +0200 | [diff] [blame] | 1150 | #endif |
| 1151 | |
Stephane Eranian | 9e63020 | 2013-04-03 14:21:33 +0200 | [diff] [blame] | 1152 | /* |
 | 1153 |  * Set the default multiplexing interval to depend on the timer tick, |
 | 1154 |  * just like the original code. |
| 1155 | */ |
| 1156 | #define PERF_CPU_HRTIMER (1000 / HZ) |
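/*
 * Illustrative note (not part of the original source): with HZ == 1000 the
 * default multiplexing interval works out to 1 ms, with HZ == 250 to 4 ms.
 * The per-PMU value is expected to be tunable at runtime via the
 * perf_event_mux_interval_ms sysfs attribute, e.g.:
 *
 *	echo 2 > /sys/bus/event_source/devices/cpu/perf_event_mux_interval_ms
 */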
| 1157 | /* |
Masahiro Yamada | 8a1115f | 2017-03-09 16:16:31 -0800 | [diff] [blame] | 1158 | * function must be called with interrupts disabled |
Stephane Eranian | 9e63020 | 2013-04-03 14:21:33 +0200 | [diff] [blame] | 1159 | */ |
Peter Zijlstra | 272325c | 2015-04-15 11:41:58 +0200 | [diff] [blame] | 1160 | static enum hrtimer_restart perf_mux_hrtimer_handler(struct hrtimer *hr) |
Stephane Eranian | 9e63020 | 2013-04-03 14:21:33 +0200 | [diff] [blame] | 1161 | { |
| 1162 | struct perf_cpu_context *cpuctx; |
Peter Zijlstra | 8d5bce0 | 2018-03-09 14:56:27 +0100 | [diff] [blame] | 1163 | bool rotations; |
Stephane Eranian | 9e63020 | 2013-04-03 14:21:33 +0200 | [diff] [blame] | 1164 | |
Frederic Weisbecker | 1644464 | 2017-11-06 16:01:24 +0100 | [diff] [blame] | 1165 | lockdep_assert_irqs_disabled(); |
Stephane Eranian | 9e63020 | 2013-04-03 14:21:33 +0200 | [diff] [blame] | 1166 | |
| 1167 | cpuctx = container_of(hr, struct perf_cpu_context, hrtimer); |
Stephane Eranian | 9e63020 | 2013-04-03 14:21:33 +0200 | [diff] [blame] | 1168 | rotations = perf_rotate_context(cpuctx); |
| 1169 | |
Peter Zijlstra | 4cfafd3 | 2015-05-14 12:23:11 +0200 | [diff] [blame] | 1170 | raw_spin_lock(&cpuctx->hrtimer_lock); |
| 1171 | if (rotations) |
Stephane Eranian | 9e63020 | 2013-04-03 14:21:33 +0200 | [diff] [blame] | 1172 | hrtimer_forward_now(hr, cpuctx->hrtimer_interval); |
Peter Zijlstra | 4cfafd3 | 2015-05-14 12:23:11 +0200 | [diff] [blame] | 1173 | else |
| 1174 | cpuctx->hrtimer_active = 0; |
| 1175 | raw_spin_unlock(&cpuctx->hrtimer_lock); |
Stephane Eranian | 9e63020 | 2013-04-03 14:21:33 +0200 | [diff] [blame] | 1176 | |
Peter Zijlstra | 4cfafd3 | 2015-05-14 12:23:11 +0200 | [diff] [blame] | 1177 | return rotations ? HRTIMER_RESTART : HRTIMER_NORESTART; |
Stephane Eranian | 9e63020 | 2013-04-03 14:21:33 +0200 | [diff] [blame] | 1178 | } |
| 1179 | |
Peter Zijlstra | 272325c | 2015-04-15 11:41:58 +0200 | [diff] [blame] | 1180 | static void __perf_mux_hrtimer_init(struct perf_cpu_context *cpuctx, int cpu) |
Stephane Eranian | 9e63020 | 2013-04-03 14:21:33 +0200 | [diff] [blame] | 1181 | { |
Peter Zijlstra | 272325c | 2015-04-15 11:41:58 +0200 | [diff] [blame] | 1182 | struct hrtimer *timer = &cpuctx->hrtimer; |
Stephane Eranian | 9e63020 | 2013-04-03 14:21:33 +0200 | [diff] [blame] | 1183 | struct pmu *pmu = cpuctx->ctx.pmu; |
Peter Zijlstra | 272325c | 2015-04-15 11:41:58 +0200 | [diff] [blame] | 1184 | u64 interval; |
Stephane Eranian | 9e63020 | 2013-04-03 14:21:33 +0200 | [diff] [blame] | 1185 | |
| 1186 | /* no multiplexing needed for SW PMU */ |
| 1187 | if (pmu->task_ctx_nr == perf_sw_context) |
| 1188 | return; |
| 1189 | |
Stephane Eranian | 62b8563 | 2013-04-03 14:21:34 +0200 | [diff] [blame] | 1190 | /* |
 | 1191 | 	 * Check that the default is sane; if it is not set, force it to |
 | 1192 | 	 * the default interval (one tick). |
| 1193 | */ |
Peter Zijlstra | 272325c | 2015-04-15 11:41:58 +0200 | [diff] [blame] | 1194 | interval = pmu->hrtimer_interval_ms; |
| 1195 | if (interval < 1) |
| 1196 | interval = pmu->hrtimer_interval_ms = PERF_CPU_HRTIMER; |
Stephane Eranian | 62b8563 | 2013-04-03 14:21:34 +0200 | [diff] [blame] | 1197 | |
Peter Zijlstra | 272325c | 2015-04-15 11:41:58 +0200 | [diff] [blame] | 1198 | cpuctx->hrtimer_interval = ns_to_ktime(NSEC_PER_MSEC * interval); |
Stephane Eranian | 9e63020 | 2013-04-03 14:21:33 +0200 | [diff] [blame] | 1199 | |
Peter Zijlstra | 4cfafd3 | 2015-05-14 12:23:11 +0200 | [diff] [blame] | 1200 | raw_spin_lock_init(&cpuctx->hrtimer_lock); |
Sebastian Andrzej Siewior | 30f9028 | 2019-07-26 20:30:53 +0200 | [diff] [blame] | 1201 | hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED_HARD); |
Peter Zijlstra | 272325c | 2015-04-15 11:41:58 +0200 | [diff] [blame] | 1202 | timer->function = perf_mux_hrtimer_handler; |
Stephane Eranian | 9e63020 | 2013-04-03 14:21:33 +0200 | [diff] [blame] | 1203 | } |
| 1204 | |
Peter Zijlstra | 272325c | 2015-04-15 11:41:58 +0200 | [diff] [blame] | 1205 | static int perf_mux_hrtimer_restart(struct perf_cpu_context *cpuctx) |
Stephane Eranian | 9e63020 | 2013-04-03 14:21:33 +0200 | [diff] [blame] | 1206 | { |
Peter Zijlstra | 272325c | 2015-04-15 11:41:58 +0200 | [diff] [blame] | 1207 | struct hrtimer *timer = &cpuctx->hrtimer; |
Stephane Eranian | 9e63020 | 2013-04-03 14:21:33 +0200 | [diff] [blame] | 1208 | struct pmu *pmu = cpuctx->ctx.pmu; |
Peter Zijlstra | 4cfafd3 | 2015-05-14 12:23:11 +0200 | [diff] [blame] | 1209 | unsigned long flags; |
Stephane Eranian | 9e63020 | 2013-04-03 14:21:33 +0200 | [diff] [blame] | 1210 | |
| 1211 | /* not for SW PMU */ |
| 1212 | if (pmu->task_ctx_nr == perf_sw_context) |
Peter Zijlstra | 272325c | 2015-04-15 11:41:58 +0200 | [diff] [blame] | 1213 | return 0; |
Stephane Eranian | 9e63020 | 2013-04-03 14:21:33 +0200 | [diff] [blame] | 1214 | |
Peter Zijlstra | 4cfafd3 | 2015-05-14 12:23:11 +0200 | [diff] [blame] | 1215 | raw_spin_lock_irqsave(&cpuctx->hrtimer_lock, flags); |
| 1216 | if (!cpuctx->hrtimer_active) { |
| 1217 | cpuctx->hrtimer_active = 1; |
| 1218 | hrtimer_forward_now(timer, cpuctx->hrtimer_interval); |
Sebastian Andrzej Siewior | 30f9028 | 2019-07-26 20:30:53 +0200 | [diff] [blame] | 1219 | hrtimer_start_expires(timer, HRTIMER_MODE_ABS_PINNED_HARD); |
Peter Zijlstra | 4cfafd3 | 2015-05-14 12:23:11 +0200 | [diff] [blame] | 1220 | } |
| 1221 | raw_spin_unlock_irqrestore(&cpuctx->hrtimer_lock, flags); |
Stephane Eranian | 9e63020 | 2013-04-03 14:21:33 +0200 | [diff] [blame] | 1222 | |
Peter Zijlstra | 272325c | 2015-04-15 11:41:58 +0200 | [diff] [blame] | 1223 | return 0; |
Stephane Eranian | 9e63020 | 2013-04-03 14:21:33 +0200 | [diff] [blame] | 1224 | } |
| 1225 | |
Peter Zijlstra | 33696fc | 2010-06-14 08:49:00 +0200 | [diff] [blame] | 1226 | void perf_pmu_disable(struct pmu *pmu) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1227 | { |
Peter Zijlstra | 33696fc | 2010-06-14 08:49:00 +0200 | [diff] [blame] | 1228 | int *count = this_cpu_ptr(pmu->pmu_disable_count); |
| 1229 | if (!(*count)++) |
| 1230 | pmu->pmu_disable(pmu); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1231 | } |
| 1232 | |
Peter Zijlstra | 33696fc | 2010-06-14 08:49:00 +0200 | [diff] [blame] | 1233 | void perf_pmu_enable(struct pmu *pmu) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1234 | { |
Peter Zijlstra | 33696fc | 2010-06-14 08:49:00 +0200 | [diff] [blame] | 1235 | int *count = this_cpu_ptr(pmu->pmu_disable_count); |
| 1236 | if (!--(*count)) |
| 1237 | pmu->pmu_enable(pmu); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1238 | } |
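/*
 * Usage sketch (illustrative, not part of the original source): the
 * per-CPU disable count makes these calls nestable, so callers bracket a
 * batch of event updates and only the outermost pair actually invokes the
 * PMU callbacks:
 *
 *	perf_pmu_disable(pmu);
 *	// reprogram / reschedule events on this PMU
 *	perf_pmu_enable(pmu);
 */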
| 1239 | |
Mark Rutland | 2fde4f9 | 2015-01-07 15:01:54 +0000 | [diff] [blame] | 1240 | static DEFINE_PER_CPU(struct list_head, active_ctx_list); |
Peter Zijlstra | e9d2b06 | 2010-09-17 11:28:50 +0200 | [diff] [blame] | 1241 | |
| 1242 | /* |
Mark Rutland | 2fde4f9 | 2015-01-07 15:01:54 +0000 | [diff] [blame] | 1243 | * perf_event_ctx_activate(), perf_event_ctx_deactivate(), and |
| 1244 | * perf_event_task_tick() are fully serialized because they're strictly cpu |
| 1245 | * affine and perf_event_ctx{activate,deactivate} are called with IRQs |
| 1246 | * disabled, while perf_event_task_tick is called from IRQ context. |
Peter Zijlstra | e9d2b06 | 2010-09-17 11:28:50 +0200 | [diff] [blame] | 1247 | */ |
Mark Rutland | 2fde4f9 | 2015-01-07 15:01:54 +0000 | [diff] [blame] | 1248 | static void perf_event_ctx_activate(struct perf_event_context *ctx) |
Peter Zijlstra | b5ab4cd | 2010-09-06 16:32:21 +0200 | [diff] [blame] | 1249 | { |
Mark Rutland | 2fde4f9 | 2015-01-07 15:01:54 +0000 | [diff] [blame] | 1250 | struct list_head *head = this_cpu_ptr(&active_ctx_list); |
Peter Zijlstra | b5ab4cd | 2010-09-06 16:32:21 +0200 | [diff] [blame] | 1251 | |
Frederic Weisbecker | 1644464 | 2017-11-06 16:01:24 +0100 | [diff] [blame] | 1252 | lockdep_assert_irqs_disabled(); |
Peter Zijlstra | b5ab4cd | 2010-09-06 16:32:21 +0200 | [diff] [blame] | 1253 | |
Mark Rutland | 2fde4f9 | 2015-01-07 15:01:54 +0000 | [diff] [blame] | 1254 | WARN_ON(!list_empty(&ctx->active_ctx_list)); |
| 1255 | |
| 1256 | list_add(&ctx->active_ctx_list, head); |
| 1257 | } |
| 1258 | |
| 1259 | static void perf_event_ctx_deactivate(struct perf_event_context *ctx) |
| 1260 | { |
Frederic Weisbecker | 1644464 | 2017-11-06 16:01:24 +0100 | [diff] [blame] | 1261 | lockdep_assert_irqs_disabled(); |
Mark Rutland | 2fde4f9 | 2015-01-07 15:01:54 +0000 | [diff] [blame] | 1262 | |
| 1263 | WARN_ON(list_empty(&ctx->active_ctx_list)); |
| 1264 | |
| 1265 | list_del_init(&ctx->active_ctx_list); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1266 | } |
| 1267 | |
| 1268 | static void get_ctx(struct perf_event_context *ctx) |
| 1269 | { |
Elena Reshetova | 8c94abb | 2019-01-28 14:27:26 +0200 | [diff] [blame] | 1270 | refcount_inc(&ctx->refcount); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1271 | } |
| 1272 | |
Kan Liang | ff9ff92 | 2020-07-03 05:49:21 -0700 | [diff] [blame] | 1273 | static void *alloc_task_ctx_data(struct pmu *pmu) |
| 1274 | { |
Kan Liang | 217c2a6 | 2020-07-03 05:49:22 -0700 | [diff] [blame] | 1275 | if (pmu->task_ctx_cache) |
| 1276 | return kmem_cache_zalloc(pmu->task_ctx_cache, GFP_KERNEL); |
| 1277 | |
Kan Liang | 5a09928 | 2020-07-03 05:49:24 -0700 | [diff] [blame] | 1278 | return NULL; |
Kan Liang | ff9ff92 | 2020-07-03 05:49:21 -0700 | [diff] [blame] | 1279 | } |
| 1280 | |
| 1281 | static void free_task_ctx_data(struct pmu *pmu, void *task_ctx_data) |
| 1282 | { |
Kan Liang | 217c2a6 | 2020-07-03 05:49:22 -0700 | [diff] [blame] | 1283 | if (pmu->task_ctx_cache && task_ctx_data) |
| 1284 | kmem_cache_free(pmu->task_ctx_cache, task_ctx_data); |
Kan Liang | ff9ff92 | 2020-07-03 05:49:21 -0700 | [diff] [blame] | 1285 | } |
| 1286 | |
Yan, Zheng | 4af57ef | 2014-11-04 21:56:01 -0500 | [diff] [blame] | 1287 | static void free_ctx(struct rcu_head *head) |
| 1288 | { |
| 1289 | struct perf_event_context *ctx; |
| 1290 | |
| 1291 | ctx = container_of(head, struct perf_event_context, rcu_head); |
Kan Liang | ff9ff92 | 2020-07-03 05:49:21 -0700 | [diff] [blame] | 1292 | free_task_ctx_data(ctx->pmu, ctx->task_ctx_data); |
Yan, Zheng | 4af57ef | 2014-11-04 21:56:01 -0500 | [diff] [blame] | 1293 | kfree(ctx); |
| 1294 | } |
| 1295 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1296 | static void put_ctx(struct perf_event_context *ctx) |
| 1297 | { |
Elena Reshetova | 8c94abb | 2019-01-28 14:27:26 +0200 | [diff] [blame] | 1298 | if (refcount_dec_and_test(&ctx->refcount)) { |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1299 | if (ctx->parent_ctx) |
| 1300 | put_ctx(ctx->parent_ctx); |
Peter Zijlstra | 63b6da3 | 2016-01-14 16:05:37 +0100 | [diff] [blame] | 1301 | if (ctx->task && ctx->task != TASK_TOMBSTONE) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1302 | put_task_struct(ctx->task); |
Yan, Zheng | 4af57ef | 2014-11-04 21:56:01 -0500 | [diff] [blame] | 1303 | call_rcu(&ctx->rcu_head, free_ctx); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1304 | } |
| 1305 | } |
| 1306 | |
Peter Zijlstra | 211de6e | 2014-09-30 19:23:08 +0200 | [diff] [blame] | 1307 | /* |
Peter Zijlstra | f63a8da | 2015-01-23 12:24:14 +0100 | [diff] [blame] | 1308 | * Because of perf_event::ctx migration in sys_perf_event_open::move_group and |
| 1309 | * perf_pmu_migrate_context() we need some magic. |
| 1310 | * |
| 1311 | * Those places that change perf_event::ctx will hold both |
| 1312 | * perf_event_ctx::mutex of the 'old' and 'new' ctx value. |
| 1313 | * |
Peter Zijlstra | 8b10c5e | 2015-05-01 16:08:46 +0200 | [diff] [blame] | 1314 | * Lock ordering is by mutex address. There are two other sites where |
| 1315 | * perf_event_context::mutex nests and those are: |
| 1316 | * |
| 1317 | * - perf_event_exit_task_context() [ child , 0 ] |
Peter Zijlstra | 8ba289b | 2016-01-26 13:06:56 +0100 | [diff] [blame] | 1318 | * perf_event_exit_event() |
| 1319 | * put_event() [ parent, 1 ] |
Peter Zijlstra | 8b10c5e | 2015-05-01 16:08:46 +0200 | [diff] [blame] | 1320 | * |
| 1321 | * - perf_event_init_context() [ parent, 0 ] |
| 1322 | * inherit_task_group() |
| 1323 | * inherit_group() |
| 1324 | * inherit_event() |
| 1325 | * perf_event_alloc() |
| 1326 | * perf_init_event() |
| 1327 | * perf_try_init_event() [ child , 1 ] |
| 1328 | * |
 | 1329 |  * While it appears there is an obvious deadlock here -- the parent and child |
 | 1330 |  * nesting levels are inverted between the two -- this is in fact safe because |
 | 1331 |  * life-time rules separate them: an exiting task cannot fork, and a |
 | 1332 |  * spawning task cannot (yet) exit. |
| 1333 | * |
Randy Dunlap | c034f48 | 2021-02-25 17:21:10 -0800 | [diff] [blame] | 1334 | * But remember that these are parent<->child context relations, and |
Peter Zijlstra | 8b10c5e | 2015-05-01 16:08:46 +0200 | [diff] [blame] | 1335 | * migration does not affect children, therefore these two orderings should not |
| 1336 | * interact. |
Peter Zijlstra | f63a8da | 2015-01-23 12:24:14 +0100 | [diff] [blame] | 1337 | * |
| 1338 | * The change in perf_event::ctx does not affect children (as claimed above) |
| 1339 | * because the sys_perf_event_open() case will install a new event and break |
| 1340 | * the ctx parent<->child relation, and perf_pmu_migrate_context() is only |
| 1341 | * concerned with cpuctx and that doesn't have children. |
| 1342 | * |
| 1343 | * The places that change perf_event::ctx will issue: |
| 1344 | * |
| 1345 | * perf_remove_from_context(); |
| 1346 | * synchronize_rcu(); |
| 1347 | * perf_install_in_context(); |
| 1348 | * |
| 1349 | * to affect the change. The remove_from_context() + synchronize_rcu() should |
| 1350 | * quiesce the event, after which we can install it in the new location. This |
| 1351 | * means that only external vectors (perf_fops, prctl) can perturb the event |
| 1352 | * while in transit. Therefore all such accessors should also acquire |
| 1353 | * perf_event_context::mutex to serialize against this. |
| 1354 | * |
| 1355 | * However; because event->ctx can change while we're waiting to acquire |
| 1356 | * ctx->mutex we must be careful and use the below perf_event_ctx_lock() |
| 1357 | * function. |
| 1358 | * |
| 1359 | * Lock order: |
Eric W. Biederman | f7cfd87 | 2020-12-03 14:12:00 -0600 | [diff] [blame] | 1360 | * exec_update_lock |
Peter Zijlstra | f63a8da | 2015-01-23 12:24:14 +0100 | [diff] [blame] | 1361 | * task_struct::perf_event_mutex |
| 1362 | * perf_event_context::mutex |
Peter Zijlstra | f63a8da | 2015-01-23 12:24:14 +0100 | [diff] [blame] | 1363 | * perf_event::child_mutex; |
Peter Zijlstra | 07c4a77 | 2016-01-26 12:15:37 +0100 | [diff] [blame] | 1364 | * perf_event_context::lock |
Peter Zijlstra | f63a8da | 2015-01-23 12:24:14 +0100 | [diff] [blame] | 1365 | * perf_event::mmap_mutex |
Michel Lespinasse | c1e8d7c | 2020-06-08 21:33:54 -0700 | [diff] [blame] | 1366 | * mmap_lock |
Alexander Shishkin | 18736ee | 2019-02-15 13:56:54 +0200 | [diff] [blame] | 1367 | * perf_addr_filters_head::lock |
Peter Zijlstra | 82d9485 | 2018-01-09 13:10:30 +0100 | [diff] [blame] | 1368 | * |
| 1369 | * cpu_hotplug_lock |
| 1370 | * pmus_lock |
| 1371 | * cpuctx->mutex / perf_event_context::mutex |
Peter Zijlstra | f63a8da | 2015-01-23 12:24:14 +0100 | [diff] [blame] | 1372 | */ |
Peter Zijlstra | a83fe28 | 2015-01-29 14:44:34 +0100 | [diff] [blame] | 1373 | static struct perf_event_context * |
| 1374 | perf_event_ctx_lock_nested(struct perf_event *event, int nesting) |
Peter Zijlstra | f63a8da | 2015-01-23 12:24:14 +0100 | [diff] [blame] | 1375 | { |
| 1376 | struct perf_event_context *ctx; |
| 1377 | |
| 1378 | again: |
| 1379 | rcu_read_lock(); |
Mark Rutland | 6aa7de0 | 2017-10-23 14:07:29 -0700 | [diff] [blame] | 1380 | ctx = READ_ONCE(event->ctx); |
Elena Reshetova | 8c94abb | 2019-01-28 14:27:26 +0200 | [diff] [blame] | 1381 | if (!refcount_inc_not_zero(&ctx->refcount)) { |
Peter Zijlstra | f63a8da | 2015-01-23 12:24:14 +0100 | [diff] [blame] | 1382 | rcu_read_unlock(); |
| 1383 | goto again; |
| 1384 | } |
| 1385 | rcu_read_unlock(); |
| 1386 | |
Peter Zijlstra | a83fe28 | 2015-01-29 14:44:34 +0100 | [diff] [blame] | 1387 | mutex_lock_nested(&ctx->mutex, nesting); |
Peter Zijlstra | f63a8da | 2015-01-23 12:24:14 +0100 | [diff] [blame] | 1388 | if (event->ctx != ctx) { |
| 1389 | mutex_unlock(&ctx->mutex); |
| 1390 | put_ctx(ctx); |
| 1391 | goto again; |
| 1392 | } |
| 1393 | |
| 1394 | return ctx; |
| 1395 | } |
| 1396 | |
Peter Zijlstra | a83fe28 | 2015-01-29 14:44:34 +0100 | [diff] [blame] | 1397 | static inline struct perf_event_context * |
| 1398 | perf_event_ctx_lock(struct perf_event *event) |
| 1399 | { |
| 1400 | return perf_event_ctx_lock_nested(event, 0); |
| 1401 | } |
| 1402 | |
Peter Zijlstra | f63a8da | 2015-01-23 12:24:14 +0100 | [diff] [blame] | 1403 | static void perf_event_ctx_unlock(struct perf_event *event, |
| 1404 | struct perf_event_context *ctx) |
| 1405 | { |
| 1406 | mutex_unlock(&ctx->mutex); |
| 1407 | put_ctx(ctx); |
| 1408 | } |
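/*
 * Usage sketch (illustrative, not part of the original source): callers
 * that need event->ctx to stay stable across a sleeping section pair the
 * two helpers above:
 *
 *	struct perf_event_context *ctx = perf_event_ctx_lock(event);
 *
 *	// event->ctx can neither change nor go away here
 *
 *	perf_event_ctx_unlock(event, ctx);
 */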
| 1409 | |
| 1410 | /* |
Peter Zijlstra | 211de6e | 2014-09-30 19:23:08 +0200 | [diff] [blame] | 1411 | * This must be done under the ctx->lock, such as to serialize against |
| 1412 | * context_equiv(), therefore we cannot call put_ctx() since that might end up |
| 1413 | * calling scheduler related locks and ctx->lock nests inside those. |
| 1414 | */ |
| 1415 | static __must_check struct perf_event_context * |
| 1416 | unclone_ctx(struct perf_event_context *ctx) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1417 | { |
Peter Zijlstra | 211de6e | 2014-09-30 19:23:08 +0200 | [diff] [blame] | 1418 | struct perf_event_context *parent_ctx = ctx->parent_ctx; |
| 1419 | |
| 1420 | lockdep_assert_held(&ctx->lock); |
| 1421 | |
| 1422 | if (parent_ctx) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1423 | ctx->parent_ctx = NULL; |
Peter Zijlstra | 5a3126d | 2013-10-07 17:12:48 +0200 | [diff] [blame] | 1424 | ctx->generation++; |
Peter Zijlstra | 211de6e | 2014-09-30 19:23:08 +0200 | [diff] [blame] | 1425 | |
| 1426 | return parent_ctx; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1427 | } |
| 1428 | |
Oleg Nesterov | 1d95311 | 2017-08-22 17:59:28 +0200 | [diff] [blame] | 1429 | static u32 perf_event_pid_type(struct perf_event *event, struct task_struct *p, |
| 1430 | enum pid_type type) |
Arnaldo Carvalho de Melo | 6844c09 | 2010-12-03 16:36:35 -0200 | [diff] [blame] | 1431 | { |
Oleg Nesterov | 1d95311 | 2017-08-22 17:59:28 +0200 | [diff] [blame] | 1432 | u32 nr; |
Arnaldo Carvalho de Melo | 6844c09 | 2010-12-03 16:36:35 -0200 | [diff] [blame] | 1433 | /* |
| 1434 | * only top level events have the pid namespace they were created in |
| 1435 | */ |
| 1436 | if (event->parent) |
| 1437 | event = event->parent; |
| 1438 | |
Oleg Nesterov | 1d95311 | 2017-08-22 17:59:28 +0200 | [diff] [blame] | 1439 | nr = __task_pid_nr_ns(p, type, event->ns); |
| 1440 | /* avoid -1 if it is idle thread or runs in another ns */ |
| 1441 | if (!nr && !pid_alive(p)) |
| 1442 | nr = -1; |
| 1443 | return nr; |
| 1444 | } |
| 1445 | |
| 1446 | static u32 perf_event_pid(struct perf_event *event, struct task_struct *p) |
| 1447 | { |
Eric W. Biederman | 6883f81 | 2017-06-04 04:32:13 -0500 | [diff] [blame] | 1448 | return perf_event_pid_type(event, p, PIDTYPE_TGID); |
Arnaldo Carvalho de Melo | 6844c09 | 2010-12-03 16:36:35 -0200 | [diff] [blame] | 1449 | } |
| 1450 | |
| 1451 | static u32 perf_event_tid(struct perf_event *event, struct task_struct *p) |
| 1452 | { |
Oleg Nesterov | 1d95311 | 2017-08-22 17:59:28 +0200 | [diff] [blame] | 1453 | return perf_event_pid_type(event, p, PIDTYPE_PID); |
Arnaldo Carvalho de Melo | 6844c09 | 2010-12-03 16:36:35 -0200 | [diff] [blame] | 1454 | } |
| 1455 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1456 | /* |
| 1457 | * If we inherit events we want to return the parent event id |
| 1458 | * to userspace. |
| 1459 | */ |
| 1460 | static u64 primary_event_id(struct perf_event *event) |
| 1461 | { |
| 1462 | u64 id = event->id; |
| 1463 | |
| 1464 | if (event->parent) |
| 1465 | id = event->parent->id; |
| 1466 | |
| 1467 | return id; |
| 1468 | } |
| 1469 | |
| 1470 | /* |
| 1471 | * Get the perf_event_context for a task and lock it. |
Peter Zijlstra | 63b6da3 | 2016-01-14 16:05:37 +0100 | [diff] [blame] | 1472 | * |
Randy Dunlap | c034f48 | 2021-02-25 17:21:10 -0800 | [diff] [blame] | 1473 | * This has to cope with the fact that until it is locked, |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1474 | * the context could get moved to another task. |
| 1475 | */ |
| 1476 | static struct perf_event_context * |
Peter Zijlstra | 8dc85d547 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 1477 | perf_lock_task_context(struct task_struct *task, int ctxn, unsigned long *flags) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1478 | { |
| 1479 | struct perf_event_context *ctx; |
| 1480 | |
Peter Zijlstra | 9ed6060 | 2010-06-11 17:36:35 +0200 | [diff] [blame] | 1481 | retry: |
Peter Zijlstra | 058ebd0 | 2013-07-12 11:08:33 +0200 | [diff] [blame] | 1482 | /* |
| 1483 | * One of the few rules of preemptible RCU is that one cannot do |
| 1484 | * rcu_read_unlock() while holding a scheduler (or nested) lock when |
Paul E. McKenney | 2fd5907 | 2015-11-04 05:48:38 -0800 | [diff] [blame] | 1485 | * part of the read side critical section was irqs-enabled -- see |
Peter Zijlstra | 058ebd0 | 2013-07-12 11:08:33 +0200 | [diff] [blame] | 1486 | * rcu_read_unlock_special(). |
| 1487 | * |
| 1488 | * Since ctx->lock nests under rq->lock we must ensure the entire read |
Paul E. McKenney | 2fd5907 | 2015-11-04 05:48:38 -0800 | [diff] [blame] | 1489 | * side critical section has interrupts disabled. |
Peter Zijlstra | 058ebd0 | 2013-07-12 11:08:33 +0200 | [diff] [blame] | 1490 | */ |
Paul E. McKenney | 2fd5907 | 2015-11-04 05:48:38 -0800 | [diff] [blame] | 1491 | local_irq_save(*flags); |
Peter Zijlstra | 058ebd0 | 2013-07-12 11:08:33 +0200 | [diff] [blame] | 1492 | rcu_read_lock(); |
Peter Zijlstra | 8dc85d547 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 1493 | ctx = rcu_dereference(task->perf_event_ctxp[ctxn]); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1494 | if (ctx) { |
| 1495 | /* |
| 1496 | * If this context is a clone of another, it might |
| 1497 | * get swapped for another underneath us by |
| 1498 | * perf_event_task_sched_out, though the |
| 1499 | * rcu_read_lock() protects us from any context |
| 1500 | * getting freed. Lock the context and check if it |
| 1501 | * got swapped before we could get the lock, and retry |
| 1502 | * if so. If we locked the right context, then it |
| 1503 | * can't get swapped on us any more. |
| 1504 | */ |
Paul E. McKenney | 2fd5907 | 2015-11-04 05:48:38 -0800 | [diff] [blame] | 1505 | raw_spin_lock(&ctx->lock); |
Peter Zijlstra | 8dc85d547 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 1506 | if (ctx != rcu_dereference(task->perf_event_ctxp[ctxn])) { |
Paul E. McKenney | 2fd5907 | 2015-11-04 05:48:38 -0800 | [diff] [blame] | 1507 | raw_spin_unlock(&ctx->lock); |
Peter Zijlstra | 058ebd0 | 2013-07-12 11:08:33 +0200 | [diff] [blame] | 1508 | rcu_read_unlock(); |
Paul E. McKenney | 2fd5907 | 2015-11-04 05:48:38 -0800 | [diff] [blame] | 1509 | local_irq_restore(*flags); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1510 | goto retry; |
| 1511 | } |
| 1512 | |
Peter Zijlstra | 63b6da3 | 2016-01-14 16:05:37 +0100 | [diff] [blame] | 1513 | if (ctx->task == TASK_TOMBSTONE || |
Elena Reshetova | 8c94abb | 2019-01-28 14:27:26 +0200 | [diff] [blame] | 1514 | !refcount_inc_not_zero(&ctx->refcount)) { |
Paul E. McKenney | 2fd5907 | 2015-11-04 05:48:38 -0800 | [diff] [blame] | 1515 | raw_spin_unlock(&ctx->lock); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1516 | ctx = NULL; |
Peter Zijlstra | 828b6f0 | 2016-01-27 21:59:04 +0100 | [diff] [blame] | 1517 | } else { |
| 1518 | WARN_ON_ONCE(ctx->task != task); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1519 | } |
| 1520 | } |
| 1521 | rcu_read_unlock(); |
Paul E. McKenney | 2fd5907 | 2015-11-04 05:48:38 -0800 | [diff] [blame] | 1522 | if (!ctx) |
| 1523 | local_irq_restore(*flags); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1524 | return ctx; |
| 1525 | } |
| 1526 | |
| 1527 | /* |
| 1528 | * Get the context for a task and increment its pin_count so it |
| 1529 | * can't get swapped to another task. This also increments its |
| 1530 | * reference count so that the context can't get freed. |
| 1531 | */ |
Peter Zijlstra | 8dc85d547 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 1532 | static struct perf_event_context * |
| 1533 | perf_pin_task_context(struct task_struct *task, int ctxn) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1534 | { |
| 1535 | struct perf_event_context *ctx; |
| 1536 | unsigned long flags; |
| 1537 | |
Peter Zijlstra | 8dc85d547 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 1538 | ctx = perf_lock_task_context(task, ctxn, &flags); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1539 | if (ctx) { |
| 1540 | ++ctx->pin_count; |
Thomas Gleixner | e625cce1 | 2009-11-17 18:02:06 +0100 | [diff] [blame] | 1541 | raw_spin_unlock_irqrestore(&ctx->lock, flags); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1542 | } |
| 1543 | return ctx; |
| 1544 | } |
| 1545 | |
| 1546 | static void perf_unpin_context(struct perf_event_context *ctx) |
| 1547 | { |
| 1548 | unsigned long flags; |
| 1549 | |
Thomas Gleixner | e625cce1 | 2009-11-17 18:02:06 +0100 | [diff] [blame] | 1550 | raw_spin_lock_irqsave(&ctx->lock, flags); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1551 | --ctx->pin_count; |
Thomas Gleixner | e625cce1 | 2009-11-17 18:02:06 +0100 | [diff] [blame] | 1552 | raw_spin_unlock_irqrestore(&ctx->lock, flags); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1553 | } |
| 1554 | |
Peter Zijlstra | f67218c | 2009-11-23 11:37:27 +0100 | [diff] [blame] | 1555 | /* |
| 1556 | * Update the record of the current time in a context. |
| 1557 | */ |
Peter Zijlstra | 09f5e7d | 2021-12-20 13:19:52 +0100 | [diff] [blame] | 1558 | static void __update_context_time(struct perf_event_context *ctx, bool adv) |
Peter Zijlstra | f67218c | 2009-11-23 11:37:27 +0100 | [diff] [blame] | 1559 | { |
| 1560 | u64 now = perf_clock(); |
| 1561 | |
Peter Zijlstra | 09f5e7d | 2021-12-20 13:19:52 +0100 | [diff] [blame] | 1562 | if (adv) |
| 1563 | ctx->time += now - ctx->timestamp; |
Peter Zijlstra | f67218c | 2009-11-23 11:37:27 +0100 | [diff] [blame] | 1564 | ctx->timestamp = now; |
Peter Zijlstra | 09f5e7d | 2021-12-20 13:19:52 +0100 | [diff] [blame] | 1565 | |
| 1566 | /* |
| 1567 | * The above: time' = time + (now - timestamp), can be re-arranged |
| 1568 | * into: time` = now + (time - timestamp), which gives a single value |
| 1569 | * offset to compute future time without locks on. |
| 1570 | * |
| 1571 | * See perf_event_time_now(), which can be used from NMI context where |
| 1572 | * it's (obviously) not possible to acquire ctx->lock in order to read |
| 1573 | * both the above values in a consistent manner. |
| 1574 | */ |
| 1575 | WRITE_ONCE(ctx->timeoffset, ctx->time - ctx->timestamp); |
| 1576 | } |
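/*
 * Worked example (illustrative, not part of the original source): suppose
 * ctx->time == 100 and ctx->timestamp == 70 when this runs at now == 90.
 * Then:
 *
 *	time'      = 100 + (90 - 70)	= 120
 *	timeoffset = 120 - 90		= 30
 *
 * and a lock-free reader can later compute the context time as
 * now + timeoffset, e.g. 95 + 30 == 125 at now == 95.
 */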
| 1577 | |
| 1578 | static void update_context_time(struct perf_event_context *ctx) |
| 1579 | { |
| 1580 | __update_context_time(ctx, true); |
Peter Zijlstra | f67218c | 2009-11-23 11:37:27 +0100 | [diff] [blame] | 1581 | } |
| 1582 | |
Stephane Eranian | 4158755 | 2011-01-03 18:20:01 +0200 | [diff] [blame] | 1583 | static u64 perf_event_time(struct perf_event *event) |
| 1584 | { |
| 1585 | struct perf_event_context *ctx = event->ctx; |
Stephane Eranian | e5d1367 | 2011-02-14 11:20:01 +0200 | [diff] [blame] | 1586 | |
Peter Zijlstra | 09f5e7d | 2021-12-20 13:19:52 +0100 | [diff] [blame] | 1587 | if (unlikely(!ctx)) |
| 1588 | return 0; |
| 1589 | |
Stephane Eranian | e5d1367 | 2011-02-14 11:20:01 +0200 | [diff] [blame] | 1590 | if (is_cgroup_event(event)) |
| 1591 | return perf_cgroup_event_time(event); |
| 1592 | |
Peter Zijlstra | 09f5e7d | 2021-12-20 13:19:52 +0100 | [diff] [blame] | 1593 | return ctx->time; |
| 1594 | } |
| 1595 | |
| 1596 | static u64 perf_event_time_now(struct perf_event *event, u64 now) |
| 1597 | { |
| 1598 | struct perf_event_context *ctx = event->ctx; |
| 1599 | |
| 1600 | if (unlikely(!ctx)) |
| 1601 | return 0; |
| 1602 | |
| 1603 | if (is_cgroup_event(event)) |
| 1604 | return perf_cgroup_event_time_now(event, now); |
| 1605 | |
| 1606 | if (!(__load_acquire(&ctx->is_active) & EVENT_TIME)) |
| 1607 | return ctx->time; |
| 1608 | |
| 1609 | now += READ_ONCE(ctx->timeoffset); |
| 1610 | return now; |
Stephane Eranian | 4158755 | 2011-01-03 18:20:01 +0200 | [diff] [blame] | 1611 | } |
| 1612 | |
Alexander Shishkin | 487f05e | 2017-01-19 18:43:30 +0200 | [diff] [blame] | 1613 | static enum event_type_t get_event_type(struct perf_event *event) |
| 1614 | { |
| 1615 | struct perf_event_context *ctx = event->ctx; |
| 1616 | enum event_type_t event_type; |
| 1617 | |
| 1618 | lockdep_assert_held(&ctx->lock); |
| 1619 | |
Alexander Shishkin | 3bda69c | 2017-07-18 14:08:34 +0300 | [diff] [blame] | 1620 | /* |
| 1621 | * It's 'group type', really, because if our group leader is |
| 1622 | * pinned, so are we. |
| 1623 | */ |
| 1624 | if (event->group_leader != event) |
| 1625 | event = event->group_leader; |
| 1626 | |
Alexander Shishkin | 487f05e | 2017-01-19 18:43:30 +0200 | [diff] [blame] | 1627 | event_type = event->attr.pinned ? EVENT_PINNED : EVENT_FLEXIBLE; |
| 1628 | if (!ctx->task) |
| 1629 | event_type |= EVENT_CPU; |
| 1630 | |
| 1631 | return event_type; |
| 1632 | } |
| 1633 | |
Alexey Budankov | 8e1a203 | 2017-09-08 11:47:03 +0300 | [diff] [blame] | 1634 | /* |
Peter Zijlstra | 161c85f | 2017-11-13 14:28:27 +0100 | [diff] [blame] | 1635 | * Helper function to initialize event group nodes. |
Alexey Budankov | 8e1a203 | 2017-09-08 11:47:03 +0300 | [diff] [blame] | 1636 | */ |
Peter Zijlstra | 161c85f | 2017-11-13 14:28:27 +0100 | [diff] [blame] | 1637 | static void init_event_group(struct perf_event *event) |
Alexey Budankov | 8e1a203 | 2017-09-08 11:47:03 +0300 | [diff] [blame] | 1638 | { |
| 1639 | RB_CLEAR_NODE(&event->group_node); |
| 1640 | event->group_index = 0; |
| 1641 | } |
| 1642 | |
| 1643 | /* |
| 1644 | * Extract pinned or flexible groups from the context |
Peter Zijlstra | 161c85f | 2017-11-13 14:28:27 +0100 | [diff] [blame] | 1645 | * based on event attrs bits. |
Alexey Budankov | 8e1a203 | 2017-09-08 11:47:03 +0300 | [diff] [blame] | 1646 | */ |
| 1647 | static struct perf_event_groups * |
| 1648 | get_event_groups(struct perf_event *event, struct perf_event_context *ctx) |
Frederic Weisbecker | 889ff01 | 2010-01-09 20:04:47 +0100 | [diff] [blame] | 1649 | { |
| 1650 | if (event->attr.pinned) |
| 1651 | return &ctx->pinned_groups; |
| 1652 | else |
| 1653 | return &ctx->flexible_groups; |
| 1654 | } |
| 1655 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1656 | /* |
Peter Zijlstra | 161c85f | 2017-11-13 14:28:27 +0100 | [diff] [blame] | 1657 |  * Helper function to initialize perf_event_groups trees. |
Alexey Budankov | 8e1a203 | 2017-09-08 11:47:03 +0300 | [diff] [blame] | 1658 | */ |
Peter Zijlstra | 161c85f | 2017-11-13 14:28:27 +0100 | [diff] [blame] | 1659 | static void perf_event_groups_init(struct perf_event_groups *groups) |
Alexey Budankov | 8e1a203 | 2017-09-08 11:47:03 +0300 | [diff] [blame] | 1660 | { |
| 1661 | groups->tree = RB_ROOT; |
| 1662 | groups->index = 0; |
| 1663 | } |
| 1664 | |
Peter Zijlstra | a3b8986 | 2020-04-29 17:05:15 +0200 | [diff] [blame] | 1665 | static inline struct cgroup *event_cgroup(const struct perf_event *event) |
| 1666 | { |
| 1667 | struct cgroup *cgroup = NULL; |
| 1668 | |
| 1669 | #ifdef CONFIG_CGROUP_PERF |
| 1670 | if (event->cgrp) |
| 1671 | cgroup = event->cgrp->css.cgroup; |
| 1672 | #endif |
| 1673 | |
| 1674 | return cgroup; |
| 1675 | } |
| 1676 | |
Alexey Budankov | 8e1a203 | 2017-09-08 11:47:03 +0300 | [diff] [blame] | 1677 | /* |
| 1678 | * Compare function for event groups; |
Peter Zijlstra | 161c85f | 2017-11-13 14:28:27 +0100 | [diff] [blame] | 1679 | * |
 | 1680 |  * Implements a composite key that sorts by CPU, then cgroup, then a virtual |
 | 1681 |  * index which provides ordering when rotating groups for the same CPU. |
Alexey Budankov | 8e1a203 | 2017-09-08 11:47:03 +0300 | [diff] [blame] | 1682 | */ |
Peter Zijlstra | a3b8986 | 2020-04-29 17:05:15 +0200 | [diff] [blame] | 1683 | static __always_inline int |
| 1684 | perf_event_groups_cmp(const int left_cpu, const struct cgroup *left_cgroup, |
| 1685 | const u64 left_group_index, const struct perf_event *right) |
Alexey Budankov | 8e1a203 | 2017-09-08 11:47:03 +0300 | [diff] [blame] | 1686 | { |
Peter Zijlstra | a3b8986 | 2020-04-29 17:05:15 +0200 | [diff] [blame] | 1687 | if (left_cpu < right->cpu) |
| 1688 | return -1; |
| 1689 | if (left_cpu > right->cpu) |
| 1690 | return 1; |
Peter Zijlstra | 161c85f | 2017-11-13 14:28:27 +0100 | [diff] [blame] | 1691 | |
Ian Rogers | 95ed6c7 | 2020-02-13 23:51:33 -0800 | [diff] [blame] | 1692 | #ifdef CONFIG_CGROUP_PERF |
Peter Zijlstra | a3b8986 | 2020-04-29 17:05:15 +0200 | [diff] [blame] | 1693 | { |
| 1694 | const struct cgroup *right_cgroup = event_cgroup(right); |
Ian Rogers | 95ed6c7 | 2020-02-13 23:51:33 -0800 | [diff] [blame] | 1695 | |
Peter Zijlstra | a3b8986 | 2020-04-29 17:05:15 +0200 | [diff] [blame] | 1696 | if (left_cgroup != right_cgroup) { |
| 1697 | if (!left_cgroup) { |
| 1698 | /* |
 | 1699 | 				 * Left has no cgroup but right does; events |
 | 1700 | 				 * with no cgroup come first. |
| 1701 | */ |
| 1702 | return -1; |
| 1703 | } |
| 1704 | if (!right_cgroup) { |
| 1705 | /* |
 | 1706 | 				 * Right has no cgroup but left does; events |
 | 1707 | 				 * with no cgroup come first. |
| 1708 | */ |
| 1709 | return 1; |
| 1710 | } |
| 1711 | /* Two dissimilar cgroups, order by id. */ |
| 1712 | if (cgroup_id(left_cgroup) < cgroup_id(right_cgroup)) |
| 1713 | return -1; |
| 1714 | |
| 1715 | return 1; |
| 1716 | } |
Ian Rogers | 95ed6c7 | 2020-02-13 23:51:33 -0800 | [diff] [blame] | 1717 | } |
| 1718 | #endif |
| 1719 | |
Peter Zijlstra | a3b8986 | 2020-04-29 17:05:15 +0200 | [diff] [blame] | 1720 | if (left_group_index < right->group_index) |
| 1721 | return -1; |
| 1722 | if (left_group_index > right->group_index) |
| 1723 | return 1; |
Peter Zijlstra | 161c85f | 2017-11-13 14:28:27 +0100 | [diff] [blame] | 1724 | |
Peter Zijlstra | a3b8986 | 2020-04-29 17:05:15 +0200 | [diff] [blame] | 1725 | return 0; |
| 1726 | } |
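/*
 * Illustrative example (not part of the original source): with this key
 * the tree orders events first by CPU, then (under CONFIG_CGROUP_PERF) by
 * cgroup id, then by insertion order, e.g.:
 *
 *	{cpu=0, cgrp=A, idx=1} < {cpu=0, cgrp=A, idx=5}
 *	                       < {cpu=0, cgrp=B, ...}
 *	                       < {cpu=1, ...}
 *
 * assuming cgroup A has the smaller cgroup id; events without a cgroup
 * sort before all cgroup events on the same CPU.
 */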
| 1727 | |
| 1728 | #define __node_2_pe(node) \ |
| 1729 | rb_entry((node), struct perf_event, group_node) |
| 1730 | |
| 1731 | static inline bool __group_less(struct rb_node *a, const struct rb_node *b) |
| 1732 | { |
| 1733 | struct perf_event *e = __node_2_pe(a); |
| 1734 | return perf_event_groups_cmp(e->cpu, event_cgroup(e), e->group_index, |
| 1735 | __node_2_pe(b)) < 0; |
| 1736 | } |
| 1737 | |
| 1738 | struct __group_key { |
| 1739 | int cpu; |
| 1740 | struct cgroup *cgroup; |
| 1741 | }; |
| 1742 | |
| 1743 | static inline int __group_cmp(const void *key, const struct rb_node *node) |
| 1744 | { |
| 1745 | const struct __group_key *a = key; |
| 1746 | const struct perf_event *b = __node_2_pe(node); |
| 1747 | |
| 1748 | /* partial/subtree match: @cpu, @cgroup; ignore: @group_index */ |
| 1749 | return perf_event_groups_cmp(a->cpu, a->cgroup, b->group_index, b); |
Alexey Budankov | 8e1a203 | 2017-09-08 11:47:03 +0300 | [diff] [blame] | 1750 | } |
| 1751 | |
| 1752 | /* |
Peter Zijlstra | 161c85f | 2017-11-13 14:28:27 +0100 | [diff] [blame] | 1753 | * Insert @event into @groups' tree; using {@event->cpu, ++@groups->index} for |
 | 1754 |  * key (see perf_event_groups_cmp()). This places it last inside the CPU |
| 1755 | * subtree. |
Alexey Budankov | 8e1a203 | 2017-09-08 11:47:03 +0300 | [diff] [blame] | 1756 | */ |
| 1757 | static void |
| 1758 | perf_event_groups_insert(struct perf_event_groups *groups, |
Peter Zijlstra | 161c85f | 2017-11-13 14:28:27 +0100 | [diff] [blame] | 1759 | struct perf_event *event) |
Alexey Budankov | 8e1a203 | 2017-09-08 11:47:03 +0300 | [diff] [blame] | 1760 | { |
Alexey Budankov | 8e1a203 | 2017-09-08 11:47:03 +0300 | [diff] [blame] | 1761 | event->group_index = ++groups->index; |
| 1762 | |
Peter Zijlstra | a3b8986 | 2020-04-29 17:05:15 +0200 | [diff] [blame] | 1763 | rb_add(&event->group_node, &groups->tree, __group_less); |
Alexey Budankov | 8e1a203 | 2017-09-08 11:47:03 +0300 | [diff] [blame] | 1764 | } |
| 1765 | |
| 1766 | /* |
Peter Zijlstra | 161c85f | 2017-11-13 14:28:27 +0100 | [diff] [blame] | 1767 | * Helper function to insert event into the pinned or flexible groups. |
Alexey Budankov | 8e1a203 | 2017-09-08 11:47:03 +0300 | [diff] [blame] | 1768 | */ |
| 1769 | static void |
| 1770 | add_event_to_groups(struct perf_event *event, struct perf_event_context *ctx) |
| 1771 | { |
| 1772 | struct perf_event_groups *groups; |
| 1773 | |
| 1774 | groups = get_event_groups(event, ctx); |
| 1775 | perf_event_groups_insert(groups, event); |
| 1776 | } |
| 1777 | |
| 1778 | /* |
Peter Zijlstra | 161c85f | 2017-11-13 14:28:27 +0100 | [diff] [blame] | 1779 | * Delete a group from a tree. |
Alexey Budankov | 8e1a203 | 2017-09-08 11:47:03 +0300 | [diff] [blame] | 1780 | */ |
| 1781 | static void |
| 1782 | perf_event_groups_delete(struct perf_event_groups *groups, |
Peter Zijlstra | 161c85f | 2017-11-13 14:28:27 +0100 | [diff] [blame] | 1783 | struct perf_event *event) |
Alexey Budankov | 8e1a203 | 2017-09-08 11:47:03 +0300 | [diff] [blame] | 1784 | { |
Peter Zijlstra | 161c85f | 2017-11-13 14:28:27 +0100 | [diff] [blame] | 1785 | WARN_ON_ONCE(RB_EMPTY_NODE(&event->group_node) || |
| 1786 | RB_EMPTY_ROOT(&groups->tree)); |
Alexey Budankov | 8e1a203 | 2017-09-08 11:47:03 +0300 | [diff] [blame] | 1787 | |
Peter Zijlstra | 161c85f | 2017-11-13 14:28:27 +0100 | [diff] [blame] | 1788 | rb_erase(&event->group_node, &groups->tree); |
Alexey Budankov | 8e1a203 | 2017-09-08 11:47:03 +0300 | [diff] [blame] | 1789 | init_event_group(event); |
| 1790 | } |
| 1791 | |
| 1792 | /* |
Peter Zijlstra | 161c85f | 2017-11-13 14:28:27 +0100 | [diff] [blame] | 1793 | * Helper function to delete event from its groups. |
Alexey Budankov | 8e1a203 | 2017-09-08 11:47:03 +0300 | [diff] [blame] | 1794 | */ |
| 1795 | static void |
| 1796 | del_event_from_groups(struct perf_event *event, struct perf_event_context *ctx) |
| 1797 | { |
| 1798 | struct perf_event_groups *groups; |
| 1799 | |
| 1800 | groups = get_event_groups(event, ctx); |
| 1801 | perf_event_groups_delete(groups, event); |
| 1802 | } |
| 1803 | |
| 1804 | /* |
Ian Rogers | 95ed6c7 | 2020-02-13 23:51:33 -0800 | [diff] [blame] | 1805 | * Get the leftmost event in the cpu/cgroup subtree. |
Alexey Budankov | 8e1a203 | 2017-09-08 11:47:03 +0300 | [diff] [blame] | 1806 | */ |
| 1807 | static struct perf_event * |
Ian Rogers | 95ed6c7 | 2020-02-13 23:51:33 -0800 | [diff] [blame] | 1808 | perf_event_groups_first(struct perf_event_groups *groups, int cpu, |
| 1809 | struct cgroup *cgrp) |
Alexey Budankov | 8e1a203 | 2017-09-08 11:47:03 +0300 | [diff] [blame] | 1810 | { |
Peter Zijlstra | a3b8986 | 2020-04-29 17:05:15 +0200 | [diff] [blame] | 1811 | struct __group_key key = { |
| 1812 | .cpu = cpu, |
| 1813 | .cgroup = cgrp, |
| 1814 | }; |
| 1815 | struct rb_node *node; |
Ian Rogers | 95ed6c7 | 2020-02-13 23:51:33 -0800 | [diff] [blame] | 1816 | |
Peter Zijlstra | a3b8986 | 2020-04-29 17:05:15 +0200 | [diff] [blame] | 1817 | node = rb_find_first(&key, &groups->tree, __group_cmp); |
| 1818 | if (node) |
| 1819 | return __node_2_pe(node); |
Alexey Budankov | 8e1a203 | 2017-09-08 11:47:03 +0300 | [diff] [blame] | 1820 | |
Peter Zijlstra | a3b8986 | 2020-04-29 17:05:15 +0200 | [diff] [blame] | 1821 | return NULL; |
Alexey Budankov | 8e1a203 | 2017-09-08 11:47:03 +0300 | [diff] [blame] | 1822 | } |
| 1823 | |
| 1824 | /* |
Peter Zijlstra | 1cac7b1 | 2017-11-13 14:28:30 +0100 | [diff] [blame] | 1825 | * Like rb_entry_next_safe() for the @cpu subtree. |
| 1826 | */ |
| 1827 | static struct perf_event * |
| 1828 | perf_event_groups_next(struct perf_event *event) |
| 1829 | { |
Peter Zijlstra | a3b8986 | 2020-04-29 17:05:15 +0200 | [diff] [blame] | 1830 | struct __group_key key = { |
| 1831 | .cpu = event->cpu, |
| 1832 | .cgroup = event_cgroup(event), |
| 1833 | }; |
| 1834 | struct rb_node *next; |
Peter Zijlstra | 1cac7b1 | 2017-11-13 14:28:30 +0100 | [diff] [blame] | 1835 | |
Peter Zijlstra | a3b8986 | 2020-04-29 17:05:15 +0200 | [diff] [blame] | 1836 | next = rb_next_match(&key, &event->group_node, __group_cmp); |
| 1837 | if (next) |
| 1838 | return __node_2_pe(next); |
Peter Zijlstra | 1cac7b1 | 2017-11-13 14:28:30 +0100 | [diff] [blame] | 1839 | |
Peter Zijlstra | a3b8986 | 2020-04-29 17:05:15 +0200 | [diff] [blame] | 1840 | return NULL; |
Peter Zijlstra | 1cac7b1 | 2017-11-13 14:28:30 +0100 | [diff] [blame] | 1841 | } |
| 1842 | |
| 1843 | /* |
Peter Zijlstra | 161c85f | 2017-11-13 14:28:27 +0100 | [diff] [blame] | 1844 | * Iterate through the whole groups tree. |
Alexey Budankov | 8e1a203 | 2017-09-08 11:47:03 +0300 | [diff] [blame] | 1845 | */ |
Peter Zijlstra | 6e6804d | 2017-11-13 14:28:41 +0100 | [diff] [blame] | 1846 | #define perf_event_groups_for_each(event, groups) \ |
| 1847 | for (event = rb_entry_safe(rb_first(&((groups)->tree)), \ |
| 1848 | typeof(*event), group_node); event; \ |
| 1849 | event = rb_entry_safe(rb_next(&event->group_node), \ |
| 1850 | typeof(*event), group_node)) |
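/*
 * Usage sketch (illustrative, not part of the original source):
 *
 *	struct perf_event *event;
 *
 *	perf_event_groups_for_each(event, &ctx->pinned_groups)
 *		do_something(event);
 *
 * where do_something() is a hypothetical per-group-leader operation; the
 * walk visits every group leader in the tree in key order.
 */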
Alexey Budankov | 8e1a203 | 2017-09-08 11:47:03 +0300 | [diff] [blame] | 1851 | |
| 1852 | /* |
Tobias Tefke | 788faab | 2018-07-09 12:57:15 +0200 | [diff] [blame] | 1853 | * Add an event from the lists for its context. |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1854 | * Must be called with ctx->mutex and ctx->lock held. |
| 1855 | */ |
| 1856 | static void |
| 1857 | list_add_event(struct perf_event *event, struct perf_event_context *ctx) |
| 1858 | { |
Peter Zijlstra | c994d61 | 2016-01-08 09:20:23 +0100 | [diff] [blame] | 1859 | lockdep_assert_held(&ctx->lock); |
| 1860 | |
Peter Zijlstra | 8a49542 | 2010-05-27 15:47:49 +0200 | [diff] [blame] | 1861 | WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT); |
| 1862 | event->attach_state |= PERF_ATTACH_CONTEXT; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1863 | |
Peter Zijlstra | 0d3d73a | 2017-09-05 14:16:28 +0200 | [diff] [blame] | 1864 | event->tstamp = perf_event_time(event); |
| 1865 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1866 | /* |
Peter Zijlstra | 8a49542 | 2010-05-27 15:47:49 +0200 | [diff] [blame] | 1867 | * If we're a stand alone event or group leader, we go to the context |
| 1868 | * list, group events are kept attached to the group so that |
| 1869 | * perf_group_detach can, at all times, locate all siblings. |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1870 | */ |
Peter Zijlstra | 8a49542 | 2010-05-27 15:47:49 +0200 | [diff] [blame] | 1871 | if (event->group_leader == event) { |
David Carrillo-Cisneros | 4ff6a8d | 2016-08-17 13:55:05 -0700 | [diff] [blame] | 1872 | event->group_caps = event->event_caps; |
Alexey Budankov | 8e1a203 | 2017-09-08 11:47:03 +0300 | [diff] [blame] | 1873 | add_event_to_groups(event, ctx); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1874 | } |
| 1875 | |
| 1876 | list_add_rcu(&event->event_entry, &ctx->event_list); |
| 1877 | ctx->nr_events++; |
Rob Herring | 82ff0c0 | 2021-12-08 14:11:21 -0600 | [diff] [blame] | 1878 | if (event->hw.flags & PERF_EVENT_FLAG_USER_READ_CNT) |
| 1879 | ctx->nr_user++; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1880 | if (event->attr.inherit_stat) |
| 1881 | ctx->nr_stat++; |
Peter Zijlstra | 5a3126d | 2013-10-07 17:12:48 +0200 | [diff] [blame] | 1882 | |
Peter Zijlstra | 33238c5 | 2020-03-18 20:33:37 +0100 | [diff] [blame] | 1883 | if (event->state > PERF_EVENT_STATE_OFF) |
| 1884 | perf_cgroup_event_enable(event, ctx); |
| 1885 | |
Peter Zijlstra | 5a3126d | 2013-10-07 17:12:48 +0200 | [diff] [blame] | 1886 | ctx->generation++; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1887 | } |
| 1888 | |
Arnaldo Carvalho de Melo | c320c7b | 2010-10-20 12:50:11 -0200 | [diff] [blame] | 1889 | /* |
Jiri Olsa | 0231bb5 | 2013-02-01 11:23:45 +0100 | [diff] [blame] | 1890 |  * Initialize event state based on perf_event_attr::disabled. |
| 1891 | */ |
| 1892 | static inline void perf_event__state_init(struct perf_event *event) |
| 1893 | { |
| 1894 | event->state = event->attr.disabled ? PERF_EVENT_STATE_OFF : |
| 1895 | PERF_EVENT_STATE_INACTIVE; |
| 1896 | } |
| 1897 | |
Peter Zijlstra | a723968 | 2015-09-09 19:06:33 +0200 | [diff] [blame] | 1898 | static void __perf_event_read_size(struct perf_event *event, int nr_siblings) |
Arnaldo Carvalho de Melo | c320c7b | 2010-10-20 12:50:11 -0200 | [diff] [blame] | 1899 | { |
| 1900 | int entry = sizeof(u64); /* value */ |
| 1901 | int size = 0; |
| 1902 | int nr = 1; |
| 1903 | |
| 1904 | if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) |
| 1905 | size += sizeof(u64); |
| 1906 | |
| 1907 | if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) |
| 1908 | size += sizeof(u64); |
| 1909 | |
| 1910 | if (event->attr.read_format & PERF_FORMAT_ID) |
| 1911 | entry += sizeof(u64); |
| 1912 | |
| 1913 | if (event->attr.read_format & PERF_FORMAT_GROUP) { |
Peter Zijlstra | a723968 | 2015-09-09 19:06:33 +0200 | [diff] [blame] | 1914 | nr += nr_siblings; |
Arnaldo Carvalho de Melo | c320c7b | 2010-10-20 12:50:11 -0200 | [diff] [blame] | 1915 | size += sizeof(u64); |
| 1916 | } |
| 1917 | |
| 1918 | size += entry * nr; |
| 1919 | event->read_size = size; |
| 1920 | } |
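/*
 * Worked example (illustrative, not part of the original source): with
 * read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
 *               PERF_FORMAT_TOTAL_TIME_RUNNING |
 *               PERF_FORMAT_ID | PERF_FORMAT_GROUP
 * and nr_siblings == 3, the code above computes:
 *
 *	entry     = 8 (value) + 8 (id)			= 16
 *	size      = 8 (enabled) + 8 (running) + 8 (nr)	= 24
 *	nr        = 1 + 3				= 4
 *	read_size = 24 + 16 * 4				= 88 bytes
 */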
| 1921 | |
Peter Zijlstra | a723968 | 2015-09-09 19:06:33 +0200 | [diff] [blame] | 1922 | static void __perf_event_header_size(struct perf_event *event, u64 sample_type) |
Arnaldo Carvalho de Melo | c320c7b | 2010-10-20 12:50:11 -0200 | [diff] [blame] | 1923 | { |
| 1924 | struct perf_sample_data *data; |
Arnaldo Carvalho de Melo | c320c7b | 2010-10-20 12:50:11 -0200 | [diff] [blame] | 1925 | u16 size = 0; |
| 1926 | |
Arnaldo Carvalho de Melo | c320c7b | 2010-10-20 12:50:11 -0200 | [diff] [blame] | 1927 | if (sample_type & PERF_SAMPLE_IP) |
| 1928 | size += sizeof(data->ip); |
| 1929 | |
Arnaldo Carvalho de Melo | 6844c09 | 2010-12-03 16:36:35 -0200 | [diff] [blame] | 1930 | if (sample_type & PERF_SAMPLE_ADDR) |
| 1931 | size += sizeof(data->addr); |
| 1932 | |
| 1933 | if (sample_type & PERF_SAMPLE_PERIOD) |
| 1934 | size += sizeof(data->period); |
| 1935 | |
Kan Liang | 2a6c6b7 | 2021-01-28 14:40:07 -0800 | [diff] [blame] | 1936 | if (sample_type & PERF_SAMPLE_WEIGHT_TYPE) |
| 1937 | size += sizeof(data->weight.full); |
Andi Kleen | c3feedf | 2013-01-24 16:10:28 +0100 | [diff] [blame] | 1938 | |
Arnaldo Carvalho de Melo | 6844c09 | 2010-12-03 16:36:35 -0200 | [diff] [blame] | 1939 | if (sample_type & PERF_SAMPLE_READ) |
| 1940 | size += event->read_size; |
| 1941 | |
Stephane Eranian | d6be9ad | 2013-01-24 16:10:31 +0100 | [diff] [blame] | 1942 | if (sample_type & PERF_SAMPLE_DATA_SRC) |
| 1943 | size += sizeof(data->data_src.val); |
| 1944 | |
Andi Kleen | fdfbbd0 | 2013-09-20 07:40:39 -0700 | [diff] [blame] | 1945 | if (sample_type & PERF_SAMPLE_TRANSACTION) |
| 1946 | size += sizeof(data->txn); |
| 1947 | |
Kan Liang | fc7ce9c | 2017-08-28 20:52:49 -0400 | [diff] [blame] | 1948 | if (sample_type & PERF_SAMPLE_PHYS_ADDR) |
| 1949 | size += sizeof(data->phys_addr); |
| 1950 | |
Namhyung Kim | 6546b19 | 2020-03-25 21:45:29 +0900 | [diff] [blame] | 1951 | if (sample_type & PERF_SAMPLE_CGROUP) |
| 1952 | size += sizeof(data->cgroup); |
| 1953 | |
Kan Liang | 8d97e71 | 2020-10-01 06:57:46 -0700 | [diff] [blame] | 1954 | if (sample_type & PERF_SAMPLE_DATA_PAGE_SIZE) |
| 1955 | size += sizeof(data->data_page_size); |
| 1956 | |
Stephane Eranian | 995f088 | 2020-10-01 06:57:49 -0700 | [diff] [blame] | 1957 | if (sample_type & PERF_SAMPLE_CODE_PAGE_SIZE) |
| 1958 | size += sizeof(data->code_page_size); |
| 1959 | |
Arnaldo Carvalho de Melo | 6844c09 | 2010-12-03 16:36:35 -0200 | [diff] [blame] | 1960 | event->header_size = size; |
| 1961 | } |
| 1962 | |
Peter Zijlstra | a723968 | 2015-09-09 19:06:33 +0200 | [diff] [blame] | 1963 | /* |
| 1964 | * Called at perf_event creation and when events are attached/detached from a |
| 1965 | * group. |
| 1966 | */ |
| 1967 | static void perf_event__header_size(struct perf_event *event) |
| 1968 | { |
| 1969 | __perf_event_read_size(event, |
| 1970 | event->group_leader->nr_siblings); |
| 1971 | __perf_event_header_size(event, event->attr.sample_type); |
| 1972 | } |
| 1973 | |
Arnaldo Carvalho de Melo | 6844c09 | 2010-12-03 16:36:35 -0200 | [diff] [blame] | 1974 | static void perf_event__id_header_size(struct perf_event *event) |
| 1975 | { |
| 1976 | struct perf_sample_data *data; |
| 1977 | u64 sample_type = event->attr.sample_type; |
| 1978 | u16 size = 0; |
| 1979 | |
Arnaldo Carvalho de Melo | c320c7b | 2010-10-20 12:50:11 -0200 | [diff] [blame] | 1980 | if (sample_type & PERF_SAMPLE_TID) |
| 1981 | size += sizeof(data->tid_entry); |
| 1982 | |
| 1983 | if (sample_type & PERF_SAMPLE_TIME) |
| 1984 | size += sizeof(data->time); |
| 1985 | |
Adrian Hunter | ff3d527 | 2013-08-27 11:23:07 +0300 | [diff] [blame] | 1986 | if (sample_type & PERF_SAMPLE_IDENTIFIER) |
| 1987 | size += sizeof(data->id); |
| 1988 | |
Arnaldo Carvalho de Melo | c320c7b | 2010-10-20 12:50:11 -0200 | [diff] [blame] | 1989 | if (sample_type & PERF_SAMPLE_ID) |
| 1990 | size += sizeof(data->id); |
| 1991 | |
| 1992 | if (sample_type & PERF_SAMPLE_STREAM_ID) |
| 1993 | size += sizeof(data->stream_id); |
| 1994 | |
| 1995 | if (sample_type & PERF_SAMPLE_CPU) |
| 1996 | size += sizeof(data->cpu_entry); |
| 1997 | |
Arnaldo Carvalho de Melo | 6844c09 | 2010-12-03 16:36:35 -0200 | [diff] [blame] | 1998 | event->id_header_size = size; |
Arnaldo Carvalho de Melo | c320c7b | 2010-10-20 12:50:11 -0200 | [diff] [blame] | 1999 | } |
| 2000 | |
Peter Zijlstra | a723968 | 2015-09-09 19:06:33 +0200 | [diff] [blame] | 2001 | static bool perf_event_validate_size(struct perf_event *event) |
| 2002 | { |
| 2003 | /* |
 | 2004 | * The values computed here will be overwritten when we actually |
| 2005 | * attach the event. |
| 2006 | */ |
| 2007 | __perf_event_read_size(event, event->group_leader->nr_siblings + 1); |
| 2008 | __perf_event_header_size(event, event->attr.sample_type & ~PERF_SAMPLE_READ); |
| 2009 | perf_event__id_header_size(event); |
| 2010 | |
| 2011 | /* |
| 2012 | * Sum the lot; should not exceed the 64k limit we have on records. |
| 2013 | * Conservative limit to allow for callchains and other variable fields. |
| 2014 | */ |
| 2015 | if (event->read_size + event->header_size + |
| 2016 | event->id_header_size + sizeof(struct perf_event_header) >= 16*1024) |
| 2017 | return false; |
| 2018 | |
| 2019 | return true; |
| 2020 | } |
| 2021 | |
Peter Zijlstra | 8a49542 | 2010-05-27 15:47:49 +0200 | [diff] [blame] | 2022 | static void perf_group_attach(struct perf_event *event) |
| 2023 | { |
Arnaldo Carvalho de Melo | c320c7b | 2010-10-20 12:50:11 -0200 | [diff] [blame] | 2024 | struct perf_event *group_leader = event->group_leader, *pos; |
Peter Zijlstra | 8a49542 | 2010-05-27 15:47:49 +0200 | [diff] [blame] | 2025 | |
Peter Zijlstra | a76a82a | 2017-01-26 16:39:55 +0100 | [diff] [blame] | 2026 | lockdep_assert_held(&event->ctx->lock); |
| 2027 | |
Peter Zijlstra | 74c3337 | 2010-10-15 11:40:29 +0200 | [diff] [blame] | 2028 | /* |
| 2029 | * We can have double attach due to group movement in perf_event_open. |
| 2030 | */ |
| 2031 | if (event->attach_state & PERF_ATTACH_GROUP) |
| 2032 | return; |
| 2033 | |
Peter Zijlstra | 8a49542 | 2010-05-27 15:47:49 +0200 | [diff] [blame] | 2034 | event->attach_state |= PERF_ATTACH_GROUP; |
| 2035 | |
| 2036 | if (group_leader == event) |
| 2037 | return; |
| 2038 | |
Peter Zijlstra | 652884f | 2015-01-23 11:20:10 +0100 | [diff] [blame] | 2039 | WARN_ON_ONCE(group_leader->ctx != event->ctx); |
| 2040 | |
David Carrillo-Cisneros | 4ff6a8d | 2016-08-17 13:55:05 -0700 | [diff] [blame] | 2041 | group_leader->group_caps &= event->event_caps; |
Peter Zijlstra | 8a49542 | 2010-05-27 15:47:49 +0200 | [diff] [blame] | 2042 | |
Peter Zijlstra | 8343aae | 2017-11-13 14:28:33 +0100 | [diff] [blame] | 2043 | list_add_tail(&event->sibling_list, &group_leader->sibling_list); |
Peter Zijlstra | 8a49542 | 2010-05-27 15:47:49 +0200 | [diff] [blame] | 2044 | group_leader->nr_siblings++; |
Arnaldo Carvalho de Melo | c320c7b | 2010-10-20 12:50:11 -0200 | [diff] [blame] | 2045 | |
| 2046 | perf_event__header_size(group_leader); |
| 2047 | |
Peter Zijlstra | edb3959 | 2018-03-15 17:36:56 +0100 | [diff] [blame] | 2048 | for_each_sibling_event(pos, group_leader) |
Arnaldo Carvalho de Melo | c320c7b | 2010-10-20 12:50:11 -0200 | [diff] [blame] | 2049 | perf_event__header_size(pos); |
Peter Zijlstra | 8a49542 | 2010-05-27 15:47:49 +0200 | [diff] [blame] | 2050 | } |
| 2051 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2052 | /* |
Tobias Tefke | 788faab | 2018-07-09 12:57:15 +0200 | [diff] [blame] | 2053 | * Remove an event from the lists for its context. |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2054 | * Must be called with ctx->mutex and ctx->lock held. |
| 2055 | */ |
| 2056 | static void |
| 2057 | list_del_event(struct perf_event *event, struct perf_event_context *ctx) |
| 2058 | { |
Peter Zijlstra | 652884f | 2015-01-23 11:20:10 +0100 | [diff] [blame] | 2059 | WARN_ON_ONCE(event->ctx != ctx); |
| 2060 | lockdep_assert_held(&ctx->lock); |
| 2061 | |
Peter Zijlstra | 8a49542 | 2010-05-27 15:47:49 +0200 | [diff] [blame] | 2062 | /* |
| 2063 | * We can have double detach due to exit/hot-unplug + close. |
| 2064 | */ |
| 2065 | if (!(event->attach_state & PERF_ATTACH_CONTEXT)) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2066 | return; |
Peter Zijlstra | 8a49542 | 2010-05-27 15:47:49 +0200 | [diff] [blame] | 2067 | |
| 2068 | event->attach_state &= ~PERF_ATTACH_CONTEXT; |
| 2069 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2070 | ctx->nr_events--; |
Rob Herring | 82ff0c0 | 2021-12-08 14:11:21 -0600 | [diff] [blame] | 2071 | if (event->hw.flags & PERF_EVENT_FLAG_USER_READ_CNT) |
| 2072 | ctx->nr_user--; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2073 | if (event->attr.inherit_stat) |
| 2074 | ctx->nr_stat--; |
| 2075 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2076 | list_del_rcu(&event->event_entry); |
| 2077 | |
Peter Zijlstra | 8a49542 | 2010-05-27 15:47:49 +0200 | [diff] [blame] | 2078 | if (event->group_leader == event) |
Alexey Budankov | 8e1a203 | 2017-09-08 11:47:03 +0300 | [diff] [blame] | 2079 | del_event_from_groups(event, ctx); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2080 | |
Stephane Eranian | b2e74a2 | 2009-11-26 09:24:30 -0800 | [diff] [blame] | 2081 | /* |
 | 2082 | * If the event was in error state, keep it |
 | 2083 | * that way; otherwise bogus counts will be |
 | 2084 | * returned on read(). The only way out of |
 | 2085 | * the error state is explicit re-enabling |
 | 2086 | * of the event. |
| 2087 | */ |
Peter Zijlstra | 33238c5 | 2020-03-18 20:33:37 +0100 | [diff] [blame] | 2088 | if (event->state > PERF_EVENT_STATE_OFF) { |
| 2089 | perf_cgroup_event_disable(event, ctx); |
Peter Zijlstra | 0d3d73a | 2017-09-05 14:16:28 +0200 | [diff] [blame] | 2090 | perf_event_set_state(event, PERF_EVENT_STATE_OFF); |
Peter Zijlstra | 33238c5 | 2020-03-18 20:33:37 +0100 | [diff] [blame] | 2091 | } |
Peter Zijlstra | 5a3126d | 2013-10-07 17:12:48 +0200 | [diff] [blame] | 2092 | |
| 2093 | ctx->generation++; |
Peter Zijlstra | 050735b | 2010-05-11 11:51:53 +0200 | [diff] [blame] | 2094 | } |
| 2095 | |
Alexander Shishkin | ab43762 | 2019-08-06 11:46:00 +0300 | [diff] [blame] | 2096 | static int |
| 2097 | perf_aux_output_match(struct perf_event *event, struct perf_event *aux_event) |
| 2098 | { |
| 2099 | if (!has_aux(aux_event)) |
| 2100 | return 0; |
| 2101 | |
| 2102 | if (!event->pmu->aux_output_match) |
| 2103 | return 0; |
| 2104 | |
| 2105 | return event->pmu->aux_output_match(aux_event); |
| 2106 | } |
| 2107 | |
| 2108 | static void put_event(struct perf_event *event); |
| 2109 | static void event_sched_out(struct perf_event *event, |
| 2110 | struct perf_cpu_context *cpuctx, |
| 2111 | struct perf_event_context *ctx); |
| 2112 | |
| 2113 | static void perf_put_aux_event(struct perf_event *event) |
| 2114 | { |
| 2115 | struct perf_event_context *ctx = event->ctx; |
| 2116 | struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); |
| 2117 | struct perf_event *iter; |
| 2118 | |
| 2119 | /* |
 | 2120 | * If the event uses an aux_event, tear down the link. |
| 2121 | */ |
| 2122 | if (event->aux_event) { |
| 2123 | iter = event->aux_event; |
| 2124 | event->aux_event = NULL; |
| 2125 | put_event(iter); |
| 2126 | return; |
| 2127 | } |
| 2128 | |
| 2129 | /* |
| 2130 | * If the event is an aux_event, tear down all links to |
| 2131 | * it from other events. |
| 2132 | */ |
| 2133 | for_each_sibling_event(iter, event->group_leader) { |
| 2134 | if (iter->aux_event != event) |
| 2135 | continue; |
| 2136 | |
| 2137 | iter->aux_event = NULL; |
| 2138 | put_event(event); |
| 2139 | |
| 2140 | /* |
| 2141 | * If it's ACTIVE, schedule it out and put it into ERROR |
| 2142 | * state so that we don't try to schedule it again. Note |
| 2143 | * that perf_event_enable() will clear the ERROR status. |
| 2144 | */ |
| 2145 | event_sched_out(iter, cpuctx, ctx); |
| 2146 | perf_event_set_state(event, PERF_EVENT_STATE_ERROR); |
| 2147 | } |
| 2148 | } |
| 2149 | |
Alexander Shishkin | a4faf00 | 2019-10-25 17:08:33 +0300 | [diff] [blame] | 2150 | static bool perf_need_aux_event(struct perf_event *event) |
| 2151 | { |
| 2152 | return !!event->attr.aux_output || !!event->attr.aux_sample_size; |
| 2153 | } |
| 2154 | |
Alexander Shishkin | ab43762 | 2019-08-06 11:46:00 +0300 | [diff] [blame] | 2155 | static int perf_get_aux_event(struct perf_event *event, |
| 2156 | struct perf_event *group_leader) |
| 2157 | { |
| 2158 | /* |
| 2159 | * Our group leader must be an aux event if we want to be |
| 2160 | * an aux_output. This way, the aux event will precede its |
| 2161 | * aux_output events in the group, and therefore will always |
| 2162 | * schedule first. |
| 2163 | */ |
| 2164 | if (!group_leader) |
| 2165 | return 0; |
| 2166 | |
Alexander Shishkin | a4faf00 | 2019-10-25 17:08:33 +0300 | [diff] [blame] | 2167 | /* |
| 2168 | * aux_output and aux_sample_size are mutually exclusive. |
| 2169 | */ |
| 2170 | if (event->attr.aux_output && event->attr.aux_sample_size) |
| 2171 | return 0; |
| 2172 | |
| 2173 | if (event->attr.aux_output && |
| 2174 | !perf_aux_output_match(event, group_leader)) |
| 2175 | return 0; |
| 2176 | |
| 2177 | if (event->attr.aux_sample_size && !group_leader->pmu->snapshot_aux) |
Alexander Shishkin | ab43762 | 2019-08-06 11:46:00 +0300 | [diff] [blame] | 2178 | return 0; |
| 2179 | |
| 2180 | if (!atomic_long_inc_not_zero(&group_leader->refcount)) |
| 2181 | return 0; |
| 2182 | |
| 2183 | /* |
| 2184 | * Link aux_outputs to their aux event; this is undone in |
| 2185 | * perf_group_detach() by perf_put_aux_event(). When the |
 | 2186 | * group is torn down, the aux_output events lose their |
| 2187 | * link to the aux_event and can't schedule any more. |
| 2188 | */ |
| 2189 | event->aux_event = group_leader; |
| 2190 | |
| 2191 | return 1; |
| 2192 | } |
| 2193 | |
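A sketch of how this pairing is typically exercised from userspace may help; the choice of Intel PT as the AUX-capable leader and the exact field values are assumptions for illustration, not taken from this file:
/*
 *	// AUX-capable group leader, e.g. intel_pt
 *	pt_fd = perf_event_open(&pt_attr, pid, cpu, -1, 0);
 *
 *	// sampling event whose records should carry AUX data
 *	attr.aux_output = 1;
 *	ev_fd = perf_event_open(&attr, pid, cpu, pt_fd, 0);
 *
 * perf_get_aux_event() is what accepts or rejects such a pairing, using
 * the pmu->aux_output_match() and pmu->snapshot_aux checks above.
 */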
Peter Zijlstra | ab6f824 | 2019-08-07 11:17:00 +0200 | [diff] [blame] | 2194 | static inline struct list_head *get_event_list(struct perf_event *event) |
| 2195 | { |
| 2196 | struct perf_event_context *ctx = event->ctx; |
| 2197 | return event->attr.pinned ? &ctx->pinned_active : &ctx->flexible_active; |
| 2198 | } |
| 2199 | |
Kan Liang | 9f0c4fa | 2020-07-23 10:11:10 -0700 | [diff] [blame] | 2200 | /* |
| 2201 | * Events that have PERF_EV_CAP_SIBLING require being part of a group and |
| 2202 | * cannot exist on their own, schedule them out and move them into the ERROR |
| 2203 | * state. Also see _perf_event_enable(), it will not be able to recover |
| 2204 | * this ERROR state. |
| 2205 | */ |
| 2206 | static inline void perf_remove_sibling_event(struct perf_event *event) |
| 2207 | { |
| 2208 | struct perf_event_context *ctx = event->ctx; |
| 2209 | struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); |
| 2210 | |
| 2211 | event_sched_out(event, cpuctx, ctx); |
| 2212 | perf_event_set_state(event, PERF_EVENT_STATE_ERROR); |
| 2213 | } |
| 2214 | |
Peter Zijlstra | 8a49542 | 2010-05-27 15:47:49 +0200 | [diff] [blame] | 2215 | static void perf_group_detach(struct perf_event *event) |
Peter Zijlstra | 050735b | 2010-05-11 11:51:53 +0200 | [diff] [blame] | 2216 | { |
Kan Liang | 9f0c4fa | 2020-07-23 10:11:10 -0700 | [diff] [blame] | 2217 | struct perf_event *leader = event->group_leader; |
Peter Zijlstra | 050735b | 2010-05-11 11:51:53 +0200 | [diff] [blame] | 2218 | struct perf_event *sibling, *tmp; |
Peter Zijlstra | 6668128 | 2017-11-13 14:28:38 +0100 | [diff] [blame] | 2219 | struct perf_event_context *ctx = event->ctx; |
Peter Zijlstra | 8a49542 | 2010-05-27 15:47:49 +0200 | [diff] [blame] | 2220 | |
Peter Zijlstra | 6668128 | 2017-11-13 14:28:38 +0100 | [diff] [blame] | 2221 | lockdep_assert_held(&ctx->lock); |
Peter Zijlstra | a76a82a | 2017-01-26 16:39:55 +0100 | [diff] [blame] | 2222 | |
Peter Zijlstra | 8a49542 | 2010-05-27 15:47:49 +0200 | [diff] [blame] | 2223 | /* |
| 2224 | * We can have double detach due to exit/hot-unplug + close. |
| 2225 | */ |
| 2226 | if (!(event->attach_state & PERF_ATTACH_GROUP)) |
| 2227 | return; |
| 2228 | |
| 2229 | event->attach_state &= ~PERF_ATTACH_GROUP; |
| 2230 | |
Alexander Shishkin | ab43762 | 2019-08-06 11:46:00 +0300 | [diff] [blame] | 2231 | perf_put_aux_event(event); |
| 2232 | |
Peter Zijlstra | 8a49542 | 2010-05-27 15:47:49 +0200 | [diff] [blame] | 2233 | /* |
| 2234 | * If this is a sibling, remove it from its group. |
| 2235 | */ |
Kan Liang | 9f0c4fa | 2020-07-23 10:11:10 -0700 | [diff] [blame] | 2236 | if (leader != event) { |
Peter Zijlstra | 8343aae | 2017-11-13 14:28:33 +0100 | [diff] [blame] | 2237 | list_del_init(&event->sibling_list); |
Peter Zijlstra | 8a49542 | 2010-05-27 15:47:49 +0200 | [diff] [blame] | 2238 | event->group_leader->nr_siblings--; |
Arnaldo Carvalho de Melo | c320c7b | 2010-10-20 12:50:11 -0200 | [diff] [blame] | 2239 | goto out; |
Peter Zijlstra | 8a49542 | 2010-05-27 15:47:49 +0200 | [diff] [blame] | 2240 | } |
| 2241 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2242 | /* |
| 2243 | * If this was a group event with sibling events then |
| 2244 | * upgrade the siblings to singleton events by adding them |
Peter Zijlstra | 8a49542 | 2010-05-27 15:47:49 +0200 | [diff] [blame] | 2245 | * to whatever list we are on. |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2246 | */ |
Peter Zijlstra | 8343aae | 2017-11-13 14:28:33 +0100 | [diff] [blame] | 2247 | list_for_each_entry_safe(sibling, tmp, &event->sibling_list, sibling_list) { |
Alexey Budankov | 8e1a203 | 2017-09-08 11:47:03 +0300 | [diff] [blame] | 2248 | |
Kan Liang | 9f0c4fa | 2020-07-23 10:11:10 -0700 | [diff] [blame] | 2249 | if (sibling->event_caps & PERF_EV_CAP_SIBLING) |
| 2250 | perf_remove_sibling_event(sibling); |
| 2251 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2252 | sibling->group_leader = sibling; |
Mark Rutland | 2486836 | 2018-03-16 12:51:40 +0000 | [diff] [blame] | 2253 | list_del_init(&sibling->sibling_list); |
Frederic Weisbecker | d6f962b | 2010-01-10 01:25:51 +0100 | [diff] [blame] | 2254 | |
| 2255 | /* Inherit group flags from the previous leader */ |
David Carrillo-Cisneros | 4ff6a8d | 2016-08-17 13:55:05 -0700 | [diff] [blame] | 2256 | sibling->group_caps = event->group_caps; |
Peter Zijlstra | 652884f | 2015-01-23 11:20:10 +0100 | [diff] [blame] | 2257 | |
Alexey Budankov | 8e1a203 | 2017-09-08 11:47:03 +0300 | [diff] [blame] | 2258 | if (!RB_EMPTY_NODE(&event->group_node)) { |
Alexey Budankov | 8e1a203 | 2017-09-08 11:47:03 +0300 | [diff] [blame] | 2259 | add_event_to_groups(sibling, event->ctx); |
Peter Zijlstra | 6668128 | 2017-11-13 14:28:38 +0100 | [diff] [blame] | 2260 | |
Peter Zijlstra | ab6f824 | 2019-08-07 11:17:00 +0200 | [diff] [blame] | 2261 | if (sibling->state == PERF_EVENT_STATE_ACTIVE) |
| 2262 | list_add_tail(&sibling->active_list, get_event_list(sibling)); |
Alexey Budankov | 8e1a203 | 2017-09-08 11:47:03 +0300 | [diff] [blame] | 2263 | } |
| 2264 | |
Peter Zijlstra | 652884f | 2015-01-23 11:20:10 +0100 | [diff] [blame] | 2265 | WARN_ON_ONCE(sibling->ctx != event->ctx); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2266 | } |
Arnaldo Carvalho de Melo | c320c7b | 2010-10-20 12:50:11 -0200 | [diff] [blame] | 2267 | |
| 2268 | out: |
Kan Liang | 9f0c4fa | 2020-07-23 10:11:10 -0700 | [diff] [blame] | 2269 | for_each_sibling_event(tmp, leader) |
Arnaldo Carvalho de Melo | c320c7b | 2010-10-20 12:50:11 -0200 | [diff] [blame] | 2270 | perf_event__header_size(tmp); |
Kan Liang | 9f0c4fa | 2020-07-23 10:11:10 -0700 | [diff] [blame] | 2271 | |
| 2272 | perf_event__header_size(leader); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2273 | } |
| 2274 | |
Peter Zijlstra | ef54c1a | 2021-04-08 12:35:56 +0200 | [diff] [blame] | 2275 | static void sync_child_event(struct perf_event *child_event); |
| 2276 | |
| 2277 | static void perf_child_detach(struct perf_event *event) |
| 2278 | { |
| 2279 | struct perf_event *parent_event = event->parent; |
| 2280 | |
| 2281 | if (!(event->attach_state & PERF_ATTACH_CHILD)) |
| 2282 | return; |
| 2283 | |
| 2284 | event->attach_state &= ~PERF_ATTACH_CHILD; |
| 2285 | |
| 2286 | if (WARN_ON_ONCE(!parent_event)) |
| 2287 | return; |
| 2288 | |
| 2289 | lockdep_assert_held(&parent_event->child_mutex); |
| 2290 | |
| 2291 | sync_child_event(event); |
| 2292 | list_del_init(&event->child_list); |
| 2293 | } |
| 2294 | |
Jiri Olsa | fadfe7b | 2014-08-01 14:33:02 +0200 | [diff] [blame] | 2295 | static bool is_orphaned_event(struct perf_event *event) |
| 2296 | { |
Peter Zijlstra | a69b0ca | 2016-02-24 18:45:44 +0100 | [diff] [blame] | 2297 | return event->state == PERF_EVENT_STATE_DEAD; |
Jiri Olsa | fadfe7b | 2014-08-01 14:33:02 +0200 | [diff] [blame] | 2298 | } |
| 2299 | |
Mark Rutland | 2c81a64 | 2016-06-14 16:10:41 +0100 | [diff] [blame] | 2300 | static inline int __pmu_filter_match(struct perf_event *event) |
Mark Rutland | 66eb579 | 2015-05-13 17:12:23 +0100 | [diff] [blame] | 2301 | { |
| 2302 | struct pmu *pmu = event->pmu; |
| 2303 | return pmu->filter_match ? pmu->filter_match(event) : 1; |
| 2304 | } |
| 2305 | |
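To make the callback concrete, here is a minimal sketch of what a heterogeneous-CPU PMU's filter_match might look like; the type and field names are invented for illustration and do not appear in this file:
/*
 *	static int example_pmu_filter_match(struct perf_event *event)
 *	{
 *		struct example_pmu *epmu = to_example_pmu(event->pmu);
 *
 *		// only schedulable on CPUs this PMU actually covers
 *		return cpumask_test_cpu(smp_processor_id(),
 *					&epmu->supported_cpus);
 *	}
 */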
Mark Rutland | 2c81a64 | 2016-06-14 16:10:41 +0100 | [diff] [blame] | 2306 | /* |
| 2307 | * Check whether we should attempt to schedule an event group based on |
| 2308 | * PMU-specific filtering. An event group can consist of HW and SW events, |
| 2309 | * potentially with a SW leader, so we must check all the filters, to |
| 2310 | * determine whether a group is schedulable: |
| 2311 | */ |
| 2312 | static inline int pmu_filter_match(struct perf_event *event) |
| 2313 | { |
Peter Zijlstra | edb3959 | 2018-03-15 17:36:56 +0100 | [diff] [blame] | 2314 | struct perf_event *sibling; |
Mark Rutland | 2c81a64 | 2016-06-14 16:10:41 +0100 | [diff] [blame] | 2315 | |
| 2316 | if (!__pmu_filter_match(event)) |
| 2317 | return 0; |
| 2318 | |
Peter Zijlstra | edb3959 | 2018-03-15 17:36:56 +0100 | [diff] [blame] | 2319 | for_each_sibling_event(sibling, event) { |
| 2320 | if (!__pmu_filter_match(sibling)) |
Mark Rutland | 2c81a64 | 2016-06-14 16:10:41 +0100 | [diff] [blame] | 2321 | return 0; |
| 2322 | } |
| 2323 | |
| 2324 | return 1; |
| 2325 | } |
| 2326 | |
Stephane Eranian | fa66f07 | 2010-08-26 16:40:01 +0200 | [diff] [blame] | 2327 | static inline int |
| 2328 | event_filter_match(struct perf_event *event) |
| 2329 | { |
Peter Zijlstra | 0b8f1e2 | 2016-08-04 14:37:24 +0200 | [diff] [blame] | 2330 | return (event->cpu == -1 || event->cpu == smp_processor_id()) && |
| 2331 | perf_cgroup_match(event) && pmu_filter_match(event); |
Stephane Eranian | fa66f07 | 2010-08-26 16:40:01 +0200 | [diff] [blame] | 2332 | } |
| 2333 | |
Stephane Eranian | 9ffcfa6 | 2010-10-20 15:25:01 +0200 | [diff] [blame] | 2334 | static void |
| 2335 | event_sched_out(struct perf_event *event, |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2336 | struct perf_cpu_context *cpuctx, |
| 2337 | struct perf_event_context *ctx) |
| 2338 | { |
Peter Zijlstra | 0d3d73a | 2017-09-05 14:16:28 +0200 | [diff] [blame] | 2339 | enum perf_event_state state = PERF_EVENT_STATE_INACTIVE; |
Peter Zijlstra | 652884f | 2015-01-23 11:20:10 +0100 | [diff] [blame] | 2340 | |
| 2341 | WARN_ON_ONCE(event->ctx != ctx); |
| 2342 | lockdep_assert_held(&ctx->lock); |
| 2343 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2344 | if (event->state != PERF_EVENT_STATE_ACTIVE) |
Stephane Eranian | 9ffcfa6 | 2010-10-20 15:25:01 +0200 | [diff] [blame] | 2345 | return; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2346 | |
Peter Zijlstra | 6668128 | 2017-11-13 14:28:38 +0100 | [diff] [blame] | 2347 | /* |
| 2348 | * Asymmetry; we only schedule events _IN_ through ctx_sched_in(), but |
| 2349 | * we can schedule events _OUT_ individually through things like |
| 2350 | * __perf_remove_from_context(). |
| 2351 | */ |
| 2352 | list_del_init(&event->active_list); |
| 2353 | |
Alexander Shishkin | 4437727 | 2013-12-16 14:17:36 +0200 | [diff] [blame] | 2354 | perf_pmu_disable(event->pmu); |
| 2355 | |
Peter Zijlstra | 28a967c | 2016-02-24 18:45:46 +0100 | [diff] [blame] | 2356 | event->pmu->del(event, 0); |
| 2357 | event->oncpu = -1; |
Peter Zijlstra | 0d3d73a | 2017-09-05 14:16:28 +0200 | [diff] [blame] | 2358 | |
Peter Zijlstra | 1d54ad9 | 2019-04-04 15:03:00 +0200 | [diff] [blame] | 2359 | if (READ_ONCE(event->pending_disable) >= 0) { |
| 2360 | WRITE_ONCE(event->pending_disable, -1); |
Peter Zijlstra | 33238c5 | 2020-03-18 20:33:37 +0100 | [diff] [blame] | 2361 | perf_cgroup_event_disable(event, ctx); |
Peter Zijlstra | 0d3d73a | 2017-09-05 14:16:28 +0200 | [diff] [blame] | 2362 | state = PERF_EVENT_STATE_OFF; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2363 | } |
Peter Zijlstra | 0d3d73a | 2017-09-05 14:16:28 +0200 | [diff] [blame] | 2364 | perf_event_set_state(event, state); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2365 | |
| 2366 | if (!is_software_event(event)) |
| 2367 | cpuctx->active_oncpu--; |
Mark Rutland | 2fde4f9 | 2015-01-07 15:01:54 +0000 | [diff] [blame] | 2368 | if (!--ctx->nr_active) |
| 2369 | perf_event_ctx_deactivate(ctx); |
Peter Zijlstra | 0f5a260 | 2011-11-16 14:38:16 +0100 | [diff] [blame] | 2370 | if (event->attr.freq && event->attr.sample_freq) |
| 2371 | ctx->nr_freq--; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2372 | if (event->attr.exclusive || !cpuctx->active_oncpu) |
| 2373 | cpuctx->exclusive = 0; |
Alexander Shishkin | 4437727 | 2013-12-16 14:17:36 +0200 | [diff] [blame] | 2374 | |
| 2375 | perf_pmu_enable(event->pmu); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2376 | } |
| 2377 | |
| 2378 | static void |
| 2379 | group_sched_out(struct perf_event *group_event, |
| 2380 | struct perf_cpu_context *cpuctx, |
| 2381 | struct perf_event_context *ctx) |
| 2382 | { |
| 2383 | struct perf_event *event; |
Peter Zijlstra | 0d3d73a | 2017-09-05 14:16:28 +0200 | [diff] [blame] | 2384 | |
| 2385 | if (group_event->state != PERF_EVENT_STATE_ACTIVE) |
| 2386 | return; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2387 | |
Mark Rutland | 3f005e7 | 2016-07-26 18:12:21 +0100 | [diff] [blame] | 2388 | perf_pmu_disable(ctx->pmu); |
| 2389 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2390 | event_sched_out(group_event, cpuctx, ctx); |
| 2391 | |
| 2392 | /* |
| 2393 | * Schedule out siblings (if any): |
| 2394 | */ |
Peter Zijlstra | edb3959 | 2018-03-15 17:36:56 +0100 | [diff] [blame] | 2395 | for_each_sibling_event(event, group_event) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2396 | event_sched_out(event, cpuctx, ctx); |
| 2397 | |
Mark Rutland | 3f005e7 | 2016-07-26 18:12:21 +0100 | [diff] [blame] | 2398 | perf_pmu_enable(ctx->pmu); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2399 | } |
| 2400 | |
Peter Zijlstra | 45a0e07 | 2016-01-26 13:09:48 +0100 | [diff] [blame] | 2401 | #define DETACH_GROUP 0x01UL |
Peter Zijlstra | ef54c1a | 2021-04-08 12:35:56 +0200 | [diff] [blame] | 2402 | #define DETACH_CHILD 0x02UL |
Peter Zijlstra | 0017960 | 2015-11-30 16:26:35 +0100 | [diff] [blame] | 2403 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2404 | /* |
| 2405 | * Cross CPU call to remove a performance event |
| 2406 | * |
| 2407 | * We disable the event on the hardware level first. After that we |
| 2408 | * remove it from the context list. |
| 2409 | */ |
Peter Zijlstra | fae3fde | 2016-01-11 15:00:50 +0100 | [diff] [blame] | 2410 | static void |
| 2411 | __perf_remove_from_context(struct perf_event *event, |
| 2412 | struct perf_cpu_context *cpuctx, |
| 2413 | struct perf_event_context *ctx, |
| 2414 | void *info) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2415 | { |
Peter Zijlstra | 45a0e07 | 2016-01-26 13:09:48 +0100 | [diff] [blame] | 2416 | unsigned long flags = (unsigned long)info; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2417 | |
Peter Zijlstra | 3c5c871 | 2017-09-05 13:44:51 +0200 | [diff] [blame] | 2418 | if (ctx->is_active & EVENT_TIME) { |
| 2419 | update_context_time(ctx); |
Peter Zijlstra | 09f5e7d | 2021-12-20 13:19:52 +0100 | [diff] [blame] | 2420 | update_cgrp_time_from_cpuctx(cpuctx, false); |
Peter Zijlstra | 3c5c871 | 2017-09-05 13:44:51 +0200 | [diff] [blame] | 2421 | } |
| 2422 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2423 | event_sched_out(event, cpuctx, ctx); |
Peter Zijlstra | 45a0e07 | 2016-01-26 13:09:48 +0100 | [diff] [blame] | 2424 | if (flags & DETACH_GROUP) |
Peter Zijlstra | 46ce0fe | 2014-05-02 16:56:01 +0200 | [diff] [blame] | 2425 | perf_group_detach(event); |
Peter Zijlstra | ef54c1a | 2021-04-08 12:35:56 +0200 | [diff] [blame] | 2426 | if (flags & DETACH_CHILD) |
| 2427 | perf_child_detach(event); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2428 | list_del_event(event, ctx); |
Peter Zijlstra | fe4b04f | 2011-02-02 13:19:09 +0100 | [diff] [blame] | 2429 | |
Peter Zijlstra | 39a4364 | 2016-01-11 12:46:35 +0100 | [diff] [blame] | 2430 | if (!ctx->nr_events && ctx->is_active) { |
Peter Zijlstra | 09f5e7d | 2021-12-20 13:19:52 +0100 | [diff] [blame] | 2431 | if (ctx == &cpuctx->ctx) |
| 2432 | update_cgrp_time_from_cpuctx(cpuctx, true); |
| 2433 | |
Peter Zijlstra | fe4b04f | 2011-02-02 13:19:09 +0100 | [diff] [blame] | 2434 | ctx->is_active = 0; |
Peter Zijlstra | 90c91df | 2020-03-05 13:38:51 +0100 | [diff] [blame] | 2435 | ctx->rotate_necessary = 0; |
Peter Zijlstra | 39a4364 | 2016-01-11 12:46:35 +0100 | [diff] [blame] | 2436 | if (ctx->task) { |
| 2437 | WARN_ON_ONCE(cpuctx->task_ctx != ctx); |
| 2438 | cpuctx->task_ctx = NULL; |
| 2439 | } |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2440 | } |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2441 | } |
| 2442 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2443 | /* |
| 2444 | * Remove the event from a task's (or a CPU's) list of events. |
| 2445 | * |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2446 | * If event->ctx is a cloned context, callers must make sure that |
| 2447 | * every task struct that event->ctx->task could possibly point to |
| 2448 | * remains valid. This is OK when called from perf_release since |
| 2449 | * that only calls us on the top-level context, which can't be a clone. |
| 2450 | * When called from perf_event_exit_task, it's OK because the |
| 2451 | * context has been detached from its task. |
| 2452 | */ |
Peter Zijlstra | 45a0e07 | 2016-01-26 13:09:48 +0100 | [diff] [blame] | 2453 | static void perf_remove_from_context(struct perf_event *event, unsigned long flags) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2454 | { |
Peter Zijlstra | a76a82a | 2017-01-26 16:39:55 +0100 | [diff] [blame] | 2455 | struct perf_event_context *ctx = event->ctx; |
| 2456 | |
| 2457 | lockdep_assert_held(&ctx->mutex); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2458 | |
Peter Zijlstra | a76a82a | 2017-01-26 16:39:55 +0100 | [diff] [blame] | 2459 | /* |
Peter Zijlstra | ef54c1a | 2021-04-08 12:35:56 +0200 | [diff] [blame] | 2460 | * Because of perf_event_exit_task(), perf_remove_from_context() ought |
| 2461 | * to work in the face of TASK_TOMBSTONE, unlike every other |
| 2462 | * event_function_call() user. |
Peter Zijlstra | a76a82a | 2017-01-26 16:39:55 +0100 | [diff] [blame] | 2463 | */ |
Peter Zijlstra | ef54c1a | 2021-04-08 12:35:56 +0200 | [diff] [blame] | 2464 | raw_spin_lock_irq(&ctx->lock); |
Namhyung Kim | c5de60c | 2022-01-24 11:58:08 -0800 | [diff] [blame] | 2465 | /* |
| 2466 | * Cgroup events are per-cpu events, and must IPI because of |
| 2467 | * cgrp_cpuctx_list. |
| 2468 | */ |
| 2469 | if (!ctx->is_active && !is_cgroup_event(event)) { |
Peter Zijlstra | ef54c1a | 2021-04-08 12:35:56 +0200 | [diff] [blame] | 2470 | __perf_remove_from_context(event, __get_cpu_context(ctx), |
| 2471 | ctx, (void *)flags); |
Peter Zijlstra | a76a82a | 2017-01-26 16:39:55 +0100 | [diff] [blame] | 2472 | raw_spin_unlock_irq(&ctx->lock); |
Peter Zijlstra | ef54c1a | 2021-04-08 12:35:56 +0200 | [diff] [blame] | 2473 | return; |
Peter Zijlstra | a76a82a | 2017-01-26 16:39:55 +0100 | [diff] [blame] | 2474 | } |
Peter Zijlstra | ef54c1a | 2021-04-08 12:35:56 +0200 | [diff] [blame] | 2475 | raw_spin_unlock_irq(&ctx->lock); |
| 2476 | |
| 2477 | event_function_call(event, __perf_remove_from_context, (void *)flags); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2478 | } |
| 2479 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2480 | /* |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2481 | * Cross CPU call to disable a performance event |
| 2482 | */ |
Peter Zijlstra | fae3fde | 2016-01-11 15:00:50 +0100 | [diff] [blame] | 2483 | static void __perf_event_disable(struct perf_event *event, |
| 2484 | struct perf_cpu_context *cpuctx, |
| 2485 | struct perf_event_context *ctx, |
| 2486 | void *info) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2487 | { |
Peter Zijlstra | fae3fde | 2016-01-11 15:00:50 +0100 | [diff] [blame] | 2488 | if (event->state < PERF_EVENT_STATE_INACTIVE) |
| 2489 | return; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2490 | |
Peter Zijlstra | 3c5c871 | 2017-09-05 13:44:51 +0200 | [diff] [blame] | 2491 | if (ctx->is_active & EVENT_TIME) { |
| 2492 | update_context_time(ctx); |
| 2493 | update_cgrp_time_from_event(event); |
| 2494 | } |
| 2495 | |
Peter Zijlstra | fae3fde | 2016-01-11 15:00:50 +0100 | [diff] [blame] | 2496 | if (event == event->group_leader) |
| 2497 | group_sched_out(event, cpuctx, ctx); |
| 2498 | else |
| 2499 | event_sched_out(event, cpuctx, ctx); |
Peter Zijlstra | 0d3d73a | 2017-09-05 14:16:28 +0200 | [diff] [blame] | 2500 | |
| 2501 | perf_event_set_state(event, PERF_EVENT_STATE_OFF); |
Peter Zijlstra | 33238c5 | 2020-03-18 20:33:37 +0100 | [diff] [blame] | 2502 | perf_cgroup_event_disable(event, ctx); |
Peter Zijlstra | 7b64801 | 2015-12-03 18:35:21 +0100 | [diff] [blame] | 2503 | } |
| 2504 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2505 | /* |
Tobias Tefke | 788faab | 2018-07-09 12:57:15 +0200 | [diff] [blame] | 2506 | * Disable an event. |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2507 | * |
| 2508 | * If event->ctx is a cloned context, callers must make sure that |
| 2509 | * every task struct that event->ctx->task could possibly point to |
Roy Ben Shlomo | 9f014e3 | 2019-09-20 20:12:53 +0300 | [diff] [blame] | 2510 | * remains valid. This condition is satisfied when called through |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2511 | * perf_event_for_each_child or perf_event_for_each because they |
| 2512 | * hold the top-level event's child_mutex, so any descendant that |
Peter Zijlstra | 8ba289b | 2016-01-26 13:06:56 +0100 | [diff] [blame] | 2513 | * goes to exit will block in perf_event_exit_event(). |
| 2514 | * |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2515 | * When called from perf_pending_event it's OK because event->ctx |
| 2516 | * is the current context on this CPU and preemption is disabled, |
| 2517 | * hence we can't get into perf_event_task_sched_out for this context. |
| 2518 | */ |
Peter Zijlstra | f63a8da | 2015-01-23 12:24:14 +0100 | [diff] [blame] | 2519 | static void _perf_event_disable(struct perf_event *event) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2520 | { |
| 2521 | struct perf_event_context *ctx = event->ctx; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2522 | |
Thomas Gleixner | e625cce1 | 2009-11-17 18:02:06 +0100 | [diff] [blame] | 2523 | raw_spin_lock_irq(&ctx->lock); |
Peter Zijlstra | 7b64801 | 2015-12-03 18:35:21 +0100 | [diff] [blame] | 2524 | if (event->state <= PERF_EVENT_STATE_OFF) { |
Thomas Gleixner | e625cce1 | 2009-11-17 18:02:06 +0100 | [diff] [blame] | 2525 | raw_spin_unlock_irq(&ctx->lock); |
Peter Zijlstra | 7b64801 | 2015-12-03 18:35:21 +0100 | [diff] [blame] | 2526 | return; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2527 | } |
Thomas Gleixner | e625cce1 | 2009-11-17 18:02:06 +0100 | [diff] [blame] | 2528 | raw_spin_unlock_irq(&ctx->lock); |
Peter Zijlstra | 7b64801 | 2015-12-03 18:35:21 +0100 | [diff] [blame] | 2529 | |
Peter Zijlstra | fae3fde | 2016-01-11 15:00:50 +0100 | [diff] [blame] | 2530 | event_function_call(event, __perf_event_disable, NULL); |
| 2531 | } |
| 2532 | |
| 2533 | void perf_event_disable_local(struct perf_event *event) |
| 2534 | { |
| 2535 | event_function_local(event, __perf_event_disable, NULL); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2536 | } |
Peter Zijlstra | f63a8da | 2015-01-23 12:24:14 +0100 | [diff] [blame] | 2537 | |
| 2538 | /* |
| 2539 | * Strictly speaking kernel users cannot create groups and therefore this |
| 2540 | * interface does not need the perf_event_ctx_lock() magic. |
| 2541 | */ |
| 2542 | void perf_event_disable(struct perf_event *event) |
| 2543 | { |
| 2544 | struct perf_event_context *ctx; |
| 2545 | |
| 2546 | ctx = perf_event_ctx_lock(event); |
| 2547 | _perf_event_disable(event); |
| 2548 | perf_event_ctx_unlock(event, ctx); |
| 2549 | } |
Robert Richter | dcfce4a | 2011-10-11 17:11:08 +0200 | [diff] [blame] | 2550 | EXPORT_SYMBOL_GPL(perf_event_disable); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2551 | |
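A hedged in-kernel usage sketch; the counter setup and do_uncounted_work() are assumptions for illustration only, not taken from this file:
/*
 *	event = perf_event_create_kernel_counter(&attr, cpu, NULL,
 *						 overflow_handler, NULL);
 *	...
 *	perf_event_disable(event);
 *	do_uncounted_work();		// region excluded from the count
 *	perf_event_enable(event);
 */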
Jiri Olsa | 5aab90c | 2016-10-26 11:48:24 +0200 | [diff] [blame] | 2552 | void perf_event_disable_inatomic(struct perf_event *event) |
| 2553 | { |
Peter Zijlstra | 1d54ad9 | 2019-04-04 15:03:00 +0200 | [diff] [blame] | 2554 | WRITE_ONCE(event->pending_disable, smp_processor_id()); |
| 2555 | /* can fail, see perf_pending_event_disable() */ |
Jiri Olsa | 5aab90c | 2016-10-26 11:48:24 +0200 | [diff] [blame] | 2556 | irq_work_queue(&event->pending); |
| 2557 | } |
| 2558 | |
Peter Zijlstra | 4fe757d | 2011-02-15 22:26:07 +0100 | [diff] [blame] | 2559 | #define MAX_INTERRUPTS (~0ULL) |
| 2560 | |
| 2561 | static void perf_log_throttle(struct perf_event *event, int enable); |
Alexander Shishkin | ec0d772 | 2015-01-14 14:18:23 +0200 | [diff] [blame] | 2562 | static void perf_log_itrace_start(struct perf_event *event); |
Peter Zijlstra | 4fe757d | 2011-02-15 22:26:07 +0100 | [diff] [blame] | 2563 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2564 | static int |
Stephane Eranian | 9ffcfa6 | 2010-10-20 15:25:01 +0200 | [diff] [blame] | 2565 | event_sched_in(struct perf_event *event, |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2566 | struct perf_cpu_context *cpuctx, |
Peter Zijlstra | 6e37738 | 2010-02-11 13:21:58 +0100 | [diff] [blame] | 2567 | struct perf_event_context *ctx) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2568 | { |
Alexander Shishkin | 4437727 | 2013-12-16 14:17:36 +0200 | [diff] [blame] | 2569 | int ret = 0; |
Stephane Eranian | 4158755 | 2011-01-03 18:20:01 +0200 | [diff] [blame] | 2570 | |
Peter Zijlstra | ab6f824 | 2019-08-07 11:17:00 +0200 | [diff] [blame] | 2571 | WARN_ON_ONCE(event->ctx != ctx); |
| 2572 | |
Peter Zijlstra | 6334241 | 2014-05-05 11:49:16 +0200 | [diff] [blame] | 2573 | lockdep_assert_held(&ctx->lock); |
| 2574 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2575 | if (event->state <= PERF_EVENT_STATE_OFF) |
| 2576 | return 0; |
| 2577 | |
Alexander Shishkin | 95ff4ca | 2015-12-02 18:41:11 +0200 | [diff] [blame] | 2578 | WRITE_ONCE(event->oncpu, smp_processor_id()); |
| 2579 | /* |
Peter Zijlstra | 0c1cbc1 | 2017-09-05 16:26:44 +0200 | [diff] [blame] | 2580 | * Order event::oncpu write to happen before the ACTIVE state is |
| 2581 | * visible. This allows perf_event_{stop,read}() to observe the correct |
| 2582 | * ->oncpu if it sees ACTIVE. |
Alexander Shishkin | 95ff4ca | 2015-12-02 18:41:11 +0200 | [diff] [blame] | 2583 | */ |
| 2584 | smp_wmb(); |
Peter Zijlstra | 0d3d73a | 2017-09-05 14:16:28 +0200 | [diff] [blame] | 2585 | perf_event_set_state(event, PERF_EVENT_STATE_ACTIVE); |
Peter Zijlstra | 4fe757d | 2011-02-15 22:26:07 +0100 | [diff] [blame] | 2586 | |
| 2587 | /* |
 | 2588 | * Unthrottle events: since we just scheduled, we might have missed |
 | 2589 | * several ticks already, and for a heavily scheduling task there is |
 | 2590 | * little guarantee it'll get a tick in a timely manner. |
| 2591 | */ |
| 2592 | if (unlikely(event->hw.interrupts == MAX_INTERRUPTS)) { |
| 2593 | perf_log_throttle(event, 1); |
| 2594 | event->hw.interrupts = 0; |
| 2595 | } |
| 2596 | |
Alexander Shishkin | 4437727 | 2013-12-16 14:17:36 +0200 | [diff] [blame] | 2597 | perf_pmu_disable(event->pmu); |
| 2598 | |
Alexander Shishkin | ec0d772 | 2015-01-14 14:18:23 +0200 | [diff] [blame] | 2599 | perf_log_itrace_start(event); |
| 2600 | |
Peter Zijlstra | a4eaf7f | 2010-06-16 14:37:10 +0200 | [diff] [blame] | 2601 | if (event->pmu->add(event, PERF_EF_START)) { |
Peter Zijlstra | 0d3d73a | 2017-09-05 14:16:28 +0200 | [diff] [blame] | 2602 | perf_event_set_state(event, PERF_EVENT_STATE_INACTIVE); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2603 | event->oncpu = -1; |
Alexander Shishkin | 4437727 | 2013-12-16 14:17:36 +0200 | [diff] [blame] | 2604 | ret = -EAGAIN; |
| 2605 | goto out; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2606 | } |
| 2607 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2608 | if (!is_software_event(event)) |
| 2609 | cpuctx->active_oncpu++; |
Mark Rutland | 2fde4f9 | 2015-01-07 15:01:54 +0000 | [diff] [blame] | 2610 | if (!ctx->nr_active++) |
| 2611 | perf_event_ctx_activate(ctx); |
Peter Zijlstra | 0f5a260 | 2011-11-16 14:38:16 +0100 | [diff] [blame] | 2612 | if (event->attr.freq && event->attr.sample_freq) |
| 2613 | ctx->nr_freq++; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2614 | |
| 2615 | if (event->attr.exclusive) |
| 2616 | cpuctx->exclusive = 1; |
| 2617 | |
Alexander Shishkin | 4437727 | 2013-12-16 14:17:36 +0200 | [diff] [blame] | 2618 | out: |
| 2619 | perf_pmu_enable(event->pmu); |
| 2620 | |
| 2621 | return ret; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2622 | } |
| 2623 | |
| 2624 | static int |
| 2625 | group_sched_in(struct perf_event *group_event, |
| 2626 | struct perf_cpu_context *cpuctx, |
Peter Zijlstra | 6e37738 | 2010-02-11 13:21:58 +0100 | [diff] [blame] | 2627 | struct perf_event_context *ctx) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2628 | { |
Lin Ming | 6bde9b6 | 2010-04-23 13:56:00 +0800 | [diff] [blame] | 2629 | struct perf_event *event, *partial_group = NULL; |
Peter Zijlstra | 4a23459 | 2014-02-24 12:43:31 +0100 | [diff] [blame] | 2630 | struct pmu *pmu = ctx->pmu; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2631 | |
| 2632 | if (group_event->state == PERF_EVENT_STATE_OFF) |
| 2633 | return 0; |
| 2634 | |
Sukadev Bhattiprolu | fbbe070 | 2015-09-03 20:07:45 -0700 | [diff] [blame] | 2635 | pmu->start_txn(pmu, PERF_PMU_TXN_ADD); |
Lin Ming | 6bde9b6 | 2010-04-23 13:56:00 +0800 | [diff] [blame] | 2636 | |
Peter Zijlstra | 251ff2d | 2020-10-29 16:29:15 +0100 | [diff] [blame] | 2637 | if (event_sched_in(group_event, cpuctx, ctx)) |
| 2638 | goto error; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2639 | |
| 2640 | /* |
| 2641 | * Schedule in siblings as one group (if any): |
| 2642 | */ |
Peter Zijlstra | edb3959 | 2018-03-15 17:36:56 +0100 | [diff] [blame] | 2643 | for_each_sibling_event(event, group_event) { |
Stephane Eranian | 9ffcfa6 | 2010-10-20 15:25:01 +0200 | [diff] [blame] | 2644 | if (event_sched_in(event, cpuctx, ctx)) { |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2645 | partial_group = event; |
| 2646 | goto group_error; |
| 2647 | } |
| 2648 | } |
| 2649 | |
Stephane Eranian | 9ffcfa6 | 2010-10-20 15:25:01 +0200 | [diff] [blame] | 2650 | if (!pmu->commit_txn(pmu)) |
Paul Mackerras | 6e85158 | 2010-05-08 20:58:00 +1000 | [diff] [blame] | 2651 | return 0; |
Stephane Eranian | 9ffcfa6 | 2010-10-20 15:25:01 +0200 | [diff] [blame] | 2652 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2653 | group_error: |
| 2654 | /* |
| 2655 | * Groups can be scheduled in as one unit only, so undo any |
| 2656 | * partial group before returning: |
Peter Zijlstra | 0d3d73a | 2017-09-05 14:16:28 +0200 | [diff] [blame] | 2657 | * The events up to the failed event are scheduled out normally. |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2658 | */ |
Peter Zijlstra | edb3959 | 2018-03-15 17:36:56 +0100 | [diff] [blame] | 2659 | for_each_sibling_event(event, group_event) { |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2660 | if (event == partial_group) |
Peter Zijlstra | 0d3d73a | 2017-09-05 14:16:28 +0200 | [diff] [blame] | 2661 | break; |
Stephane Eranian | d7842da | 2010-10-20 15:25:01 +0200 | [diff] [blame] | 2662 | |
Peter Zijlstra | 0d3d73a | 2017-09-05 14:16:28 +0200 | [diff] [blame] | 2663 | event_sched_out(event, cpuctx, ctx); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2664 | } |
Stephane Eranian | 9ffcfa6 | 2010-10-20 15:25:01 +0200 | [diff] [blame] | 2665 | event_sched_out(group_event, cpuctx, ctx); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2666 | |
Peter Zijlstra | 251ff2d | 2020-10-29 16:29:15 +0100 | [diff] [blame] | 2667 | error: |
Peter Zijlstra | ad5133b | 2010-06-15 12:22:39 +0200 | [diff] [blame] | 2668 | pmu->cancel_txn(pmu); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2669 | return -EAGAIN; |
| 2670 | } |
| 2671 | |
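The add-transaction protocol driven above can be summarized as follows; this is an editorial sketch of the calling convention, not additional kernel code:
/*
 *	pmu->start_txn(pmu, PERF_PMU_TXN_ADD);	// start batching ->add() calls
 *	event_sched_in(leader); event_sched_in(each sibling);
 *	if (!pmu->commit_txn(pmu))
 *		return 0;			// whole group fits on the PMU
 *	// otherwise schedule the group back out and ...
 *	pmu->cancel_txn(pmu);			// unwind the partial transaction
 *
 * PMUs without batch capability simply validate each ->add() call
 * individually and typically end up with default transaction callbacks.
 */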
| 2672 | /* |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2673 | * Work out whether we can put this event group on the CPU now. |
| 2674 | */ |
| 2675 | static int group_can_go_on(struct perf_event *event, |
| 2676 | struct perf_cpu_context *cpuctx, |
| 2677 | int can_add_hw) |
| 2678 | { |
| 2679 | /* |
| 2680 | * Groups consisting entirely of software events can always go on. |
| 2681 | */ |
David Carrillo-Cisneros | 4ff6a8d | 2016-08-17 13:55:05 -0700 | [diff] [blame] | 2682 | if (event->group_caps & PERF_EV_CAP_SOFTWARE) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2683 | return 1; |
| 2684 | /* |
| 2685 | * If an exclusive group is already on, no other hardware |
| 2686 | * events can go on. |
| 2687 | */ |
| 2688 | if (cpuctx->exclusive) |
| 2689 | return 0; |
| 2690 | /* |
| 2691 | * If this group is exclusive and there are already |
| 2692 | * events on the CPU, it can't go on. |
| 2693 | */ |
Peter Zijlstra | 1908dc9 | 2020-10-29 16:32:22 +0100 | [diff] [blame] | 2694 | if (event->attr.exclusive && !list_empty(get_event_list(event))) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2695 | return 0; |
| 2696 | /* |
| 2697 | * Otherwise, try to add it if all previous groups were able |
| 2698 | * to go on. |
| 2699 | */ |
| 2700 | return can_add_hw; |
| 2701 | } |
| 2702 | |
| 2703 | static void add_event_to_ctx(struct perf_event *event, |
| 2704 | struct perf_event_context *ctx) |
| 2705 | { |
| 2706 | list_add_event(event, ctx); |
Peter Zijlstra | 8a49542 | 2010-05-27 15:47:49 +0200 | [diff] [blame] | 2707 | perf_group_attach(event); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2708 | } |
| 2709 | |
Peter Zijlstra | bd2afa4 | 2016-02-24 18:45:49 +0100 | [diff] [blame] | 2710 | static void ctx_sched_out(struct perf_event_context *ctx, |
| 2711 | struct perf_cpu_context *cpuctx, |
| 2712 | enum event_type_t event_type); |
Peter Zijlstra | 2c29ef0 | 2011-04-09 21:17:44 +0200 | [diff] [blame] | 2713 | static void |
| 2714 | ctx_sched_in(struct perf_event_context *ctx, |
| 2715 | struct perf_cpu_context *cpuctx, |
| 2716 | enum event_type_t event_type, |
| 2717 | struct task_struct *task); |
Peter Zijlstra | fe4b04f | 2011-02-02 13:19:09 +0100 | [diff] [blame] | 2718 | |
Peter Zijlstra | bd2afa4 | 2016-02-24 18:45:49 +0100 | [diff] [blame] | 2719 | static void task_ctx_sched_out(struct perf_cpu_context *cpuctx, |
Alexander Shishkin | 487f05e | 2017-01-19 18:43:30 +0200 | [diff] [blame] | 2720 | struct perf_event_context *ctx, |
| 2721 | enum event_type_t event_type) |
Peter Zijlstra | bd2afa4 | 2016-02-24 18:45:49 +0100 | [diff] [blame] | 2722 | { |
| 2723 | if (!cpuctx->task_ctx) |
| 2724 | return; |
| 2725 | |
| 2726 | if (WARN_ON_ONCE(ctx != cpuctx->task_ctx)) |
| 2727 | return; |
| 2728 | |
Alexander Shishkin | 487f05e | 2017-01-19 18:43:30 +0200 | [diff] [blame] | 2729 | ctx_sched_out(ctx, cpuctx, event_type); |
Peter Zijlstra | bd2afa4 | 2016-02-24 18:45:49 +0100 | [diff] [blame] | 2730 | } |
| 2731 | |
Peter Zijlstra | dce5855 | 2011-04-09 21:17:46 +0200 | [diff] [blame] | 2732 | static void perf_event_sched_in(struct perf_cpu_context *cpuctx, |
| 2733 | struct perf_event_context *ctx, |
| 2734 | struct task_struct *task) |
| 2735 | { |
| 2736 | cpu_ctx_sched_in(cpuctx, EVENT_PINNED, task); |
| 2737 | if (ctx) |
| 2738 | ctx_sched_in(ctx, cpuctx, EVENT_PINNED, task); |
| 2739 | cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE, task); |
| 2740 | if (ctx) |
| 2741 | ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE, task); |
| 2742 | } |
| 2743 | |
Alexander Shishkin | 487f05e | 2017-01-19 18:43:30 +0200 | [diff] [blame] | 2744 | /* |
| 2745 | * We want to maintain the following priority of scheduling: |
| 2746 | * - CPU pinned (EVENT_CPU | EVENT_PINNED) |
| 2747 | * - task pinned (EVENT_PINNED) |
| 2748 | * - CPU flexible (EVENT_CPU | EVENT_FLEXIBLE) |
| 2749 | * - task flexible (EVENT_FLEXIBLE). |
| 2750 | * |
| 2751 | * In order to avoid unscheduling and scheduling back in everything every |
| 2752 | * time an event is added, only do it for the groups of equal priority and |
| 2753 | * below. |
| 2754 | * |
| 2755 | * This can be called after a batch operation on task events, in which case |
| 2756 | * event_type is a bit mask of the types of events involved. For CPU events, |
| 2757 | * event_type is only either EVENT_PINNED or EVENT_FLEXIBLE. |
| 2758 | */ |
Peter Zijlstra | 3e34950 | 2016-01-08 10:01:18 +0100 | [diff] [blame] | 2759 | static void ctx_resched(struct perf_cpu_context *cpuctx, |
Alexander Shishkin | 487f05e | 2017-01-19 18:43:30 +0200 | [diff] [blame] | 2760 | struct perf_event_context *task_ctx, |
| 2761 | enum event_type_t event_type) |
Peter Zijlstra | 0017960 | 2015-11-30 16:26:35 +0100 | [diff] [blame] | 2762 | { |
Song Liu | bd903af | 2018-03-05 21:55:04 -0800 | [diff] [blame] | 2763 | enum event_type_t ctx_event_type; |
Alexander Shishkin | 487f05e | 2017-01-19 18:43:30 +0200 | [diff] [blame] | 2764 | bool cpu_event = !!(event_type & EVENT_CPU); |
| 2765 | |
| 2766 | /* |
| 2767 | * If pinned groups are involved, flexible groups also need to be |
| 2768 | * scheduled out. |
| 2769 | */ |
| 2770 | if (event_type & EVENT_PINNED) |
| 2771 | event_type |= EVENT_FLEXIBLE; |
| 2772 | |
Song Liu | bd903af | 2018-03-05 21:55:04 -0800 | [diff] [blame] | 2773 | ctx_event_type = event_type & EVENT_ALL; |
| 2774 | |
Peter Zijlstra | 3e34950 | 2016-01-08 10:01:18 +0100 | [diff] [blame] | 2775 | perf_pmu_disable(cpuctx->ctx.pmu); |
| 2776 | if (task_ctx) |
Alexander Shishkin | 487f05e | 2017-01-19 18:43:30 +0200 | [diff] [blame] | 2777 | task_ctx_sched_out(cpuctx, task_ctx, event_type); |
| 2778 | |
| 2779 | /* |
| 2780 | * Decide which cpu ctx groups to schedule out based on the types |
| 2781 | * of events that caused rescheduling: |
| 2782 | * - EVENT_CPU: schedule out corresponding groups; |
| 2783 | * - EVENT_PINNED task events: schedule out EVENT_FLEXIBLE groups; |
| 2784 | * - otherwise, do nothing more. |
| 2785 | */ |
| 2786 | if (cpu_event) |
| 2787 | cpu_ctx_sched_out(cpuctx, ctx_event_type); |
| 2788 | else if (ctx_event_type & EVENT_PINNED) |
| 2789 | cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE); |
| 2790 | |
Peter Zijlstra | 3e34950 | 2016-01-08 10:01:18 +0100 | [diff] [blame] | 2791 | perf_event_sched_in(cpuctx, task_ctx, current); |
| 2792 | perf_pmu_enable(cpuctx->ctx.pmu); |
Peter Zijlstra | 0017960 | 2015-11-30 16:26:35 +0100 | [diff] [blame] | 2793 | } |
| 2794 | |
Stephane Eranian | c68d224 | 2019-04-08 10:32:51 -0700 | [diff] [blame] | 2795 | void perf_pmu_resched(struct pmu *pmu) |
| 2796 | { |
| 2797 | struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context); |
| 2798 | struct perf_event_context *task_ctx = cpuctx->task_ctx; |
| 2799 | |
| 2800 | perf_ctx_lock(cpuctx, task_ctx); |
| 2801 | ctx_resched(cpuctx, task_ctx, EVENT_ALL|EVENT_CPU); |
| 2802 | perf_ctx_unlock(cpuctx, task_ctx); |
| 2803 | } |
| 2804 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2805 | /* |
| 2806 | * Cross CPU call to install and enable a performance event |
| 2807 | * |
Peter Zijlstra | a096309 | 2016-02-24 18:45:50 +0100 | [diff] [blame] | 2808 | * Very similar to remote_function() + event_function() but cannot assume that |
| 2809 | * things like ctx->is_active and cpuctx->task_ctx are set. |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2810 | */ |
Peter Zijlstra | fe4b04f | 2011-02-02 13:19:09 +0100 | [diff] [blame] | 2811 | static int __perf_install_in_context(void *info) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2812 | { |
Peter Zijlstra | a096309 | 2016-02-24 18:45:50 +0100 | [diff] [blame] | 2813 | struct perf_event *event = info; |
| 2814 | struct perf_event_context *ctx = event->ctx; |
Peter Zijlstra | 108b02c | 2010-09-06 14:32:03 +0200 | [diff] [blame] | 2815 | struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); |
Peter Zijlstra | 2c29ef0 | 2011-04-09 21:17:44 +0200 | [diff] [blame] | 2816 | struct perf_event_context *task_ctx = cpuctx->task_ctx; |
Peter Zijlstra | 63cae12 | 2016-12-09 14:59:00 +0100 | [diff] [blame] | 2817 | bool reprogram = true; |
Peter Zijlstra | a096309 | 2016-02-24 18:45:50 +0100 | [diff] [blame] | 2818 | int ret = 0; |
Peter Zijlstra | 2c29ef0 | 2011-04-09 21:17:44 +0200 | [diff] [blame] | 2819 | |
Peter Zijlstra | 63b6da3 | 2016-01-14 16:05:37 +0100 | [diff] [blame] | 2820 | raw_spin_lock(&cpuctx->ctx.lock); |
Peter Zijlstra | 39a4364 | 2016-01-11 12:46:35 +0100 | [diff] [blame] | 2821 | if (ctx->task) { |
Peter Zijlstra | b58f6b0 | 2011-06-07 00:23:28 +0200 | [diff] [blame] | 2822 | raw_spin_lock(&ctx->lock); |
| 2823 | task_ctx = ctx; |
Peter Zijlstra | a096309 | 2016-02-24 18:45:50 +0100 | [diff] [blame] | 2824 | |
Peter Zijlstra | 63cae12 | 2016-12-09 14:59:00 +0100 | [diff] [blame] | 2825 | reprogram = (ctx->task == current); |
| 2826 | |
| 2827 | /* |
| 2828 | * If the task is running, it must be running on this CPU, |
| 2829 | * otherwise we cannot reprogram things. |
| 2830 | * |
| 2831 | * If its not running, we don't care, ctx->lock will |
| 2832 | * serialize against it becoming runnable. |
| 2833 | */ |
| 2834 | if (task_curr(ctx->task) && !reprogram) { |
Peter Zijlstra | a096309 | 2016-02-24 18:45:50 +0100 | [diff] [blame] | 2835 | ret = -ESRCH; |
Peter Zijlstra | 63b6da3 | 2016-01-14 16:05:37 +0100 | [diff] [blame] | 2836 | goto unlock; |
Peter Zijlstra | a096309 | 2016-02-24 18:45:50 +0100 | [diff] [blame] | 2837 | } |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2838 | |
Peter Zijlstra | 63cae12 | 2016-12-09 14:59:00 +0100 | [diff] [blame] | 2839 | WARN_ON_ONCE(reprogram && cpuctx->task_ctx && cpuctx->task_ctx != ctx); |
Peter Zijlstra | 63b6da3 | 2016-01-14 16:05:37 +0100 | [diff] [blame] | 2840 | } else if (task_ctx) { |
| 2841 | raw_spin_lock(&task_ctx->lock); |
Peter Zijlstra | b58f6b0 | 2011-06-07 00:23:28 +0200 | [diff] [blame] | 2842 | } |
| 2843 | |
leilei.lin | 33801b9 | 2018-03-06 17:36:37 +0800 | [diff] [blame] | 2844 | #ifdef CONFIG_CGROUP_PERF |
Peter Zijlstra | 33238c5 | 2020-03-18 20:33:37 +0100 | [diff] [blame] | 2845 | if (event->state > PERF_EVENT_STATE_OFF && is_cgroup_event(event)) { |
leilei.lin | 33801b9 | 2018-03-06 17:36:37 +0800 | [diff] [blame] | 2846 | /* |
| 2847 | * If the current cgroup doesn't match the event's |
| 2848 | * cgroup, we should not try to schedule it. |
| 2849 | */ |
| 2850 | struct perf_cgroup *cgrp = perf_cgroup_from_task(current, ctx); |
| 2851 | reprogram = cgroup_is_descendant(cgrp->css.cgroup, |
| 2852 | event->cgrp->css.cgroup); |
| 2853 | } |
| 2854 | #endif |
| 2855 | |
Peter Zijlstra | 63cae12 | 2016-12-09 14:59:00 +0100 | [diff] [blame] | 2856 | if (reprogram) { |
Peter Zijlstra | a096309 | 2016-02-24 18:45:50 +0100 | [diff] [blame] | 2857 | ctx_sched_out(ctx, cpuctx, EVENT_TIME); |
| 2858 | add_event_to_ctx(event, ctx); |
Alexander Shishkin | 487f05e | 2017-01-19 18:43:30 +0200 | [diff] [blame] | 2859 | ctx_resched(cpuctx, task_ctx, get_event_type(event)); |
Peter Zijlstra | a096309 | 2016-02-24 18:45:50 +0100 | [diff] [blame] | 2860 | } else { |
| 2861 | add_event_to_ctx(event, ctx); |
| 2862 | } |
| 2863 | |
Peter Zijlstra | 63b6da3 | 2016-01-14 16:05:37 +0100 | [diff] [blame] | 2864 | unlock: |
Peter Zijlstra | 2c29ef0 | 2011-04-09 21:17:44 +0200 | [diff] [blame] | 2865 | perf_ctx_unlock(cpuctx, task_ctx); |
Peter Zijlstra | fe4b04f | 2011-02-02 13:19:09 +0100 | [diff] [blame] | 2866 | |
Peter Zijlstra | a096309 | 2016-02-24 18:45:50 +0100 | [diff] [blame] | 2867 | return ret; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2868 | } |
| 2869 | |
Alexander Shishkin | 8a58dda | 2019-07-01 14:07:55 +0300 | [diff] [blame] | 2870 | static bool exclusive_event_installable(struct perf_event *event, |
| 2871 | struct perf_event_context *ctx); |
| 2872 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2873 | /* |
Peter Zijlstra | a096309 | 2016-02-24 18:45:50 +0100 | [diff] [blame] | 2874 | * Attach a performance event to a context. |
| 2875 | * |
| 2876 | * Very similar to event_function_call, see comment there. |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2877 | */ |
| 2878 | static void |
| 2879 | perf_install_in_context(struct perf_event_context *ctx, |
| 2880 | struct perf_event *event, |
| 2881 | int cpu) |
| 2882 | { |
Peter Zijlstra | a096309 | 2016-02-24 18:45:50 +0100 | [diff] [blame] | 2883 | struct task_struct *task = READ_ONCE(ctx->task); |
Peter Zijlstra | 39a4364 | 2016-01-11 12:46:35 +0100 | [diff] [blame] | 2884 | |
Peter Zijlstra | fe4b04f | 2011-02-02 13:19:09 +0100 | [diff] [blame] | 2885 | lockdep_assert_held(&ctx->mutex); |
| 2886 | |
Alexander Shishkin | 8a58dda | 2019-07-01 14:07:55 +0300 | [diff] [blame] | 2887 | WARN_ON_ONCE(!exclusive_event_installable(event, ctx)); |
| 2888 | |
Yan, Zheng | 0cda4c0 | 2012-06-15 14:31:33 +0800 | [diff] [blame] | 2889 | if (event->cpu != -1) |
| 2890 | event->cpu = cpu; |
Peter Zijlstra | c3f00c7 | 2010-08-18 14:37:15 +0200 | [diff] [blame] | 2891 | |
Peter Zijlstra | 0b8f1e2 | 2016-08-04 14:37:24 +0200 | [diff] [blame] | 2892 | /* |
| 2893 | * Ensures that if we can observe event->ctx, both the event and ctx |
| 2894 | * will be 'complete'. See perf_iterate_sb_cpu(). |
| 2895 | */ |
| 2896 | smp_store_release(&event->ctx, ctx); |
| 2897 | |
Peter Zijlstra | db0503e | 2019-10-21 16:02:39 +0200 | [diff] [blame] | 2898 | /* |
| 2899 | * perf_event_attr::disabled events will not run and can be initialized |
| 2900 | * without IPI. Except when this is the first event for the context; in |
| 2901 | * that case we need the magic of the IPI to set ctx->is_active. |
Namhyung Kim | c5de60c | 2022-01-24 11:58:08 -0800 | [diff] [blame] | 2902 | * Similarly, cgroup events for the context also need the IPI to |
| 2903 | * manipulate the cgrp_cpuctx_list. |
Peter Zijlstra | db0503e | 2019-10-21 16:02:39 +0200 | [diff] [blame] | 2904 | * |
| 2905 | * The IOC_ENABLE that is sure to follow the creation of a disabled |
| 2906 | * event will issue the IPI and reprogram the hardware. |
| 2907 | */ |
Namhyung Kim | c5de60c | 2022-01-24 11:58:08 -0800 | [diff] [blame] | 2908 | if (__perf_effective_state(event) == PERF_EVENT_STATE_OFF && |
| 2909 | ctx->nr_events && !is_cgroup_event(event)) { |
Peter Zijlstra | db0503e | 2019-10-21 16:02:39 +0200 | [diff] [blame] | 2910 | raw_spin_lock_irq(&ctx->lock); |
| 2911 | if (ctx->task == TASK_TOMBSTONE) { |
| 2912 | raw_spin_unlock_irq(&ctx->lock); |
| 2913 | return; |
| 2914 | } |
| 2915 | add_event_to_ctx(event, ctx); |
| 2916 | raw_spin_unlock_irq(&ctx->lock); |
| 2917 | return; |
| 2918 | } |
| 2919 | |
Peter Zijlstra | a096309 | 2016-02-24 18:45:50 +0100 | [diff] [blame] | 2920 | if (!task) { |
| 2921 | cpu_function_call(cpu, __perf_install_in_context, event); |
Peter Zijlstra | 63b6da3 | 2016-01-14 16:05:37 +0100 | [diff] [blame] | 2922 | return; |
| 2923 | } |
Peter Zijlstra | 6f932e5 | 2016-02-24 18:45:43 +0100 | [diff] [blame] | 2924 | |
Peter Zijlstra | a096309 | 2016-02-24 18:45:50 +0100 | [diff] [blame] | 2925 | /* |
| 2926 | * Should not happen; we validate that the ctx is still alive before calling. |
| 2927 | */ |
| 2928 | if (WARN_ON_ONCE(task == TASK_TOMBSTONE)) |
| 2929 | return; |
| 2930 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2931 | /* |
| 2932 | * Installing events is tricky because we cannot rely on ctx->is_active |
| 2933 | * to be set in case this is the nr_events 0 -> 1 transition. |
Peter Zijlstra | 63cae12 | 2016-12-09 14:59:00 +0100 | [diff] [blame] | 2934 | * |
| 2935 | * Instead we use task_curr(), which tells us if the task is running. |
| 2936 | * However, since we use task_curr() outside of rq::lock, we can race |
| 2937 | * against the actual state. This means the result can be wrong. |
| 2938 | * |
| 2939 | * If we get a false positive, we retry; this is harmless. |
| 2940 | * |
| 2941 | * If we get a false negative, things are complicated. If we are after |
| 2942 | * perf_event_context_sched_in(), ctx::lock will serialize us, and the |
| 2943 | * value must be correct. If we're before, it doesn't matter since |
| 2944 | * perf_event_context_sched_in() will program the counter. |
| 2945 | * |
| 2946 | * However, this hinges on the remote context switch having observed |
| 2947 | * our task->perf_event_ctxp[] store, such that it will in fact take |
| 2948 | * ctx::lock in perf_event_context_sched_in(). |
| 2949 | * |
| 2950 | * We do this with task_function_call(); if the IPI fails to hit the task, |
| 2951 | * we know any future context switch of the task must see the |
| 2952 | * perf_event_ctxp[] store. |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2953 | */ |
Peter Zijlstra | 63cae12 | 2016-12-09 14:59:00 +0100 | [diff] [blame] | 2954 | |
Peter Zijlstra | a096309 | 2016-02-24 18:45:50 +0100 | [diff] [blame] | 2955 | /* |
Peter Zijlstra | 63cae12 | 2016-12-09 14:59:00 +0100 | [diff] [blame] | 2956 | * This smp_mb() orders the task->perf_event_ctxp[] store with the |
| 2957 | * task_cpu() load, such that if the IPI then does not find the task |
| 2958 | * running, a future context switch of that task must observe the |
| 2959 | * store. |
Peter Zijlstra | a096309 | 2016-02-24 18:45:50 +0100 | [diff] [blame] | 2960 | */ |
Peter Zijlstra | 63cae12 | 2016-12-09 14:59:00 +0100 | [diff] [blame] | 2961 | smp_mb(); |
| 2962 | again: |
| 2963 | if (!task_function_call(task, __perf_install_in_context, event)) |
Peter Zijlstra | a096309 | 2016-02-24 18:45:50 +0100 | [diff] [blame] | 2964 | return; |
| 2965 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2966 | raw_spin_lock_irq(&ctx->lock); |
| 2967 | task = ctx->task; |
Peter Zijlstra | a096309 | 2016-02-24 18:45:50 +0100 | [diff] [blame] | 2968 | if (WARN_ON_ONCE(task == TASK_TOMBSTONE)) { |
| 2969 | /* |
| 2970 | * Cannot happen because we already checked above (which also |
| 2971 | * cannot happen), and we hold ctx->mutex, which serializes us |
| 2972 | * against perf_event_exit_task_context(). |
| 2973 | */ |
Peter Zijlstra | 39a4364 | 2016-01-11 12:46:35 +0100 | [diff] [blame] | 2974 | raw_spin_unlock_irq(&ctx->lock); |
| 2975 | return; |
| 2976 | } |
Peter Zijlstra | a096309 | 2016-02-24 18:45:50 +0100 | [diff] [blame] | 2977 | /* |
Peter Zijlstra | 63cae12 | 2016-12-09 14:59:00 +0100 | [diff] [blame] | 2978 | * If the task is not running, ctx->lock will avoid it becoming so, |
| 2979 | * thus we can safely install the event. |
Peter Zijlstra | a096309 | 2016-02-24 18:45:50 +0100 | [diff] [blame] | 2980 | */ |
Peter Zijlstra | 63cae12 | 2016-12-09 14:59:00 +0100 | [diff] [blame] | 2981 | if (task_curr(task)) { |
| 2982 | raw_spin_unlock_irq(&ctx->lock); |
| 2983 | goto again; |
| 2984 | } |
| 2985 | add_event_to_ctx(event, ctx); |
| 2986 | raw_spin_unlock_irq(&ctx->lock); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2987 | } |
| 2988 | |
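/*
 * Illustrative sketch (not part of core.c): the generic release/acquire
 * publication pattern that the smp_store_release() of event->ctx above
 * relies on.  A writer fully initializes an object, then publishes the
 * pointer with a release store; a reader that observes the pointer with an
 * acquire load is guaranteed to also observe the initialized fields.  The
 * names (demo_obj, demo_publish, demo_reader, demo_ptr) are made up for the
 * example; the real reader side in perf_iterate_sb_cpu() is more involved.
 */
struct demo_obj {
	int payload;
};

static struct demo_obj *demo_ptr;

static void demo_publish(struct demo_obj *obj)
{
	obj->payload = 42;			/* A: initialize the object */
	smp_store_release(&demo_ptr, obj);	/* B: publish; orders A before B */
}

static int demo_reader(void)
{
	struct demo_obj *obj = smp_load_acquire(&demo_ptr);	/* pairs with B */

	if (!obj)
		return -EAGAIN;
	return obj->payload;			/* guaranteed to observe 42 */
}
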
| 2989 | /* |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2990 | * Cross CPU call to enable a performance event |
| 2991 | */ |
Peter Zijlstra | fae3fde | 2016-01-11 15:00:50 +0100 | [diff] [blame] | 2992 | static void __perf_event_enable(struct perf_event *event, |
| 2993 | struct perf_cpu_context *cpuctx, |
| 2994 | struct perf_event_context *ctx, |
| 2995 | void *info) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2996 | { |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2997 | struct perf_event *leader = event->group_leader; |
Peter Zijlstra | fae3fde | 2016-01-11 15:00:50 +0100 | [diff] [blame] | 2998 | struct perf_event_context *task_ctx; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2999 | |
Peter Zijlstra | 6e801e01 | 2016-01-26 12:17:08 +0100 | [diff] [blame] | 3000 | if (event->state >= PERF_EVENT_STATE_INACTIVE || |
| 3001 | event->state <= PERF_EVENT_STATE_ERROR) |
Peter Zijlstra | fae3fde | 2016-01-11 15:00:50 +0100 | [diff] [blame] | 3002 | return; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3003 | |
Peter Zijlstra | bd2afa4 | 2016-02-24 18:45:49 +0100 | [diff] [blame] | 3004 | if (ctx->is_active) |
| 3005 | ctx_sched_out(ctx, cpuctx, EVENT_TIME); |
| 3006 | |
Peter Zijlstra | 0d3d73a | 2017-09-05 14:16:28 +0200 | [diff] [blame] | 3007 | perf_event_set_state(event, PERF_EVENT_STATE_INACTIVE); |
Peter Zijlstra | 33238c5 | 2020-03-18 20:33:37 +0100 | [diff] [blame] | 3008 | perf_cgroup_event_enable(event, ctx); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3009 | |
Peter Zijlstra | fae3fde | 2016-01-11 15:00:50 +0100 | [diff] [blame] | 3010 | if (!ctx->is_active) |
| 3011 | return; |
| 3012 | |
Stephane Eranian | e5d1367 | 2011-02-14 11:20:01 +0200 | [diff] [blame] | 3013 | if (!event_filter_match(event)) { |
Peter Zijlstra | bd2afa4 | 2016-02-24 18:45:49 +0100 | [diff] [blame] | 3014 | ctx_sched_in(ctx, cpuctx, EVENT_TIME, current); |
Peter Zijlstra | fae3fde | 2016-01-11 15:00:50 +0100 | [diff] [blame] | 3015 | return; |
Stephane Eranian | e5d1367 | 2011-02-14 11:20:01 +0200 | [diff] [blame] | 3016 | } |
Peter Zijlstra | f4c4176 | 2009-12-16 17:55:54 +0100 | [diff] [blame] | 3017 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3018 | /* |
| 3019 | * If the event is in a group and isn't the group leader, |
| 3020 | * then don't put it on unless the group is on. |
| 3021 | */ |
Peter Zijlstra | bd2afa4 | 2016-02-24 18:45:49 +0100 | [diff] [blame] | 3022 | if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE) { |
| 3023 | ctx_sched_in(ctx, cpuctx, EVENT_TIME, current); |
Peter Zijlstra | fae3fde | 2016-01-11 15:00:50 +0100 | [diff] [blame] | 3024 | return; |
Peter Zijlstra | bd2afa4 | 2016-02-24 18:45:49 +0100 | [diff] [blame] | 3025 | } |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3026 | |
Peter Zijlstra | fae3fde | 2016-01-11 15:00:50 +0100 | [diff] [blame] | 3027 | task_ctx = cpuctx->task_ctx; |
| 3028 | if (ctx->task) |
| 3029 | WARN_ON_ONCE(task_ctx != ctx); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3030 | |
Alexander Shishkin | 487f05e | 2017-01-19 18:43:30 +0200 | [diff] [blame] | 3031 | ctx_resched(cpuctx, task_ctx, get_event_type(event)); |
Peter Zijlstra | 7b64801 | 2015-12-03 18:35:21 +0100 | [diff] [blame] | 3032 | } |
| 3033 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3034 | /* |
Tobias Tefke | 788faab | 2018-07-09 12:57:15 +0200 | [diff] [blame] | 3035 | * Enable an event. |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3036 | * |
| 3037 | * If event->ctx is a cloned context, callers must make sure that |
| 3038 | * every task struct that event->ctx->task could possibly point to |
| 3039 | * remains valid. This condition is satisfied when called through |
| 3040 | * perf_event_for_each_child or perf_event_for_each as described |
| 3041 | * for perf_event_disable. |
| 3042 | */ |
Peter Zijlstra | f63a8da | 2015-01-23 12:24:14 +0100 | [diff] [blame] | 3043 | static void _perf_event_enable(struct perf_event *event) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3044 | { |
| 3045 | struct perf_event_context *ctx = event->ctx; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3046 | |
Thomas Gleixner | e625cce1 | 2009-11-17 18:02:06 +0100 | [diff] [blame] | 3047 | raw_spin_lock_irq(&ctx->lock); |
Peter Zijlstra | 6e801e01 | 2016-01-26 12:17:08 +0100 | [diff] [blame] | 3048 | if (event->state >= PERF_EVENT_STATE_INACTIVE || |
| 3049 | event->state < PERF_EVENT_STATE_ERROR) { |
Kan Liang | 9f0c4fa | 2020-07-23 10:11:10 -0700 | [diff] [blame] | 3050 | out: |
Peter Zijlstra | 7b64801 | 2015-12-03 18:35:21 +0100 | [diff] [blame] | 3051 | raw_spin_unlock_irq(&ctx->lock); |
| 3052 | return; |
| 3053 | } |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3054 | |
| 3055 | /* |
| 3056 | * If the event is in error state, clear that first. |
Peter Zijlstra | 7b64801 | 2015-12-03 18:35:21 +0100 | [diff] [blame] | 3057 | * |
| 3058 | * That way, if we see the event in error state below, we know that it |
| 3059 | * has gone back into error state, as distinct from the task having |
| 3060 | * been scheduled away before the cross-call arrived. |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3061 | */ |
Kan Liang | 9f0c4fa | 2020-07-23 10:11:10 -0700 | [diff] [blame] | 3062 | if (event->state == PERF_EVENT_STATE_ERROR) { |
| 3063 | /* |
| 3064 | * Detached SIBLING events cannot leave ERROR state. |
| 3065 | */ |
| 3066 | if (event->event_caps & PERF_EV_CAP_SIBLING && |
| 3067 | event->group_leader == event) |
| 3068 | goto out; |
| 3069 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3070 | event->state = PERF_EVENT_STATE_OFF; |
Kan Liang | 9f0c4fa | 2020-07-23 10:11:10 -0700 | [diff] [blame] | 3071 | } |
Thomas Gleixner | e625cce1 | 2009-11-17 18:02:06 +0100 | [diff] [blame] | 3072 | raw_spin_unlock_irq(&ctx->lock); |
Peter Zijlstra | fe4b04f | 2011-02-02 13:19:09 +0100 | [diff] [blame] | 3073 | |
Peter Zijlstra | fae3fde | 2016-01-11 15:00:50 +0100 | [diff] [blame] | 3074 | event_function_call(event, __perf_event_enable, NULL); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3075 | } |
Peter Zijlstra | f63a8da | 2015-01-23 12:24:14 +0100 | [diff] [blame] | 3076 | |
| 3077 | /* |
| 3078 | * See perf_event_disable(); |
| 3079 | */ |
| 3080 | void perf_event_enable(struct perf_event *event) |
| 3081 | { |
| 3082 | struct perf_event_context *ctx; |
| 3083 | |
| 3084 | ctx = perf_event_ctx_lock(event); |
| 3085 | _perf_event_enable(event); |
| 3086 | perf_event_ctx_unlock(event, ctx); |
| 3087 | } |
Robert Richter | dcfce4a | 2011-10-11 17:11:08 +0200 | [diff] [blame] | 3088 | EXPORT_SYMBOL_GPL(perf_event_enable); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3089 | |
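/*
 * Illustrative sketch (not part of core.c): a minimal in-kernel user of the
 * exported perf_event_enable() above.  A per-CPU counter is created in the
 * disabled state, enabled once the caller is ready, read, and released.
 * Error handling and module boilerplate are trimmed; the demo_* names are
 * made up.  Assumes <linux/perf_event.h> and <linux/err.h>.
 */
static struct perf_event *demo_event;

static int demo_start(void)
{
	struct perf_event_attr attr = {
		.type		= PERF_TYPE_HARDWARE,
		.config		= PERF_COUNT_HW_CPU_CYCLES,
		.size		= sizeof(attr),
		.disabled	= 1,	/* start OFF, enable explicitly below */
	};

	demo_event = perf_event_create_kernel_counter(&attr, 0 /* cpu */,
						      NULL /* all tasks */,
						      NULL, NULL);
	if (IS_ERR(demo_event))
		return PTR_ERR(demo_event);

	perf_event_enable(demo_event);
	return 0;
}

static void demo_stop(void)
{
	u64 enabled, running;
	u64 count = perf_event_read_value(demo_event, &enabled, &running);

	pr_info("demo: counted %llu cycles\n", (unsigned long long)count);
	perf_event_release_kernel(demo_event);
}
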
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 3090 | struct stop_event_data { |
| 3091 | struct perf_event *event; |
| 3092 | unsigned int restart; |
| 3093 | }; |
| 3094 | |
Alexander Shishkin | 95ff4ca | 2015-12-02 18:41:11 +0200 | [diff] [blame] | 3095 | static int __perf_event_stop(void *info) |
| 3096 | { |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 3097 | struct stop_event_data *sd = info; |
| 3098 | struct perf_event *event = sd->event; |
Alexander Shishkin | 95ff4ca | 2015-12-02 18:41:11 +0200 | [diff] [blame] | 3099 | |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 3100 | /* if it's already INACTIVE, do nothing */ |
Alexander Shishkin | 95ff4ca | 2015-12-02 18:41:11 +0200 | [diff] [blame] | 3101 | if (READ_ONCE(event->state) != PERF_EVENT_STATE_ACTIVE) |
| 3102 | return 0; |
| 3103 | |
| 3104 | /* matches smp_wmb() in event_sched_in() */ |
| 3105 | smp_rmb(); |
| 3106 | |
| 3107 | /* |
| 3108 | * There is a window with interrupts enabled before we get here, |
| 3109 | * so we need to check again lest we try to stop another CPU's event. |
| 3110 | */ |
| 3111 | if (READ_ONCE(event->oncpu) != smp_processor_id()) |
| 3112 | return -EAGAIN; |
| 3113 | |
| 3114 | event->pmu->stop(event, PERF_EF_UPDATE); |
| 3115 | |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 3116 | /* |
| 3117 | * May race with the actual stop (through perf_pmu_output_stop()), |
| 3118 | * but it is only used for events with an AUX ring buffer, and such |
| 3119 | * events will refuse to restart because of rb::aux_mmap_count==0; |
| 3120 | * see comments in perf_aux_output_begin(). |
| 3121 | * |
Tobias Tefke | 788faab | 2018-07-09 12:57:15 +0200 | [diff] [blame] | 3122 | * Since this is happening on an event-local CPU, no trace is lost |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 3123 | * while restarting. |
| 3124 | */ |
| 3125 | if (sd->restart) |
Will Deacon | c9bbdd4 | 2016-08-15 11:42:45 +0100 | [diff] [blame] | 3126 | event->pmu->start(event, 0); |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 3127 | |
Alexander Shishkin | 95ff4ca | 2015-12-02 18:41:11 +0200 | [diff] [blame] | 3128 | return 0; |
| 3129 | } |
| 3130 | |
Alexander Shishkin | 767ae08 | 2016-09-06 16:23:49 +0300 | [diff] [blame] | 3131 | static int perf_event_stop(struct perf_event *event, int restart) |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 3132 | { |
| 3133 | struct stop_event_data sd = { |
| 3134 | .event = event, |
Alexander Shishkin | 767ae08 | 2016-09-06 16:23:49 +0300 | [diff] [blame] | 3135 | .restart = restart, |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 3136 | }; |
| 3137 | int ret = 0; |
| 3138 | |
| 3139 | do { |
| 3140 | if (READ_ONCE(event->state) != PERF_EVENT_STATE_ACTIVE) |
| 3141 | return 0; |
| 3142 | |
| 3143 | /* matches smp_wmb() in event_sched_in() */ |
| 3144 | smp_rmb(); |
| 3145 | |
| 3146 | /* |
| 3147 | * We only want to restart ACTIVE events, so if the event goes |
| 3148 | * inactive here (event->oncpu==-1), there's nothing more to do; |
| 3149 | * fall through with ret==-ENXIO. |
| 3150 | */ |
| 3151 | ret = cpu_function_call(READ_ONCE(event->oncpu), |
| 3152 | __perf_event_stop, &sd); |
| 3153 | } while (ret == -EAGAIN); |
| 3154 | |
| 3155 | return ret; |
| 3156 | } |
| 3157 | |
| 3158 | /* |
| 3159 | * In order to contain the amount of racy and tricky code in the address |
| 3160 | * filter configuration management, it is a two-part process: |
| 3161 | * |
| 3162 | * (p1) when userspace mappings change as a result of (1) or (2) or (3) below, |
| 3163 | * we update the addresses of corresponding vmas in |
Alexander Shishkin | c60f83b | 2019-02-15 13:56:55 +0200 | [diff] [blame] | 3164 | * event::addr_filter_ranges array and bump the event::addr_filters_gen; |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 3165 | * (p2) when an event is scheduled in (pmu::add), it calls |
| 3166 | * perf_event_addr_filters_sync() which calls pmu::addr_filters_sync() |
| 3167 | * if the generation has changed since the previous call. |
| 3168 | * |
| 3169 | * If (p1) happens while the event is active, we restart it to force (p2). |
| 3170 | * |
| 3171 | * (1) perf_addr_filters_apply(): adjusting filters' offsets based on |
| 3172 | * pre-existing mappings, called once when new filters arrive via SET_FILTER |
| 3173 | * ioctl; |
| 3174 | * (2) perf_addr_filters_adjust(): adjusting filters' offsets based on newly |
Michel Lespinasse | c1e8d7c | 2020-06-08 21:33:54 -0700 | [diff] [blame] | 3175 | * registered mapping, called for every new mmap(), with mm::mmap_lock down |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 3176 | * for reading; |
| 3177 | * (3) perf_event_addr_filters_exec(): clearing filters' offsets in the process |
| 3178 | * of exec. |
| 3179 | */ |
| 3180 | void perf_event_addr_filters_sync(struct perf_event *event) |
| 3181 | { |
| 3182 | struct perf_addr_filters_head *ifh = perf_event_addr_filters(event); |
| 3183 | |
| 3184 | if (!has_addr_filter(event)) |
| 3185 | return; |
| 3186 | |
| 3187 | raw_spin_lock(&ifh->lock); |
| 3188 | if (event->addr_filters_gen != event->hw.addr_filters_gen) { |
| 3189 | event->pmu->addr_filters_sync(event); |
| 3190 | event->hw.addr_filters_gen = event->addr_filters_gen; |
| 3191 | } |
| 3192 | raw_spin_unlock(&ifh->lock); |
| 3193 | } |
| 3194 | EXPORT_SYMBOL_GPL(perf_event_addr_filters_sync); |
| 3195 | |
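/*
 * Illustrative sketch (not part of core.c): the (p2) half of the scheme
 * described above, as a PMU driver with address filtering support might
 * implement it.  The demo_pmu_start() name is made up; the real callers are
 * pmu::start / pmu::add implementations of drivers that support instruction
 * trace filtering.
 */
static void demo_pmu_start(struct perf_event *event, int flags)
{
	/*
	 * Pick up any filter changes made since the event was last scheduled
	 * in; this only reaches pmu::addr_filters_sync() if
	 * event->addr_filters_gen has moved on.
	 */
	perf_event_addr_filters_sync(event);

	/* ... program the filter hardware and start the event ... */
}
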
Peter Zijlstra | f63a8da | 2015-01-23 12:24:14 +0100 | [diff] [blame] | 3196 | static int _perf_event_refresh(struct perf_event *event, int refresh) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3197 | { |
| 3198 | /* |
| 3199 | * not supported on inherited events |
| 3200 | */ |
Franck Bui-Huu | 2e939d1 | 2010-11-23 16:21:44 +0100 | [diff] [blame] | 3201 | if (event->attr.inherit || !is_sampling_event(event)) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3202 | return -EINVAL; |
| 3203 | |
| 3204 | atomic_add(refresh, &event->event_limit); |
Peter Zijlstra | f63a8da | 2015-01-23 12:24:14 +0100 | [diff] [blame] | 3205 | _perf_event_enable(event); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3206 | |
| 3207 | return 0; |
| 3208 | } |
Peter Zijlstra | f63a8da | 2015-01-23 12:24:14 +0100 | [diff] [blame] | 3209 | |
| 3210 | /* |
| 3211 | * See perf_event_disable() |
| 3212 | */ |
| 3213 | int perf_event_refresh(struct perf_event *event, int refresh) |
| 3214 | { |
| 3215 | struct perf_event_context *ctx; |
| 3216 | int ret; |
| 3217 | |
| 3218 | ctx = perf_event_ctx_lock(event); |
| 3219 | ret = _perf_event_refresh(event, refresh); |
| 3220 | perf_event_ctx_unlock(event, ctx); |
| 3221 | |
| 3222 | return ret; |
| 3223 | } |
Avi Kivity | 26ca5c1 | 2011-06-29 18:42:37 +0300 | [diff] [blame] | 3224 | EXPORT_SYMBOL_GPL(perf_event_refresh); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3225 | |
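/*
 * Illustrative sketch (not part of core.c): the userspace view of the
 * refresh mechanism above, reached via the PERF_EVENT_IOC_REFRESH ioctl on a
 * sampling event opened with perf_event_open(2).  The fd is assumed to refer
 * to a disabled, non-inherited sampling event; overflow-signal setup (fcntl
 * F_SETOWN/F_SETSIG) is omitted for brevity.
 */
#include <sys/ioctl.h>
#include <linux/perf_event.h>

static void arm_for_n_overflows(int fd, int n)
{
	/*
	 * Adds n to the event limit and enables the event; after n overflows
	 * the kernel disables it again (see the event_limit handling in the
	 * overflow path).
	 */
	ioctl(fd, PERF_EVENT_IOC_REFRESH, n);
}
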
Milind Chabbi | 32ff77e | 2018-03-12 14:45:47 +0100 | [diff] [blame] | 3226 | static int perf_event_modify_breakpoint(struct perf_event *bp, |
| 3227 | struct perf_event_attr *attr) |
| 3228 | { |
| 3229 | int err; |
| 3230 | |
| 3231 | _perf_event_disable(bp); |
| 3232 | |
| 3233 | err = modify_user_hw_breakpoint_check(bp, attr, true); |
Milind Chabbi | 32ff77e | 2018-03-12 14:45:47 +0100 | [diff] [blame] | 3234 | |
Jiri Olsa | bf06278 | 2018-08-27 11:12:28 +0200 | [diff] [blame] | 3235 | if (!bp->attr.disabled) |
Milind Chabbi | 32ff77e | 2018-03-12 14:45:47 +0100 | [diff] [blame] | 3236 | _perf_event_enable(bp); |
Jiri Olsa | bf06278 | 2018-08-27 11:12:28 +0200 | [diff] [blame] | 3237 | |
| 3238 | return err; |
Milind Chabbi | 32ff77e | 2018-03-12 14:45:47 +0100 | [diff] [blame] | 3239 | } |
| 3240 | |
Marco Elver | 3c25fc9 | 2022-01-31 11:34:05 +0100 | [diff] [blame] | 3241 | /* |
| 3242 | * Copy event-type-independent attributes that may be modified. |
| 3243 | */ |
| 3244 | static void perf_event_modify_copy_attr(struct perf_event_attr *to, |
| 3245 | const struct perf_event_attr *from) |
| 3246 | { |
| 3247 | to->sig_data = from->sig_data; |
| 3248 | } |
| 3249 | |
Milind Chabbi | 32ff77e | 2018-03-12 14:45:47 +0100 | [diff] [blame] | 3250 | static int perf_event_modify_attr(struct perf_event *event, |
| 3251 | struct perf_event_attr *attr) |
| 3252 | { |
Marco Elver | 47f661e | 2021-04-08 12:35:57 +0200 | [diff] [blame] | 3253 | int (*func)(struct perf_event *, struct perf_event_attr *); |
| 3254 | struct perf_event *child; |
| 3255 | int err; |
| 3256 | |
Milind Chabbi | 32ff77e | 2018-03-12 14:45:47 +0100 | [diff] [blame] | 3257 | if (event->attr.type != attr->type) |
| 3258 | return -EINVAL; |
| 3259 | |
| 3260 | switch (event->attr.type) { |
| 3261 | case PERF_TYPE_BREAKPOINT: |
Marco Elver | 47f661e | 2021-04-08 12:35:57 +0200 | [diff] [blame] | 3262 | func = perf_event_modify_breakpoint; |
| 3263 | break; |
Milind Chabbi | 32ff77e | 2018-03-12 14:45:47 +0100 | [diff] [blame] | 3264 | default: |
| 3265 | /* Placeholder for future additions. */ |
| 3266 | return -EOPNOTSUPP; |
| 3267 | } |
Marco Elver | 47f661e | 2021-04-08 12:35:57 +0200 | [diff] [blame] | 3268 | |
| 3269 | WARN_ON_ONCE(event->ctx->parent_ctx); |
| 3270 | |
| 3271 | mutex_lock(&event->child_mutex); |
Marco Elver | 3c25fc9 | 2022-01-31 11:34:05 +0100 | [diff] [blame] | 3272 | /* |
| 3273 | * Event-type-independent attributes must be copied before event-type |
| 3274 | * modification, which will validate that final attributes match the |
| 3275 | * source attributes after all relevant attributes have been copied. |
| 3276 | */ |
| 3277 | perf_event_modify_copy_attr(&event->attr, attr); |
Marco Elver | 47f661e | 2021-04-08 12:35:57 +0200 | [diff] [blame] | 3278 | err = func(event, attr); |
| 3279 | if (err) |
| 3280 | goto out; |
| 3281 | list_for_each_entry(child, &event->child_list, child_list) { |
Marco Elver | 3c25fc9 | 2022-01-31 11:34:05 +0100 | [diff] [blame] | 3282 | perf_event_modify_copy_attr(&child->attr, attr); |
Marco Elver | 47f661e | 2021-04-08 12:35:57 +0200 | [diff] [blame] | 3283 | err = func(child, attr); |
| 3284 | if (err) |
| 3285 | goto out; |
| 3286 | } |
| 3287 | out: |
| 3288 | mutex_unlock(&event->child_mutex); |
| 3289 | return err; |
Milind Chabbi | 32ff77e | 2018-03-12 14:45:47 +0100 | [diff] [blame] | 3290 | } |
| 3291 | |
Frederic Weisbecker | 5b0311e | 2010-01-17 11:59:13 +0100 | [diff] [blame] | 3292 | static void ctx_sched_out(struct perf_event_context *ctx, |
| 3293 | struct perf_cpu_context *cpuctx, |
| 3294 | enum event_type_t event_type) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3295 | { |
Peter Zijlstra | 6668128 | 2017-11-13 14:28:38 +0100 | [diff] [blame] | 3296 | struct perf_event *event, *tmp; |
Peter Zijlstra | db24d33 | 2011-04-09 21:17:45 +0200 | [diff] [blame] | 3297 | int is_active = ctx->is_active; |
Peter Zijlstra | c994d61 | 2016-01-08 09:20:23 +0100 | [diff] [blame] | 3298 | |
| 3299 | lockdep_assert_held(&ctx->lock); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3300 | |
Peter Zijlstra | 39a4364 | 2016-01-11 12:46:35 +0100 | [diff] [blame] | 3301 | if (likely(!ctx->nr_events)) { |
| 3302 | /* |
| 3303 | * See __perf_remove_from_context(). |
| 3304 | */ |
| 3305 | WARN_ON_ONCE(ctx->is_active); |
| 3306 | if (ctx->task) |
| 3307 | WARN_ON_ONCE(cpuctx->task_ctx); |
| 3308 | return; |
| 3309 | } |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3310 | |
Peter Zijlstra | 8fdc653 | 2016-03-29 09:26:44 +0200 | [diff] [blame] | 3311 | /* |
| 3312 | * Always update time if it was set; not only when it changes. |
| 3313 | * Otherwise we can 'forget' to update time for any but the last |
| 3314 | * context we sched out. For example: |
| 3315 | * |
| 3316 | * ctx_sched_out(.event_type = EVENT_FLEXIBLE) |
| 3317 | * ctx_sched_out(.event_type = EVENT_PINNED) |
| 3318 | * |
| 3319 | * would only update time for the pinned events. |
| 3320 | */ |
Peter Zijlstra | 3cbaa59 | 2016-02-24 18:45:47 +0100 | [diff] [blame] | 3321 | if (is_active & EVENT_TIME) { |
| 3322 | /* update (and stop) ctx time */ |
| 3323 | update_context_time(ctx); |
Peter Zijlstra | 09f5e7d | 2021-12-20 13:19:52 +0100 | [diff] [blame] | 3324 | update_cgrp_time_from_cpuctx(cpuctx, ctx == &cpuctx->ctx); |
| 3325 | /* |
| 3326 | * CPU-release for the below ->is_active store, |
| 3327 | * see __load_acquire() in perf_event_time_now() |
| 3328 | */ |
| 3329 | barrier(); |
| 3330 | } |
| 3331 | |
| 3332 | ctx->is_active &= ~event_type; |
| 3333 | if (!(ctx->is_active & EVENT_ALL)) |
| 3334 | ctx->is_active = 0; |
| 3335 | |
| 3336 | if (ctx->task) { |
| 3337 | WARN_ON_ONCE(cpuctx->task_ctx != ctx); |
| 3338 | if (!ctx->is_active) |
| 3339 | cpuctx->task_ctx = NULL; |
Peter Zijlstra | 3cbaa59 | 2016-02-24 18:45:47 +0100 | [diff] [blame] | 3340 | } |
| 3341 | |
Peter Zijlstra | 8fdc653 | 2016-03-29 09:26:44 +0200 | [diff] [blame] | 3342 | is_active ^= ctx->is_active; /* changed bits */ |
| 3343 | |
Peter Zijlstra | 3cbaa59 | 2016-02-24 18:45:47 +0100 | [diff] [blame] | 3344 | if (!ctx->nr_active || !(is_active & EVENT_ALL)) |
Peter Zijlstra | facc430 | 2011-04-09 21:17:42 +0200 | [diff] [blame] | 3345 | return; |
Frederic Weisbecker | 5b0311e | 2010-01-17 11:59:13 +0100 | [diff] [blame] | 3346 | |
Peter Zijlstra | 075e0b0 | 2011-04-09 21:17:40 +0200 | [diff] [blame] | 3347 | perf_pmu_disable(ctx->pmu); |
Peter Zijlstra | 3cbaa59 | 2016-02-24 18:45:47 +0100 | [diff] [blame] | 3348 | if (is_active & EVENT_PINNED) { |
Peter Zijlstra | 6668128 | 2017-11-13 14:28:38 +0100 | [diff] [blame] | 3349 | list_for_each_entry_safe(event, tmp, &ctx->pinned_active, active_list) |
Frederic Weisbecker | 889ff01 | 2010-01-09 20:04:47 +0100 | [diff] [blame] | 3350 | group_sched_out(event, cpuctx, ctx); |
Peter Zijlstra | 9ed6060 | 2010-06-11 17:36:35 +0200 | [diff] [blame] | 3351 | } |
Frederic Weisbecker | 889ff01 | 2010-01-09 20:04:47 +0100 | [diff] [blame] | 3352 | |
Peter Zijlstra | 3cbaa59 | 2016-02-24 18:45:47 +0100 | [diff] [blame] | 3353 | if (is_active & EVENT_FLEXIBLE) { |
Peter Zijlstra | 6668128 | 2017-11-13 14:28:38 +0100 | [diff] [blame] | 3354 | list_for_each_entry_safe(event, tmp, &ctx->flexible_active, active_list) |
Xiao Guangrong | 8c9ed8e | 2009-09-25 13:51:17 +0800 | [diff] [blame] | 3355 | group_sched_out(event, cpuctx, ctx); |
Peter Zijlstra | 90c91df | 2020-03-05 13:38:51 +0100 | [diff] [blame] | 3356 | |
| 3357 | /* |
| 3358 | * Since we cleared EVENT_FLEXIBLE, also clear |
| 3359 | * rotate_necessary; it will be reset by |
| 3360 | * ctx_flexible_sched_in() when needed. |
| 3361 | */ |
| 3362 | ctx->rotate_necessary = 0; |
Peter Zijlstra | 9ed6060 | 2010-06-11 17:36:35 +0200 | [diff] [blame] | 3363 | } |
Peter Zijlstra | 1b9a644 | 2010-09-07 18:32:22 +0200 | [diff] [blame] | 3364 | perf_pmu_enable(ctx->pmu); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3365 | } |
| 3366 | |
| 3367 | /* |
Peter Zijlstra | 5a3126d | 2013-10-07 17:12:48 +0200 | [diff] [blame] | 3368 | * Test whether two contexts are equivalent, i.e. whether they have both been |
| 3369 | * cloned from the same version of the same context. |
| 3370 | * |
| 3371 | * Equivalence is measured using a generation number in the context that is |
| 3372 | * incremented on each modification to it; see unclone_ctx(), list_add_event() |
| 3373 | * and list_del_event(). |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3374 | */ |
| 3375 | static int context_equiv(struct perf_event_context *ctx1, |
| 3376 | struct perf_event_context *ctx2) |
| 3377 | { |
Peter Zijlstra | 211de6e | 2014-09-30 19:23:08 +0200 | [diff] [blame] | 3378 | lockdep_assert_held(&ctx1->lock); |
| 3379 | lockdep_assert_held(&ctx2->lock); |
| 3380 | |
Peter Zijlstra | 5a3126d | 2013-10-07 17:12:48 +0200 | [diff] [blame] | 3381 | /* Pinning disables the swap optimization */ |
| 3382 | if (ctx1->pin_count || ctx2->pin_count) |
| 3383 | return 0; |
| 3384 | |
| 3385 | /* If ctx1 is the parent of ctx2 */ |
| 3386 | if (ctx1 == ctx2->parent_ctx && ctx1->generation == ctx2->parent_gen) |
| 3387 | return 1; |
| 3388 | |
| 3389 | /* If ctx2 is the parent of ctx1 */ |
| 3390 | if (ctx1->parent_ctx == ctx2 && ctx1->parent_gen == ctx2->generation) |
| 3391 | return 1; |
| 3392 | |
| 3393 | /* |
| 3394 | * If ctx1 and ctx2 have the same parent; we flatten the parent |
| 3395 | * hierarchy, see perf_event_init_context(). |
| 3396 | */ |
| 3397 | if (ctx1->parent_ctx && ctx1->parent_ctx == ctx2->parent_ctx && |
| 3398 | ctx1->parent_gen == ctx2->parent_gen) |
| 3399 | return 1; |
| 3400 | |
| 3401 | /* Unmatched */ |
| 3402 | return 0; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3403 | } |
| 3404 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3405 | static void __perf_event_sync_stat(struct perf_event *event, |
| 3406 | struct perf_event *next_event) |
| 3407 | { |
| 3408 | u64 value; |
| 3409 | |
| 3410 | if (!event->attr.inherit_stat) |
| 3411 | return; |
| 3412 | |
| 3413 | /* |
| 3414 | * Update the event value, we cannot use perf_event_read() |
| 3415 | * because we're in the middle of a context switch and have IRQs |
| 3416 | * disabled, which upsets smp_call_function_single(), however |
| 3417 | * we know the event must be on the current CPU, therefore we |
| 3418 | * don't need to use it. |
| 3419 | */ |
Peter Zijlstra | 0d3d73a | 2017-09-05 14:16:28 +0200 | [diff] [blame] | 3420 | if (event->state == PERF_EVENT_STATE_ACTIVE) |
Peter Zijlstra | 3dbebf1 | 2009-11-20 22:19:52 +0100 | [diff] [blame] | 3421 | event->pmu->read(event); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3422 | |
Peter Zijlstra | 0d3d73a | 2017-09-05 14:16:28 +0200 | [diff] [blame] | 3423 | perf_event_update_time(event); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3424 | |
| 3425 | /* |
| 3426 | * In order to keep per-task stats reliable we need to flip the event |
| 3427 | * values when we flip the contexts. |
| 3428 | */ |
Peter Zijlstra | e785059 | 2010-05-21 14:43:08 +0200 | [diff] [blame] | 3429 | value = local64_read(&next_event->count); |
| 3430 | value = local64_xchg(&event->count, value); |
| 3431 | local64_set(&next_event->count, value); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3432 | |
| 3433 | swap(event->total_time_enabled, next_event->total_time_enabled); |
| 3434 | swap(event->total_time_running, next_event->total_time_running); |
| 3435 | |
| 3436 | /* |
| 3437 | * Since we swizzled the values, update the user visible data too. |
| 3438 | */ |
| 3439 | perf_event_update_userpage(event); |
| 3440 | perf_event_update_userpage(next_event); |
| 3441 | } |
| 3442 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3443 | static void perf_event_sync_stat(struct perf_event_context *ctx, |
| 3444 | struct perf_event_context *next_ctx) |
| 3445 | { |
| 3446 | struct perf_event *event, *next_event; |
| 3447 | |
| 3448 | if (!ctx->nr_stat) |
| 3449 | return; |
| 3450 | |
Peter Zijlstra | 02ffdbc | 2009-11-20 22:19:50 +0100 | [diff] [blame] | 3451 | update_context_time(ctx); |
| 3452 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3453 | event = list_first_entry(&ctx->event_list, |
| 3454 | struct perf_event, event_entry); |
| 3455 | |
| 3456 | next_event = list_first_entry(&next_ctx->event_list, |
| 3457 | struct perf_event, event_entry); |
| 3458 | |
| 3459 | while (&event->event_entry != &ctx->event_list && |
| 3460 | &next_event->event_entry != &next_ctx->event_list) { |
| 3461 | |
| 3462 | __perf_event_sync_stat(event, next_event); |
| 3463 | |
| 3464 | event = list_next_entry(event, event_entry); |
| 3465 | next_event = list_next_entry(next_event, event_entry); |
| 3466 | } |
| 3467 | } |
| 3468 | |
Peter Zijlstra | fe4b04f | 2011-02-02 13:19:09 +0100 | [diff] [blame] | 3469 | static void perf_event_context_sched_out(struct task_struct *task, int ctxn, |
| 3470 | struct task_struct *next) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3471 | { |
Peter Zijlstra | 8dc85d547 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 3472 | struct perf_event_context *ctx = task->perf_event_ctxp[ctxn]; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3473 | struct perf_event_context *next_ctx; |
Peter Zijlstra | 5a3126d | 2013-10-07 17:12:48 +0200 | [diff] [blame] | 3474 | struct perf_event_context *parent, *next_parent; |
Peter Zijlstra | 108b02c | 2010-09-06 14:32:03 +0200 | [diff] [blame] | 3475 | struct perf_cpu_context *cpuctx; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3476 | int do_switch = 1; |
Kan Liang | 44fae179 | 2020-08-21 12:57:53 -0700 | [diff] [blame] | 3477 | struct pmu *pmu; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3478 | |
Peter Zijlstra | 108b02c | 2010-09-06 14:32:03 +0200 | [diff] [blame] | 3479 | if (likely(!ctx)) |
| 3480 | return; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3481 | |
Kan Liang | 44fae179 | 2020-08-21 12:57:53 -0700 | [diff] [blame] | 3482 | pmu = ctx->pmu; |
Peter Zijlstra | 108b02c | 2010-09-06 14:32:03 +0200 | [diff] [blame] | 3483 | cpuctx = __get_cpu_context(ctx); |
| 3484 | if (!cpuctx->task_ctx) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3485 | return; |
| 3486 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3487 | rcu_read_lock(); |
Peter Zijlstra | 8dc85d547 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 3488 | next_ctx = next->perf_event_ctxp[ctxn]; |
Peter Zijlstra | 5a3126d | 2013-10-07 17:12:48 +0200 | [diff] [blame] | 3489 | if (!next_ctx) |
| 3490 | goto unlock; |
| 3491 | |
| 3492 | parent = rcu_dereference(ctx->parent_ctx); |
| 3493 | next_parent = rcu_dereference(next_ctx->parent_ctx); |
| 3494 | |
| 3495 | /* If neither context have a parent context; they cannot be clones. */ |
Jiri Olsa | 802c8a6 | 2014-09-12 13:18:28 +0200 | [diff] [blame] | 3496 | if (!parent && !next_parent) |
Peter Zijlstra | 5a3126d | 2013-10-07 17:12:48 +0200 | [diff] [blame] | 3497 | goto unlock; |
| 3498 | |
| 3499 | if (next_parent == ctx || next_ctx == parent || next_parent == parent) { |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3500 | /* |
| 3501 | * Looks like the two contexts are clones, so we might be |
| 3502 | * able to optimize the context switch. We lock both |
| 3503 | * contexts and check that they are clones under the |
| 3504 | * lock (including re-checking that neither has been |
| 3505 | * uncloned in the meantime). It doesn't matter which |
| 3506 | * order we take the locks because no other cpu could |
| 3507 | * be trying to lock both of these tasks. |
| 3508 | */ |
Thomas Gleixner | e625cce1 | 2009-11-17 18:02:06 +0100 | [diff] [blame] | 3509 | raw_spin_lock(&ctx->lock); |
| 3510 | raw_spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3511 | if (context_equiv(ctx, next_ctx)) { |
Alexey Budankov | c2b98a8 | 2019-10-23 10:13:56 +0300 | [diff] [blame] | 3512 | |
Peter Zijlstra | 63b6da3 | 2016-01-14 16:05:37 +0100 | [diff] [blame] | 3513 | WRITE_ONCE(ctx->task, next); |
| 3514 | WRITE_ONCE(next_ctx->task, task); |
Yan, Zheng | 5a158c3 | 2014-11-04 21:56:02 -0500 | [diff] [blame] | 3515 | |
Kan Liang | 44fae179 | 2020-08-21 12:57:53 -0700 | [diff] [blame] | 3516 | perf_pmu_disable(pmu); |
| 3517 | |
| 3518 | if (cpuctx->sched_cb_usage && pmu->sched_task) |
| 3519 | pmu->sched_task(ctx, false); |
| 3520 | |
Alexey Budankov | c2b98a8 | 2019-10-23 10:13:56 +0300 | [diff] [blame] | 3521 | /* |
| 3522 | * PMU specific parts of task perf context can require |
| 3523 | * additional synchronization. As an example of such |
| 3524 | * synchronization, see the implementation details of Intel |
| 3525 | * LBR call stack data profiling. |
| 3526 | */ |
| 3527 | if (pmu->swap_task_ctx) |
| 3528 | pmu->swap_task_ctx(ctx, next_ctx); |
| 3529 | else |
| 3530 | swap(ctx->task_ctx_data, next_ctx->task_ctx_data); |
Yan, Zheng | 5a158c3 | 2014-11-04 21:56:02 -0500 | [diff] [blame] | 3531 | |
Kan Liang | 44fae179 | 2020-08-21 12:57:53 -0700 | [diff] [blame] | 3532 | perf_pmu_enable(pmu); |
| 3533 | |
Peter Zijlstra | 63b6da3 | 2016-01-14 16:05:37 +0100 | [diff] [blame] | 3534 | /* |
| 3535 | * RCU_INIT_POINTER here is safe because we've not |
| 3536 | * modified the ctx and the above modifications of |
| 3537 | * ctx->task and ctx->task_ctx_data are immaterial |
| 3538 | * since those values are always verified under |
| 3539 | * ctx->lock which we're now holding. |
| 3540 | */ |
| 3541 | RCU_INIT_POINTER(task->perf_event_ctxp[ctxn], next_ctx); |
| 3542 | RCU_INIT_POINTER(next->perf_event_ctxp[ctxn], ctx); |
| 3543 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3544 | do_switch = 0; |
| 3545 | |
| 3546 | perf_event_sync_stat(ctx, next_ctx); |
| 3547 | } |
Thomas Gleixner | e625cce1 | 2009-11-17 18:02:06 +0100 | [diff] [blame] | 3548 | raw_spin_unlock(&next_ctx->lock); |
| 3549 | raw_spin_unlock(&ctx->lock); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3550 | } |
Peter Zijlstra | 5a3126d | 2013-10-07 17:12:48 +0200 | [diff] [blame] | 3551 | unlock: |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3552 | rcu_read_unlock(); |
| 3553 | |
| 3554 | if (do_switch) { |
Peter Zijlstra | facc430 | 2011-04-09 21:17:42 +0200 | [diff] [blame] | 3555 | raw_spin_lock(&ctx->lock); |
Kan Liang | 44fae179 | 2020-08-21 12:57:53 -0700 | [diff] [blame] | 3556 | perf_pmu_disable(pmu); |
| 3557 | |
| 3558 | if (cpuctx->sched_cb_usage && pmu->sched_task) |
| 3559 | pmu->sched_task(ctx, false); |
Alexander Shishkin | 487f05e | 2017-01-19 18:43:30 +0200 | [diff] [blame] | 3560 | task_ctx_sched_out(cpuctx, ctx, EVENT_ALL); |
Kan Liang | 44fae179 | 2020-08-21 12:57:53 -0700 | [diff] [blame] | 3561 | |
| 3562 | perf_pmu_enable(pmu); |
Peter Zijlstra | facc430 | 2011-04-09 21:17:42 +0200 | [diff] [blame] | 3563 | raw_spin_unlock(&ctx->lock); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3564 | } |
| 3565 | } |
| 3566 | |
Kan Liang | a5398bf | 2020-11-30 11:38:40 -0800 | [diff] [blame] | 3567 | static DEFINE_PER_CPU(struct list_head, sched_cb_list); |
| 3568 | |
Yan, Zheng | ba53250 | 2014-11-04 21:55:58 -0500 | [diff] [blame] | 3569 | void perf_sched_cb_dec(struct pmu *pmu) |
| 3570 | { |
Peter Zijlstra | e48c178 | 2016-07-06 09:18:30 +0200 | [diff] [blame] | 3571 | struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context); |
| 3572 | |
Kan Liang | a5398bf | 2020-11-30 11:38:40 -0800 | [diff] [blame] | 3573 | this_cpu_dec(perf_sched_cb_usages); |
| 3574 | |
| 3575 | if (!--cpuctx->sched_cb_usage) |
| 3576 | list_del(&cpuctx->sched_cb_entry); |
Yan, Zheng | ba53250 | 2014-11-04 21:55:58 -0500 | [diff] [blame] | 3577 | } |
| 3578 | |
Peter Zijlstra | e48c178 | 2016-07-06 09:18:30 +0200 | [diff] [blame] | 3579 | |
Yan, Zheng | ba53250 | 2014-11-04 21:55:58 -0500 | [diff] [blame] | 3580 | void perf_sched_cb_inc(struct pmu *pmu) |
| 3581 | { |
Peter Zijlstra | e48c178 | 2016-07-06 09:18:30 +0200 | [diff] [blame] | 3582 | struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context); |
| 3583 | |
Kan Liang | a5398bf | 2020-11-30 11:38:40 -0800 | [diff] [blame] | 3584 | if (!cpuctx->sched_cb_usage++) |
| 3585 | list_add(&cpuctx->sched_cb_entry, this_cpu_ptr(&sched_cb_list)); |
| 3586 | |
| 3587 | this_cpu_inc(perf_sched_cb_usages); |
Yan, Zheng | ba53250 | 2014-11-04 21:55:58 -0500 | [diff] [blame] | 3588 | } |
| 3589 | |
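/*
 * Illustrative sketch (not part of core.c): how a PMU driver typically pairs
 * perf_sched_cb_inc()/perf_sched_cb_dec() above.  The callback is requested
 * only while at least one event on this CPU needs it, so the relatively
 * expensive sched_task() path is skipped otherwise.  The demo_* names are
 * made up and the branch_sample_type test merely stands in for a
 * driver-specific condition (e.g. LBR call-stack mode or large PEBS); these
 * helpers would be called from the driver's pmu::add / pmu::del paths.
 */
static bool demo_needs_sched_cb(struct perf_event *event)
{
	return event->attr.branch_sample_type != 0;
}

static void demo_pmu_add(struct perf_event *event)
{
	if (demo_needs_sched_cb(event))
		perf_sched_cb_inc(event->pmu);
	/* ... the rest of the driver's add path ... */
}

static void demo_pmu_del(struct perf_event *event)
{
	if (demo_needs_sched_cb(event))
		perf_sched_cb_dec(event->pmu);
	/* ... the rest of the driver's del path ... */
}
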
| 3590 | /* |
| 3591 | * This function provides the context switch callback to the lower code |
| 3592 | * layer. It is invoked ONLY when the context switch callback is enabled. |
Peter Zijlstra | 09e61b4f | 2016-07-06 18:02:43 +0200 | [diff] [blame] | 3593 | * |
| 3594 | * This callback is relevant even to per-cpu events; for example, multi-event |
| 3595 | * PEBS requires this to provide PID/TID information. This requires we flush |
| 3596 | * all queued PEBS records before we context switch to a new task. |
Yan, Zheng | ba53250 | 2014-11-04 21:55:58 -0500 | [diff] [blame] | 3597 | */ |
Kan Liang | 556ccca | 2020-08-21 12:57:52 -0700 | [diff] [blame] | 3598 | static void __perf_pmu_sched_task(struct perf_cpu_context *cpuctx, bool sched_in) |
| 3599 | { |
| 3600 | struct pmu *pmu; |
| 3601 | |
| 3602 | pmu = cpuctx->ctx.pmu; /* software PMUs will not have sched_task */ |
| 3603 | |
| 3604 | if (WARN_ON_ONCE(!pmu->sched_task)) |
| 3605 | return; |
| 3606 | |
| 3607 | perf_ctx_lock(cpuctx, cpuctx->task_ctx); |
| 3608 | perf_pmu_disable(pmu); |
| 3609 | |
| 3610 | pmu->sched_task(cpuctx->task_ctx, sched_in); |
| 3611 | |
| 3612 | perf_pmu_enable(pmu); |
| 3613 | perf_ctx_unlock(cpuctx, cpuctx->task_ctx); |
| 3614 | } |
| 3615 | |
Kan Liang | a5398bf | 2020-11-30 11:38:40 -0800 | [diff] [blame] | 3616 | static void perf_pmu_sched_task(struct task_struct *prev, |
| 3617 | struct task_struct *next, |
| 3618 | bool sched_in) |
| 3619 | { |
| 3620 | struct perf_cpu_context *cpuctx; |
| 3621 | |
| 3622 | if (prev == next) |
| 3623 | return; |
| 3624 | |
| 3625 | list_for_each_entry(cpuctx, this_cpu_ptr(&sched_cb_list), sched_cb_entry) { |
| 3626 | /* will be handled in perf_event_context_sched_in/out */ |
| 3627 | if (cpuctx->task_ctx) |
| 3628 | continue; |
| 3629 | |
| 3630 | __perf_pmu_sched_task(cpuctx, sched_in); |
| 3631 | } |
| 3632 | } |
| 3633 | |
Adrian Hunter | 45ac140 | 2015-07-21 12:44:02 +0300 | [diff] [blame] | 3634 | static void perf_event_switch(struct task_struct *task, |
| 3635 | struct task_struct *next_prev, bool sched_in); |
| 3636 | |
Peter Zijlstra | 8dc85d547 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 3637 | #define for_each_task_context_nr(ctxn) \ |
| 3638 | for ((ctxn) = 0; (ctxn) < perf_nr_task_contexts; (ctxn)++) |
| 3639 | |
| 3640 | /* |
| 3641 | * Called from scheduler to remove the events of the current task, |
| 3642 | * with interrupts disabled. |
| 3643 | * |
| 3644 | * We stop each event and update the event value in event->count. |
| 3645 | * |
| 3646 | * This does not protect us against NMI, but disable() |
| 3647 | * sets the disabled bit in the control field of event _before_ |
| 3648 | * accessing the event control register. If an NMI hits, then it will |
| 3649 | * not restart the event. |
| 3650 | */ |
Jiri Olsa | ab0cce5 | 2012-05-23 13:13:02 +0200 | [diff] [blame] | 3651 | void __perf_event_task_sched_out(struct task_struct *task, |
| 3652 | struct task_struct *next) |
Peter Zijlstra | 8dc85d547 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 3653 | { |
| 3654 | int ctxn; |
| 3655 | |
Kan Liang | a5398bf | 2020-11-30 11:38:40 -0800 | [diff] [blame] | 3656 | if (__this_cpu_read(perf_sched_cb_usages)) |
| 3657 | perf_pmu_sched_task(task, next, false); |
| 3658 | |
Adrian Hunter | 45ac140 | 2015-07-21 12:44:02 +0300 | [diff] [blame] | 3659 | if (atomic_read(&nr_switch_events)) |
| 3660 | perf_event_switch(task, next, false); |
| 3661 | |
Peter Zijlstra | 8dc85d547 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 3662 | for_each_task_context_nr(ctxn) |
| 3663 | perf_event_context_sched_out(task, ctxn, next); |
Stephane Eranian | e5d1367 | 2011-02-14 11:20:01 +0200 | [diff] [blame] | 3664 | |
| 3665 | /* |
| 3666 | * if cgroup events exist on this CPU, then we need |
| 3667 | * to check if we have to switch out PMU state. |
| 3668 | * cgroup events are system-wide mode only |
| 3669 | */ |
Christoph Lameter | 4a32fea | 2014-08-17 12:30:27 -0500 | [diff] [blame] | 3670 | if (atomic_read(this_cpu_ptr(&perf_cgroup_events))) |
Stephane Eranian | a8d757e | 2011-08-25 15:58:03 +0200 | [diff] [blame] | 3671 | perf_cgroup_sched_out(task, next); |
Peter Zijlstra | 8dc85d547 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 3672 | } |
| 3673 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3674 | /* |
| 3675 | * Called with IRQs disabled |
| 3676 | */ |
Frederic Weisbecker | 5b0311e | 2010-01-17 11:59:13 +0100 | [diff] [blame] | 3677 | static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx, |
| 3678 | enum event_type_t event_type) |
| 3679 | { |
| 3680 | ctx_sched_out(&cpuctx->ctx, cpuctx, event_type); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3681 | } |
| 3682 | |
Ian Rogers | 6eef8a71 | 2020-02-13 23:51:30 -0800 | [diff] [blame] | 3683 | static bool perf_less_group_idx(const void *l, const void *r) |
Peter Zijlstra | 1cac7b1 | 2017-11-13 14:28:30 +0100 | [diff] [blame] | 3684 | { |
Ian Rogers | 24fb6b8 | 2020-03-21 09:43:31 -0700 | [diff] [blame] | 3685 | const struct perf_event *le = *(const struct perf_event **)l; |
| 3686 | const struct perf_event *re = *(const struct perf_event **)r; |
Ian Rogers | 6eef8a71 | 2020-02-13 23:51:30 -0800 | [diff] [blame] | 3687 | |
| 3688 | return le->group_index < re->group_index; |
| 3689 | } |
| 3690 | |
| 3691 | static void swap_ptr(void *l, void *r) |
| 3692 | { |
| 3693 | void **lp = l, **rp = r; |
| 3694 | |
| 3695 | swap(*lp, *rp); |
| 3696 | } |
| 3697 | |
| 3698 | static const struct min_heap_callbacks perf_min_heap = { |
| 3699 | .elem_size = sizeof(struct perf_event *), |
| 3700 | .less = perf_less_group_idx, |
| 3701 | .swp = swap_ptr, |
| 3702 | }; |
| 3703 | |
| 3704 | static void __heap_add(struct min_heap *heap, struct perf_event *event) |
| 3705 | { |
| 3706 | struct perf_event **itrs = heap->data; |
| 3707 | |
| 3708 | if (event) { |
| 3709 | itrs[heap->nr] = event; |
| 3710 | heap->nr++; |
| 3711 | } |
| 3712 | } |
| 3713 | |
Ian Rogers | 836196be | 2020-02-13 23:51:31 -0800 | [diff] [blame] | 3714 | static noinline int visit_groups_merge(struct perf_cpu_context *cpuctx, |
| 3715 | struct perf_event_groups *groups, int cpu, |
Ian Rogers | 6eef8a71 | 2020-02-13 23:51:30 -0800 | [diff] [blame] | 3716 | int (*func)(struct perf_event *, void *), |
| 3717 | void *data) |
| 3718 | { |
Ian Rogers | 95ed6c7 | 2020-02-13 23:51:33 -0800 | [diff] [blame] | 3719 | #ifdef CONFIG_CGROUP_PERF |
| 3720 | struct cgroup_subsys_state *css = NULL; |
| 3721 | #endif |
Ian Rogers | 6eef8a71 | 2020-02-13 23:51:30 -0800 | [diff] [blame] | 3722 | /* Space for per CPU and/or any CPU event iterators. */ |
| 3723 | struct perf_event *itrs[2]; |
Ian Rogers | 836196be | 2020-02-13 23:51:31 -0800 | [diff] [blame] | 3724 | struct min_heap event_heap; |
| 3725 | struct perf_event **evt; |
Peter Zijlstra | 1cac7b1 | 2017-11-13 14:28:30 +0100 | [diff] [blame] | 3726 | int ret; |
| 3727 | |
Ian Rogers | 836196be | 2020-02-13 23:51:31 -0800 | [diff] [blame] | 3728 | if (cpuctx) { |
| 3729 | event_heap = (struct min_heap){ |
| 3730 | .data = cpuctx->heap, |
| 3731 | .nr = 0, |
| 3732 | .size = cpuctx->heap_size, |
| 3733 | }; |
Ian Rogers | c2283c9 | 2020-02-13 23:51:32 -0800 | [diff] [blame] | 3734 | |
| 3735 | lockdep_assert_held(&cpuctx->ctx.lock); |
Ian Rogers | 95ed6c7 | 2020-02-13 23:51:33 -0800 | [diff] [blame] | 3736 | |
| 3737 | #ifdef CONFIG_CGROUP_PERF |
| 3738 | if (cpuctx->cgrp) |
| 3739 | css = &cpuctx->cgrp->css; |
| 3740 | #endif |
Ian Rogers | 836196be | 2020-02-13 23:51:31 -0800 | [diff] [blame] | 3741 | } else { |
| 3742 | event_heap = (struct min_heap){ |
| 3743 | .data = itrs, |
| 3744 | .nr = 0, |
| 3745 | .size = ARRAY_SIZE(itrs), |
| 3746 | }; |
| 3747 | /* Events not within a CPU context may be on any CPU. */ |
Ian Rogers | 95ed6c7 | 2020-02-13 23:51:33 -0800 | [diff] [blame] | 3748 | __heap_add(&event_heap, perf_event_groups_first(groups, -1, NULL)); |
Ian Rogers | 836196be | 2020-02-13 23:51:31 -0800 | [diff] [blame] | 3749 | } |
| 3750 | evt = event_heap.data; |
| 3751 | |
Ian Rogers | 95ed6c7 | 2020-02-13 23:51:33 -0800 | [diff] [blame] | 3752 | __heap_add(&event_heap, perf_event_groups_first(groups, cpu, NULL)); |
| 3753 | |
| 3754 | #ifdef CONFIG_CGROUP_PERF |
| 3755 | for (; css; css = css->parent) |
| 3756 | __heap_add(&event_heap, perf_event_groups_first(groups, cpu, css->cgroup)); |
| 3757 | #endif |
Peter Zijlstra | 1cac7b1 | 2017-11-13 14:28:30 +0100 | [diff] [blame] | 3758 | |
Ian Rogers | 6eef8a71 | 2020-02-13 23:51:30 -0800 | [diff] [blame] | 3759 | min_heapify_all(&event_heap, &perf_min_heap); |
Peter Zijlstra | 1cac7b1 | 2017-11-13 14:28:30 +0100 | [diff] [blame] | 3760 | |
Ian Rogers | 6eef8a71 | 2020-02-13 23:51:30 -0800 | [diff] [blame] | 3761 | while (event_heap.nr) { |
Peter Zijlstra | 1cac7b1 | 2017-11-13 14:28:30 +0100 | [diff] [blame] | 3762 | ret = func(*evt, data); |
| 3763 | if (ret) |
| 3764 | return ret; |
| 3765 | |
| 3766 | *evt = perf_event_groups_next(*evt); |
Ian Rogers | 6eef8a71 | 2020-02-13 23:51:30 -0800 | [diff] [blame] | 3767 | if (*evt) |
| 3768 | min_heapify(&event_heap, 0, &perf_min_heap); |
| 3769 | else |
| 3770 | min_heap_pop(&event_heap, &perf_min_heap); |
Peter Zijlstra | 1cac7b1 | 2017-11-13 14:28:30 +0100 | [diff] [blame] | 3771 | } |
| 3772 | |
| 3773 | return 0; |
| 3774 | } |
| 3775 | |
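/*
 * Illustrative sketch (not part of core.c): the k-way merge idea behind
 * visit_groups_merge() above, reduced to plain sorted arrays with a linear
 * scan standing in for the min-heap.  One cursor per sorted input (the
 * analogue of the CPU == -1 tree, the local CPU tree and the cgroup trees)
 * is kept; repeatedly visiting the smallest current key yields one globally
 * ordered walk.  All names are made up and each input is assumed non-empty.
 */
struct demo_cursor {
	const u64 *keys;	/* sorted keys, like group_index */
	int nr, pos;
};

static void demo_visit_in_order(struct demo_cursor *cur, int nr_cursors,
				void (*visit)(u64 key))
{
	while (nr_cursors) {
		int min = 0, i;

		/* pick the cursor whose current key is smallest */
		for (i = 1; i < nr_cursors; i++)
			if (cur[i].keys[cur[i].pos] < cur[min].keys[cur[min].pos])
				min = i;

		visit(cur[min].keys[cur[min].pos]);

		/* advance that cursor; drop it once its input is exhausted */
		if (++cur[min].pos == cur[min].nr)
			cur[min] = cur[--nr_cursors];
	}
}
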
Peter Zijlstra | 09f5e7d | 2021-12-20 13:19:52 +0100 | [diff] [blame] | 3776 | /* |
| 3777 | * Because the userpage is strictly per-event (there is no concept of context, |
| 3778 | * so there cannot be a context indirection), every userpage must be updated |
| 3779 | * when context time starts :-( |
| 3780 | * |
| 3781 | * IOW, we must not miss EVENT_TIME edges. |
| 3782 | */ |
Song Liu | f792565 | 2021-09-29 12:43:13 -0700 | [diff] [blame] | 3783 | static inline bool event_update_userpage(struct perf_event *event) |
| 3784 | { |
| 3785 | if (likely(!atomic_read(&event->mmap_count))) |
| 3786 | return false; |
| 3787 | |
| 3788 | perf_event_update_time(event); |
Song Liu | f792565 | 2021-09-29 12:43:13 -0700 | [diff] [blame] | 3789 | perf_event_update_userpage(event); |
| 3790 | |
| 3791 | return true; |
| 3792 | } |
| 3793 | |
| 3794 | static inline void group_update_userpage(struct perf_event *group_event) |
| 3795 | { |
| 3796 | struct perf_event *event; |
| 3797 | |
| 3798 | if (!event_update_userpage(group_event)) |
| 3799 | return; |
| 3800 | |
| 3801 | for_each_sibling_event(event, group_event) |
| 3802 | event_update_userpage(event); |
| 3803 | } |
| 3804 | |
Peter Zijlstra | ab6f824 | 2019-08-07 11:17:00 +0200 | [diff] [blame] | 3805 | static int merge_sched_in(struct perf_event *event, void *data) |
Peter Zijlstra | 1cac7b1 | 2017-11-13 14:28:30 +0100 | [diff] [blame] | 3806 | { |
Peter Zijlstra | 2c2366c | 2019-08-07 11:45:01 +0200 | [diff] [blame] | 3807 | struct perf_event_context *ctx = event->ctx; |
| 3808 | struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); |
| 3809 | int *can_add_hw = data; |
Peter Zijlstra | ab6f824 | 2019-08-07 11:17:00 +0200 | [diff] [blame] | 3810 | |
Peter Zijlstra | 1cac7b1 | 2017-11-13 14:28:30 +0100 | [diff] [blame] | 3811 | if (event->state <= PERF_EVENT_STATE_OFF) |
| 3812 | return 0; |
| 3813 | |
| 3814 | if (!event_filter_match(event)) |
| 3815 | return 0; |
| 3816 | |
Peter Zijlstra | 2c2366c | 2019-08-07 11:45:01 +0200 | [diff] [blame] | 3817 | if (group_can_go_on(event, cpuctx, *can_add_hw)) { |
| 3818 | if (!group_sched_in(event, cpuctx, ctx)) |
Peter Zijlstra | ab6f824 | 2019-08-07 11:17:00 +0200 | [diff] [blame] | 3819 | list_add_tail(&event->active_list, get_event_list(event)); |
Peter Zijlstra | 6668128 | 2017-11-13 14:28:38 +0100 | [diff] [blame] | 3820 | } |
Peter Zijlstra | 1cac7b1 | 2017-11-13 14:28:30 +0100 | [diff] [blame] | 3821 | |
Peter Zijlstra | ab6f824 | 2019-08-07 11:17:00 +0200 | [diff] [blame] | 3822 | if (event->state == PERF_EVENT_STATE_INACTIVE) { |
Song Liu | f792565 | 2021-09-29 12:43:13 -0700 | [diff] [blame] | 3823 | *can_add_hw = 0; |
Peter Zijlstra | 33238c5 | 2020-03-18 20:33:37 +0100 | [diff] [blame] | 3824 | if (event->attr.pinned) { |
| 3825 | perf_cgroup_event_disable(event, ctx); |
Peter Zijlstra | ab6f824 | 2019-08-07 11:17:00 +0200 | [diff] [blame] | 3826 | perf_event_set_state(event, PERF_EVENT_STATE_ERROR); |
Song Liu | f792565 | 2021-09-29 12:43:13 -0700 | [diff] [blame] | 3827 | } else { |
| 3828 | ctx->rotate_necessary = 1; |
| 3829 | perf_mux_hrtimer_restart(cpuctx); |
| 3830 | group_update_userpage(event); |
Peter Zijlstra | 33238c5 | 2020-03-18 20:33:37 +0100 | [diff] [blame] | 3831 | } |
Peter Zijlstra | 1cac7b1 | 2017-11-13 14:28:30 +0100 | [diff] [blame] | 3832 | } |
| 3833 | |
| 3834 | return 0; |
| 3835 | } |
| 3836 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3837 | static void |
Frederic Weisbecker | 5b0311e | 2010-01-17 11:59:13 +0100 | [diff] [blame] | 3838 | ctx_pinned_sched_in(struct perf_event_context *ctx, |
Peter Zijlstra | 6e37738 | 2010-02-11 13:21:58 +0100 | [diff] [blame] | 3839 | struct perf_cpu_context *cpuctx) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3840 | { |
Peter Zijlstra | 2c2366c | 2019-08-07 11:45:01 +0200 | [diff] [blame] | 3841 | int can_add_hw = 1; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3842 | |
Ian Rogers | 836196be | 2020-02-13 23:51:31 -0800 | [diff] [blame] | 3843 | if (ctx != &cpuctx->ctx) |
| 3844 | cpuctx = NULL; |
| 3845 | |
| 3846 | visit_groups_merge(cpuctx, &ctx->pinned_groups, |
Peter Zijlstra | 1cac7b1 | 2017-11-13 14:28:30 +0100 | [diff] [blame] | 3847 | smp_processor_id(), |
Peter Zijlstra | 2c2366c | 2019-08-07 11:45:01 +0200 | [diff] [blame] | 3848 | merge_sched_in, &can_add_hw); |
Frederic Weisbecker | 5b0311e | 2010-01-17 11:59:13 +0100 | [diff] [blame] | 3849 | } |
| 3850 | |
| 3851 | static void |
| 3852 | ctx_flexible_sched_in(struct perf_event_context *ctx, |
Peter Zijlstra | 6e37738 | 2010-02-11 13:21:58 +0100 | [diff] [blame] | 3853 | struct perf_cpu_context *cpuctx) |
Frederic Weisbecker | 5b0311e | 2010-01-17 11:59:13 +0100 | [diff] [blame] | 3854 | { |
Peter Zijlstra | 2c2366c | 2019-08-07 11:45:01 +0200 | [diff] [blame] | 3855 | int can_add_hw = 1; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3856 | |
Ian Rogers | 836196be | 2020-02-13 23:51:31 -0800 | [diff] [blame] | 3857 | if (ctx != &cpuctx->ctx) |
| 3858 | cpuctx = NULL; |
| 3859 | |
| 3860 | visit_groups_merge(cpuctx, &ctx->flexible_groups, |
Peter Zijlstra | 1cac7b1 | 2017-11-13 14:28:30 +0100 | [diff] [blame] | 3861 | smp_processor_id(), |
Peter Zijlstra | 2c2366c | 2019-08-07 11:45:01 +0200 | [diff] [blame] | 3862 | merge_sched_in, &can_add_hw); |
Frederic Weisbecker | 5b0311e | 2010-01-17 11:59:13 +0100 | [diff] [blame] | 3863 | } |
| 3864 | |
| 3865 | static void |
| 3866 | ctx_sched_in(struct perf_event_context *ctx, |
| 3867 | struct perf_cpu_context *cpuctx, |
Stephane Eranian | e5d1367 | 2011-02-14 11:20:01 +0200 | [diff] [blame] | 3868 | enum event_type_t event_type, |
| 3869 | struct task_struct *task) |
Frederic Weisbecker | 5b0311e | 2010-01-17 11:59:13 +0100 | [diff] [blame] | 3870 | { |
Peter Zijlstra | db24d33 | 2011-04-09 21:17:45 +0200 | [diff] [blame] | 3871 | int is_active = ctx->is_active; |
Stephane Eranian | e5d1367 | 2011-02-14 11:20:01 +0200 | [diff] [blame] | 3872 | |
Peter Zijlstra | c994d61 | 2016-01-08 09:20:23 +0100 | [diff] [blame] | 3873 | lockdep_assert_held(&ctx->lock); |
Frederic Weisbecker | 5b0311e | 2010-01-17 11:59:13 +0100 | [diff] [blame] | 3874 | |
Frederic Weisbecker | 5b0311e | 2010-01-17 11:59:13 +0100 | [diff] [blame] | 3875 | if (likely(!ctx->nr_events)) |
Peter Zijlstra | facc430 | 2011-04-09 21:17:42 +0200 | [diff] [blame] | 3876 | return; |
Frederic Weisbecker | 5b0311e | 2010-01-17 11:59:13 +0100 | [diff] [blame] | 3877 | |
Peter Zijlstra | 09f5e7d | 2021-12-20 13:19:52 +0100 | [diff] [blame] | 3878 | if (is_active ^ EVENT_TIME) { |
| 3879 | /* start ctx time */ |
| 3880 | __update_context_time(ctx, false); |
| 3881 | perf_cgroup_set_timestamp(task, ctx); |
| 3882 | /* |
| 3883 | * CPU-release for the below ->is_active store, |
| 3884 | * see __load_acquire() in perf_event_time_now() |
| 3885 | */ |
| 3886 | barrier(); |
| 3887 | } |
| 3888 | |
Peter Zijlstra | 3cbaa59 | 2016-02-24 18:45:47 +0100 | [diff] [blame] | 3889 | ctx->is_active |= (event_type | EVENT_TIME); |
Peter Zijlstra | 63e30d3 | 2016-01-08 11:39:10 +0100 | [diff] [blame] | 3890 | if (ctx->task) { |
| 3891 | if (!is_active) |
| 3892 | cpuctx->task_ctx = ctx; |
| 3893 | else |
| 3894 | WARN_ON_ONCE(cpuctx->task_ctx != ctx); |
| 3895 | } |
| 3896 | |
Peter Zijlstra | 3cbaa59 | 2016-02-24 18:45:47 +0100 | [diff] [blame] | 3897 | is_active ^= ctx->is_active; /* changed bits */ |
| 3898 | |
Frederic Weisbecker | 5b0311e | 2010-01-17 11:59:13 +0100 | [diff] [blame] | 3899 | /* |
| 3900 | * First go through the list and put on any pinned groups |
| 3901 | * in order to give them the best chance of going on. |
| 3902 | */ |
Peter Zijlstra | 3cbaa59 | 2016-02-24 18:45:47 +0100 | [diff] [blame] | 3903 | if (is_active & EVENT_PINNED) |
Peter Zijlstra | 6e37738 | 2010-02-11 13:21:58 +0100 | [diff] [blame] | 3904 | ctx_pinned_sched_in(ctx, cpuctx); |
Frederic Weisbecker | 5b0311e | 2010-01-17 11:59:13 +0100 | [diff] [blame] | 3905 | |
| 3906 | /* Then walk through the lower prio flexible groups */ |
Peter Zijlstra | 3cbaa59 | 2016-02-24 18:45:47 +0100 | [diff] [blame] | 3907 | if (is_active & EVENT_FLEXIBLE) |
Peter Zijlstra | 6e37738 | 2010-02-11 13:21:58 +0100 | [diff] [blame] | 3908 | ctx_flexible_sched_in(ctx, cpuctx); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3909 | } |
| 3910 | |
Frederic Weisbecker | 329c0e0 | 2010-01-17 12:56:05 +0100 | [diff] [blame] | 3911 | static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx, |
Stephane Eranian | e5d1367 | 2011-02-14 11:20:01 +0200 | [diff] [blame] | 3912 | enum event_type_t event_type, |
| 3913 | struct task_struct *task) |
Frederic Weisbecker | 329c0e0 | 2010-01-17 12:56:05 +0100 | [diff] [blame] | 3914 | { |
| 3915 | struct perf_event_context *ctx = &cpuctx->ctx; |
| 3916 | |
Stephane Eranian | e5d1367 | 2011-02-14 11:20:01 +0200 | [diff] [blame] | 3917 | ctx_sched_in(ctx, cpuctx, event_type, task); |
Frederic Weisbecker | 329c0e0 | 2010-01-17 12:56:05 +0100 | [diff] [blame] | 3918 | } |
| 3919 | |
Stephane Eranian | e5d1367 | 2011-02-14 11:20:01 +0200 | [diff] [blame] | 3920 | static void perf_event_context_sched_in(struct perf_event_context *ctx, |
| 3921 | struct task_struct *task) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3922 | { |
Peter Zijlstra | 8dc85d547 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 3923 | struct perf_cpu_context *cpuctx; |
Peter Zijlstra | 012669c | 2021-06-22 16:21:01 +0200 | [diff] [blame] | 3924 | struct pmu *pmu; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3925 | |
Peter Zijlstra | 108b02c | 2010-09-06 14:32:03 +0200 | [diff] [blame] | 3926 | cpuctx = __get_cpu_context(ctx); |
Peter Zijlstra | 012669c | 2021-06-22 16:21:01 +0200 | [diff] [blame] | 3927 | |
| 3928 | /* |
| 3929 | * HACK: for HETEROGENEOUS the task context might have switched to a |
 | 3930 | * different PMU, force (re)set the context. |
| 3931 | */ |
| 3932 | pmu = ctx->pmu = cpuctx->ctx.pmu; |
| 3933 | |
Kan Liang | 556ccca | 2020-08-21 12:57:52 -0700 | [diff] [blame] | 3934 | if (cpuctx->task_ctx == ctx) { |
| 3935 | if (cpuctx->sched_cb_usage) |
| 3936 | __perf_pmu_sched_task(cpuctx, true); |
Frederic Weisbecker | 329c0e0 | 2010-01-17 12:56:05 +0100 | [diff] [blame] | 3937 | return; |
Kan Liang | 556ccca | 2020-08-21 12:57:52 -0700 | [diff] [blame] | 3938 | } |
Frederic Weisbecker | 329c0e0 | 2010-01-17 12:56:05 +0100 | [diff] [blame] | 3939 | |
Peter Zijlstra | facc430 | 2011-04-09 21:17:42 +0200 | [diff] [blame] | 3940 | perf_ctx_lock(cpuctx, ctx); |
leilei.lin | fdccc3f | 2017-08-09 08:29:21 +0800 | [diff] [blame] | 3941 | /* |
| 3942 | * We must check ctx->nr_events while holding ctx->lock, such |
| 3943 | * that we serialize against perf_install_in_context(). |
| 3944 | */ |
| 3945 | if (!ctx->nr_events) |
| 3946 | goto unlock; |
| 3947 | |
Kan Liang | 556ccca | 2020-08-21 12:57:52 -0700 | [diff] [blame] | 3948 | perf_pmu_disable(pmu); |
Frederic Weisbecker | 329c0e0 | 2010-01-17 12:56:05 +0100 | [diff] [blame] | 3949 | /* |
| 3950 | * We want to keep the following priority order: |
| 3951 | * cpu pinned (that don't need to move), task pinned, |
| 3952 | * cpu flexible, task flexible. |
Alexander Shishkin | fe45baf | 2017-01-19 18:43:29 +0200 | [diff] [blame] | 3953 | * |
| 3954 | * However, if task's ctx is not carrying any pinned |
| 3955 | * events, no need to flip the cpuctx's events around. |
Frederic Weisbecker | 329c0e0 | 2010-01-17 12:56:05 +0100 | [diff] [blame] | 3956 | */ |
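/*
 * (The CPU flexible events scheduled out below are put back by
 * perf_event_sched_in(), after the pinned events, which restores
 * the order described above.)
 */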
Alexey Budankov | 8e1a203 | 2017-09-08 11:47:03 +0300 | [diff] [blame] | 3957 | if (!RB_EMPTY_ROOT(&ctx->pinned_groups.tree)) |
Alexander Shishkin | fe45baf | 2017-01-19 18:43:29 +0200 | [diff] [blame] | 3958 | cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE); |
Peter Zijlstra | 63e30d3 | 2016-01-08 11:39:10 +0100 | [diff] [blame] | 3959 | perf_event_sched_in(cpuctx, ctx, task); |
Kan Liang | 556ccca | 2020-08-21 12:57:52 -0700 | [diff] [blame] | 3960 | |
| 3961 | if (cpuctx->sched_cb_usage && pmu->sched_task) |
| 3962 | pmu->sched_task(cpuctx->task_ctx, true); |
| 3963 | |
| 3964 | perf_pmu_enable(pmu); |
leilei.lin | fdccc3f | 2017-08-09 08:29:21 +0800 | [diff] [blame] | 3965 | |
| 3966 | unlock: |
Peter Zijlstra | facc430 | 2011-04-09 21:17:42 +0200 | [diff] [blame] | 3967 | perf_ctx_unlock(cpuctx, ctx); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 3968 | } |
| 3969 | |
Peter Zijlstra | 8dc85d547 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 3970 | /* |
| 3971 | * Called from scheduler to add the events of the current task |
| 3972 | * with interrupts disabled. |
| 3973 | * |
| 3974 | * We restore the event value and then enable it. |
| 3975 | * |
| 3976 | * This does not protect us against NMI, but enable() |
| 3977 | * sets the enabled bit in the control field of event _before_ |
 | 3978 | * accessing the event control register. If an NMI hits, then it will |
| 3979 | * keep the event running. |
| 3980 | */ |
Jiri Olsa | ab0cce5 | 2012-05-23 13:13:02 +0200 | [diff] [blame] | 3981 | void __perf_event_task_sched_in(struct task_struct *prev, |
| 3982 | struct task_struct *task) |
Peter Zijlstra | 8dc85d547 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 3983 | { |
| 3984 | struct perf_event_context *ctx; |
| 3985 | int ctxn; |
| 3986 | |
Peter Zijlstra | 7e41d17 | 2016-01-08 09:21:40 +0100 | [diff] [blame] | 3987 | /* |
| 3988 | * If cgroup events exist on this CPU, then we need to check if we have |
 | 3989 | * to switch in PMU state; cgroup events are system-wide mode only. |
| 3990 | * |
| 3991 | * Since cgroup events are CPU events, we must schedule these in before |
| 3992 | * we schedule in the task events. |
| 3993 | */ |
| 3994 | if (atomic_read(this_cpu_ptr(&perf_cgroup_events))) |
| 3995 | perf_cgroup_sched_in(prev, task); |
| 3996 | |
Peter Zijlstra | 8dc85d547 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 3997 | for_each_task_context_nr(ctxn) { |
| 3998 | ctx = task->perf_event_ctxp[ctxn]; |
| 3999 | if (likely(!ctx)) |
| 4000 | continue; |
| 4001 | |
Stephane Eranian | e5d1367 | 2011-02-14 11:20:01 +0200 | [diff] [blame] | 4002 | perf_event_context_sched_in(ctx, task); |
Peter Zijlstra | 8dc85d547 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 4003 | } |
Stephane Eranian | d010b33 | 2012-02-09 23:21:00 +0100 | [diff] [blame] | 4004 | |
Adrian Hunter | 45ac140 | 2015-07-21 12:44:02 +0300 | [diff] [blame] | 4005 | if (atomic_read(&nr_switch_events)) |
| 4006 | perf_event_switch(task, prev, true); |
Kan Liang | a5398bf | 2020-11-30 11:38:40 -0800 | [diff] [blame] | 4007 | |
| 4008 | if (__this_cpu_read(perf_sched_cb_usages)) |
| 4009 | perf_pmu_sched_task(prev, task, true); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4010 | } |
| 4011 | |
Peter Zijlstra | abd5071 | 2010-01-26 18:50:16 +0100 | [diff] [blame] | 4012 | static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count) |
| 4013 | { |
| 4014 | u64 frequency = event->attr.sample_freq; |
| 4015 | u64 sec = NSEC_PER_SEC; |
| 4016 | u64 divisor, dividend; |
| 4017 | |
| 4018 | int count_fls, nsec_fls, frequency_fls, sec_fls; |
| 4019 | |
| 4020 | count_fls = fls64(count); |
| 4021 | nsec_fls = fls64(nsec); |
| 4022 | frequency_fls = fls64(frequency); |
| 4023 | sec_fls = 30; |
| 4024 | |
| 4025 | /* |
 | 4026 | * We got @count in @nsec. With a target of sample_freq HZ, |
| 4027 | * the target period becomes: |
| 4028 | * |
| 4029 | * @count * 10^9 |
| 4030 | * period = ------------------- |
| 4031 | * @nsec * sample_freq |
| 4032 | * |
| 4033 | */ |
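/*
 * Worked example (illustrative numbers only): @count = 1,000,000
 * events in @nsec = 10,000,000 ns (10 ms) with sample_freq = 4000 HZ
 * gives period = (1e6 * 1e9) / (1e7 * 4000) = 25,000 events/sample.
 */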
| 4034 | |
| 4035 | /* |
| 4036 | * Reduce accuracy by one bit such that @a and @b converge |
| 4037 | * to a similar magnitude. |
| 4038 | */ |
Peter Zijlstra | fe4b04f | 2011-02-02 13:19:09 +0100 | [diff] [blame] | 4039 | #define REDUCE_FLS(a, b) \ |
Peter Zijlstra | abd5071 | 2010-01-26 18:50:16 +0100 | [diff] [blame] | 4040 | do { \ |
| 4041 | if (a##_fls > b##_fls) { \ |
| 4042 | a >>= 1; \ |
| 4043 | a##_fls--; \ |
| 4044 | } else { \ |
| 4045 | b >>= 1; \ |
| 4046 | b##_fls--; \ |
| 4047 | } \ |
| 4048 | } while (0) |
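/*
 * Illustration: with nsec = 40 (fls 6) and frequency = 6 (fls 3),
 * REDUCE_FLS(nsec, frequency) halves nsec to 20; each invocation
 * shrinks whichever term currently has the larger bit-length.
 */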
| 4049 | |
| 4050 | /* |
| 4051 | * Reduce accuracy until either term fits in a u64, then proceed with |
| 4052 | * the other, so that finally we can do a u64/u64 division. |
| 4053 | */ |
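/*
 * After the loop below, at least one of (count * sec) or
 * (nsec * frequency) fits in a u64; the two branches that follow keep
 * halving a factor of the oversized product while also halving the
 * other product, preserving the ratio.
 */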
| 4054 | while (count_fls + sec_fls > 64 && nsec_fls + frequency_fls > 64) { |
| 4055 | REDUCE_FLS(nsec, frequency); |
| 4056 | REDUCE_FLS(sec, count); |
| 4057 | } |
| 4058 | |
| 4059 | if (count_fls + sec_fls > 64) { |
| 4060 | divisor = nsec * frequency; |
| 4061 | |
| 4062 | while (count_fls + sec_fls > 64) { |
| 4063 | REDUCE_FLS(count, sec); |
| 4064 | divisor >>= 1; |
| 4065 | } |
| 4066 | |
| 4067 | dividend = count * sec; |
| 4068 | } else { |
| 4069 | dividend = count * sec; |
| 4070 | |
| 4071 | while (nsec_fls + frequency_fls > 64) { |
| 4072 | REDUCE_FLS(nsec, frequency); |
| 4073 | dividend >>= 1; |
| 4074 | } |
| 4075 | |
| 4076 | divisor = nsec * frequency; |
| 4077 | } |
| 4078 | |
Peter Zijlstra | f6ab91ad | 2010-06-04 15:18:01 +0200 | [diff] [blame] | 4079 | if (!divisor) |
| 4080 | return dividend; |
| 4081 | |
Peter Zijlstra | abd5071 | 2010-01-26 18:50:16 +0100 | [diff] [blame] | 4082 | return div64_u64(dividend, divisor); |
| 4083 | } |
| 4084 | |
Stephane Eranian | e050e3f | 2012-01-26 17:03:19 +0100 | [diff] [blame] | 4085 | static DEFINE_PER_CPU(int, perf_throttled_count); |
| 4086 | static DEFINE_PER_CPU(u64, perf_throttled_seq); |
| 4087 | |
Stephane Eranian | f39d47f | 2012-02-07 14:39:57 +0100 | [diff] [blame] | 4088 | static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count, bool disable) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4089 | { |
| 4090 | struct hw_perf_event *hwc = &event->hw; |
Peter Zijlstra | f6ab91ad | 2010-06-04 15:18:01 +0200 | [diff] [blame] | 4091 | s64 period, sample_period; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4092 | s64 delta; |
| 4093 | |
Peter Zijlstra | abd5071 | 2010-01-26 18:50:16 +0100 | [diff] [blame] | 4094 | period = perf_calculate_period(event, nsec, count); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4095 | |
| 4096 | delta = (s64)(period - hwc->sample_period); |
| 4097 | delta = (delta + 7) / 8; /* low pass filter */ |
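/*
 * Illustration: a jump from sample_period 10,000 to a computed period
 * of 18,000 gives delta = 8,000, filtered down to 1,000, so the
 * period only creeps up to 11,000 in this adjustment.
 */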
| 4098 | |
| 4099 | sample_period = hwc->sample_period + delta; |
| 4100 | |
| 4101 | if (!sample_period) |
| 4102 | sample_period = 1; |
| 4103 | |
| 4104 | hwc->sample_period = sample_period; |
Peter Zijlstra | abd5071 | 2010-01-26 18:50:16 +0100 | [diff] [blame] | 4105 | |
Peter Zijlstra | e785059 | 2010-05-21 14:43:08 +0200 | [diff] [blame] | 4106 | if (local64_read(&hwc->period_left) > 8*sample_period) { |
Stephane Eranian | f39d47f | 2012-02-07 14:39:57 +0100 | [diff] [blame] | 4107 | if (disable) |
| 4108 | event->pmu->stop(event, PERF_EF_UPDATE); |
| 4109 | |
Peter Zijlstra | e785059 | 2010-05-21 14:43:08 +0200 | [diff] [blame] | 4110 | local64_set(&hwc->period_left, 0); |
Stephane Eranian | f39d47f | 2012-02-07 14:39:57 +0100 | [diff] [blame] | 4111 | |
| 4112 | if (disable) |
| 4113 | event->pmu->start(event, PERF_EF_RELOAD); |
Peter Zijlstra | abd5071 | 2010-01-26 18:50:16 +0100 | [diff] [blame] | 4114 | } |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4115 | } |
| 4116 | |
Stephane Eranian | e050e3f | 2012-01-26 17:03:19 +0100 | [diff] [blame] | 4117 | /* |
| 4118 | * combine freq adjustment with unthrottling to avoid two passes over the |
 | 4119 | * events. At the same time, make sure that having freq events does not |
 | 4120 | * change the rate of unthrottling, as that would introduce bias. |
| 4121 | */ |
| 4122 | static void perf_adjust_freq_unthr_context(struct perf_event_context *ctx, |
| 4123 | int needs_unthr) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4124 | { |
| 4125 | struct perf_event *event; |
| 4126 | struct hw_perf_event *hwc; |
Stephane Eranian | e050e3f | 2012-01-26 17:03:19 +0100 | [diff] [blame] | 4127 | u64 now, period = TICK_NSEC; |
Peter Zijlstra | abd5071 | 2010-01-26 18:50:16 +0100 | [diff] [blame] | 4128 | s64 delta; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4129 | |
Stephane Eranian | e050e3f | 2012-01-26 17:03:19 +0100 | [diff] [blame] | 4130 | /* |
| 4131 | * only need to iterate over all events iff: |
 | 4132 | * - context has events in frequency mode (needs freq adjust) |
| 4133 | * - there are events to unthrottle on this cpu |
| 4134 | */ |
| 4135 | if (!(ctx->nr_freq || needs_unthr)) |
Peter Zijlstra | 0f5a260 | 2011-11-16 14:38:16 +0100 | [diff] [blame] | 4136 | return; |
| 4137 | |
Stephane Eranian | e050e3f | 2012-01-26 17:03:19 +0100 | [diff] [blame] | 4138 | raw_spin_lock(&ctx->lock); |
Stephane Eranian | f39d47f | 2012-02-07 14:39:57 +0100 | [diff] [blame] | 4139 | perf_pmu_disable(ctx->pmu); |
Stephane Eranian | e050e3f | 2012-01-26 17:03:19 +0100 | [diff] [blame] | 4140 | |
Paul Mackerras | 03541f8 | 2009-10-14 16:58:03 +1100 | [diff] [blame] | 4141 | list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4142 | if (event->state != PERF_EVENT_STATE_ACTIVE) |
| 4143 | continue; |
| 4144 | |
Stephane Eranian | 5632ab1 | 2011-01-03 18:20:01 +0200 | [diff] [blame] | 4145 | if (!event_filter_match(event)) |
Peter Zijlstra | 5d27c23 | 2009-12-17 13:16:32 +0100 | [diff] [blame] | 4146 | continue; |
| 4147 | |
Alexander Shishkin | 4437727 | 2013-12-16 14:17:36 +0200 | [diff] [blame] | 4148 | perf_pmu_disable(event->pmu); |
| 4149 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4150 | hwc = &event->hw; |
| 4151 | |
Jiri Olsa | ae23bff | 2013-08-24 16:45:54 +0200 | [diff] [blame] | 4152 | if (hwc->interrupts == MAX_INTERRUPTS) { |
Stephane Eranian | e050e3f | 2012-01-26 17:03:19 +0100 | [diff] [blame] | 4153 | hwc->interrupts = 0; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4154 | perf_log_throttle(event, 1); |
Peter Zijlstra | a4eaf7f | 2010-06-16 14:37:10 +0200 | [diff] [blame] | 4155 | event->pmu->start(event, 0); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4156 | } |
| 4157 | |
| 4158 | if (!event->attr.freq || !event->attr.sample_freq) |
Alexander Shishkin | 4437727 | 2013-12-16 14:17:36 +0200 | [diff] [blame] | 4159 | goto next; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4160 | |
Stephane Eranian | e050e3f | 2012-01-26 17:03:19 +0100 | [diff] [blame] | 4161 | /* |
| 4162 | * stop the event and update event->count |
| 4163 | */ |
| 4164 | event->pmu->stop(event, PERF_EF_UPDATE); |
| 4165 | |
Peter Zijlstra | e785059 | 2010-05-21 14:43:08 +0200 | [diff] [blame] | 4166 | now = local64_read(&event->count); |
Peter Zijlstra | abd5071 | 2010-01-26 18:50:16 +0100 | [diff] [blame] | 4167 | delta = now - hwc->freq_count_stamp; |
| 4168 | hwc->freq_count_stamp = now; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4169 | |
Stephane Eranian | e050e3f | 2012-01-26 17:03:19 +0100 | [diff] [blame] | 4170 | /* |
| 4171 | * restart the event |
| 4172 | * reload only if value has changed |
Stephane Eranian | f39d47f | 2012-02-07 14:39:57 +0100 | [diff] [blame] | 4173 | * we have stopped the event so tell that |
| 4174 | * to perf_adjust_period() to avoid stopping it |
| 4175 | * twice. |
Stephane Eranian | e050e3f | 2012-01-26 17:03:19 +0100 | [diff] [blame] | 4176 | */ |
Peter Zijlstra | abd5071 | 2010-01-26 18:50:16 +0100 | [diff] [blame] | 4177 | if (delta > 0) |
Stephane Eranian | f39d47f | 2012-02-07 14:39:57 +0100 | [diff] [blame] | 4178 | perf_adjust_period(event, period, delta, false); |
Stephane Eranian | e050e3f | 2012-01-26 17:03:19 +0100 | [diff] [blame] | 4179 | |
| 4180 | event->pmu->start(event, delta > 0 ? PERF_EF_RELOAD : 0); |
Alexander Shishkin | 4437727 | 2013-12-16 14:17:36 +0200 | [diff] [blame] | 4181 | next: |
| 4182 | perf_pmu_enable(event->pmu); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4183 | } |
Stephane Eranian | e050e3f | 2012-01-26 17:03:19 +0100 | [diff] [blame] | 4184 | |
Stephane Eranian | f39d47f | 2012-02-07 14:39:57 +0100 | [diff] [blame] | 4185 | perf_pmu_enable(ctx->pmu); |
Stephane Eranian | e050e3f | 2012-01-26 17:03:19 +0100 | [diff] [blame] | 4186 | raw_spin_unlock(&ctx->lock); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4187 | } |
| 4188 | |
| 4189 | /* |
Peter Zijlstra | 8703a7c | 2017-11-13 14:28:44 +0100 | [diff] [blame] | 4190 | * Move @event to the tail of the @ctx's eligible events. |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4191 | */ |
Peter Zijlstra | 8703a7c | 2017-11-13 14:28:44 +0100 | [diff] [blame] | 4192 | static void rotate_ctx(struct perf_event_context *ctx, struct perf_event *event) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4193 | { |
Thomas Gleixner | dddd337 | 2010-11-24 10:05:55 +0100 | [diff] [blame] | 4194 | /* |
 | 4195 | * Rotate the first entry of the non-pinned groups to the tail. Rotation might be |
| 4196 | * disabled by the inheritance code. |
| 4197 | */ |
Peter Zijlstra | 8703a7c | 2017-11-13 14:28:44 +0100 | [diff] [blame] | 4198 | if (ctx->rotate_disable) |
| 4199 | return; |
Alexey Budankov | 8e1a203 | 2017-09-08 11:47:03 +0300 | [diff] [blame] | 4200 | |
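/*
 * Deleting and re-inserting gives @event a fresh (highest) group_index,
 * so it sorts after the groups it shares a key with and ends up at the
 * back of the flexible scheduling order.
 */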
Peter Zijlstra | 8703a7c | 2017-11-13 14:28:44 +0100 | [diff] [blame] | 4201 | perf_event_groups_delete(&ctx->flexible_groups, event); |
| 4202 | perf_event_groups_insert(&ctx->flexible_groups, event); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4203 | } |
| 4204 | |
Song Liu | 7fa343b7 | 2019-10-08 09:59:49 -0700 | [diff] [blame] | 4205 | /* pick an event from the flexible_groups to rotate */ |
Peter Zijlstra | 8d5bce0 | 2018-03-09 14:56:27 +0100 | [diff] [blame] | 4206 | static inline struct perf_event * |
Song Liu | 7fa343b7 | 2019-10-08 09:59:49 -0700 | [diff] [blame] | 4207 | ctx_event_to_rotate(struct perf_event_context *ctx) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4208 | { |
Song Liu | 7fa343b7 | 2019-10-08 09:59:49 -0700 | [diff] [blame] | 4209 | struct perf_event *event; |
| 4210 | |
| 4211 | /* pick the first active flexible event */ |
| 4212 | event = list_first_entry_or_null(&ctx->flexible_active, |
| 4213 | struct perf_event, active_list); |
| 4214 | |
| 4215 | /* if no active flexible event, pick the first event */ |
| 4216 | if (!event) { |
| 4217 | event = rb_entry_safe(rb_first(&ctx->flexible_groups.tree), |
| 4218 | typeof(*event), group_node); |
| 4219 | } |
| 4220 | |
Peter Zijlstra | 90c91df | 2020-03-05 13:38:51 +0100 | [diff] [blame] | 4221 | /* |
| 4222 | * Unconditionally clear rotate_necessary; if ctx_flexible_sched_in() |
| 4223 | * finds there are unschedulable events, it will set it again. |
| 4224 | */ |
| 4225 | ctx->rotate_necessary = 0; |
| 4226 | |
Song Liu | 7fa343b7 | 2019-10-08 09:59:49 -0700 | [diff] [blame] | 4227 | return event; |
Peter Zijlstra | 8d5bce0 | 2018-03-09 14:56:27 +0100 | [diff] [blame] | 4228 | } |
| 4229 | |
| 4230 | static bool perf_rotate_context(struct perf_cpu_context *cpuctx) |
| 4231 | { |
| 4232 | struct perf_event *cpu_event = NULL, *task_event = NULL; |
Ian Rogers | fd7d551 | 2019-06-01 01:27:22 -0700 | [diff] [blame] | 4233 | struct perf_event_context *task_ctx = NULL; |
| 4234 | int cpu_rotate, task_rotate; |
Peter Zijlstra | 8d5bce0 | 2018-03-09 14:56:27 +0100 | [diff] [blame] | 4235 | |
| 4236 | /* |
| 4237 | * Since we run this from IRQ context, nobody can install new |
| 4238 | * events, thus the event count values are stable. |
| 4239 | */ |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4240 | |
Ian Rogers | fd7d551 | 2019-06-01 01:27:22 -0700 | [diff] [blame] | 4241 | cpu_rotate = cpuctx->ctx.rotate_necessary; |
| 4242 | task_ctx = cpuctx->task_ctx; |
| 4243 | task_rotate = task_ctx ? task_ctx->rotate_necessary : 0; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4244 | |
Peter Zijlstra | 8d5bce0 | 2018-03-09 14:56:27 +0100 | [diff] [blame] | 4245 | if (!(cpu_rotate || task_rotate)) |
| 4246 | return false; |
Peter Zijlstra | 0f5a260 | 2011-11-16 14:38:16 +0100 | [diff] [blame] | 4247 | |
Peter Zijlstra | facc430 | 2011-04-09 21:17:42 +0200 | [diff] [blame] | 4248 | perf_ctx_lock(cpuctx, cpuctx->task_ctx); |
Peter Zijlstra | 1b9a644 | 2010-09-07 18:32:22 +0200 | [diff] [blame] | 4249 | perf_pmu_disable(cpuctx->ctx.pmu); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4250 | |
Peter Zijlstra | 8d5bce0 | 2018-03-09 14:56:27 +0100 | [diff] [blame] | 4251 | if (task_rotate) |
Song Liu | 7fa343b7 | 2019-10-08 09:59:49 -0700 | [diff] [blame] | 4252 | task_event = ctx_event_to_rotate(task_ctx); |
Peter Zijlstra | 8d5bce0 | 2018-03-09 14:56:27 +0100 | [diff] [blame] | 4253 | if (cpu_rotate) |
Song Liu | 7fa343b7 | 2019-10-08 09:59:49 -0700 | [diff] [blame] | 4254 | cpu_event = ctx_event_to_rotate(&cpuctx->ctx); |
Peter Zijlstra | 8703a7c | 2017-11-13 14:28:44 +0100 | [diff] [blame] | 4255 | |
Peter Zijlstra | 8d5bce0 | 2018-03-09 14:56:27 +0100 | [diff] [blame] | 4256 | /* |
 | 4257 | * As per the order given at ctx_resched(), first 'pop' task flexible |
 | 4258 | * and then, if needed, CPU flexible. |
| 4259 | */ |
Ian Rogers | fd7d551 | 2019-06-01 01:27:22 -0700 | [diff] [blame] | 4260 | if (task_event || (task_ctx && cpu_event)) |
| 4261 | ctx_sched_out(task_ctx, cpuctx, EVENT_FLEXIBLE); |
Peter Zijlstra | 8d5bce0 | 2018-03-09 14:56:27 +0100 | [diff] [blame] | 4262 | if (cpu_event) |
| 4263 | cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE); |
Peter Zijlstra | d4944a0 | 2010-03-08 13:51:20 +0100 | [diff] [blame] | 4264 | |
Peter Zijlstra | 8d5bce0 | 2018-03-09 14:56:27 +0100 | [diff] [blame] | 4265 | if (task_event) |
Ian Rogers | fd7d551 | 2019-06-01 01:27:22 -0700 | [diff] [blame] | 4266 | rotate_ctx(task_ctx, task_event); |
Peter Zijlstra | 8d5bce0 | 2018-03-09 14:56:27 +0100 | [diff] [blame] | 4267 | if (cpu_event) |
| 4268 | rotate_ctx(&cpuctx->ctx, cpu_event); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4269 | |
Ian Rogers | fd7d551 | 2019-06-01 01:27:22 -0700 | [diff] [blame] | 4270 | perf_event_sched_in(cpuctx, task_ctx, current); |
Peter Zijlstra | 0f5a260 | 2011-11-16 14:38:16 +0100 | [diff] [blame] | 4271 | |
| 4272 | perf_pmu_enable(cpuctx->ctx.pmu); |
| 4273 | perf_ctx_unlock(cpuctx, cpuctx->task_ctx); |
Stephane Eranian | 9e63020 | 2013-04-03 14:21:33 +0200 | [diff] [blame] | 4274 | |
Peter Zijlstra | 8d5bce0 | 2018-03-09 14:56:27 +0100 | [diff] [blame] | 4275 | return true; |
Peter Zijlstra | e9d2b06 | 2010-09-17 11:28:50 +0200 | [diff] [blame] | 4276 | } |
| 4277 | |
| 4278 | void perf_event_task_tick(void) |
| 4279 | { |
Mark Rutland | 2fde4f9 | 2015-01-07 15:01:54 +0000 | [diff] [blame] | 4280 | struct list_head *head = this_cpu_ptr(&active_ctx_list); |
| 4281 | struct perf_event_context *ctx, *tmp; |
Stephane Eranian | e050e3f | 2012-01-26 17:03:19 +0100 | [diff] [blame] | 4282 | int throttled; |
Peter Zijlstra | e9d2b06 | 2010-09-17 11:28:50 +0200 | [diff] [blame] | 4283 | |
Frederic Weisbecker | 1644464 | 2017-11-06 16:01:24 +0100 | [diff] [blame] | 4284 | lockdep_assert_irqs_disabled(); |
Peter Zijlstra | e9d2b06 | 2010-09-17 11:28:50 +0200 | [diff] [blame] | 4285 | |
Stephane Eranian | e050e3f | 2012-01-26 17:03:19 +0100 | [diff] [blame] | 4286 | __this_cpu_inc(perf_throttled_seq); |
| 4287 | throttled = __this_cpu_xchg(perf_throttled_count, 0); |
Frederic Weisbecker | 555e0c1 | 2015-07-16 17:42:29 +0200 | [diff] [blame] | 4288 | tick_dep_clear_cpu(smp_processor_id(), TICK_DEP_BIT_PERF_EVENTS); |
Stephane Eranian | e050e3f | 2012-01-26 17:03:19 +0100 | [diff] [blame] | 4289 | |
Mark Rutland | 2fde4f9 | 2015-01-07 15:01:54 +0000 | [diff] [blame] | 4290 | list_for_each_entry_safe(ctx, tmp, head, active_ctx_list) |
Stephane Eranian | e050e3f | 2012-01-26 17:03:19 +0100 | [diff] [blame] | 4291 | perf_adjust_freq_unthr_context(ctx, throttled); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4292 | } |
| 4293 | |
Frederic Weisbecker | 889ff01 | 2010-01-09 20:04:47 +0100 | [diff] [blame] | 4294 | static int event_enable_on_exec(struct perf_event *event, |
| 4295 | struct perf_event_context *ctx) |
| 4296 | { |
| 4297 | if (!event->attr.enable_on_exec) |
| 4298 | return 0; |
| 4299 | |
| 4300 | event->attr.enable_on_exec = 0; |
| 4301 | if (event->state >= PERF_EVENT_STATE_INACTIVE) |
| 4302 | return 0; |
| 4303 | |
Peter Zijlstra | 0d3d73a | 2017-09-05 14:16:28 +0200 | [diff] [blame] | 4304 | perf_event_set_state(event, PERF_EVENT_STATE_INACTIVE); |
Frederic Weisbecker | 889ff01 | 2010-01-09 20:04:47 +0100 | [diff] [blame] | 4305 | |
| 4306 | return 1; |
| 4307 | } |
| 4308 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4309 | /* |
| 4310 | * Enable all of a task's events that have been marked enable-on-exec. |
| 4311 | * This expects task == current. |
| 4312 | */ |
Peter Zijlstra | c127449 | 2015-12-10 20:57:40 +0100 | [diff] [blame] | 4313 | static void perf_event_enable_on_exec(int ctxn) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4314 | { |
Peter Zijlstra | c127449 | 2015-12-10 20:57:40 +0100 | [diff] [blame] | 4315 | struct perf_event_context *ctx, *clone_ctx = NULL; |
Alexander Shishkin | 487f05e | 2017-01-19 18:43:30 +0200 | [diff] [blame] | 4316 | enum event_type_t event_type = 0; |
Peter Zijlstra | 3e34950 | 2016-01-08 10:01:18 +0100 | [diff] [blame] | 4317 | struct perf_cpu_context *cpuctx; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4318 | struct perf_event *event; |
| 4319 | unsigned long flags; |
| 4320 | int enabled = 0; |
| 4321 | |
| 4322 | local_irq_save(flags); |
Peter Zijlstra | c127449 | 2015-12-10 20:57:40 +0100 | [diff] [blame] | 4323 | ctx = current->perf_event_ctxp[ctxn]; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4324 | if (!ctx || !ctx->nr_events) |
| 4325 | goto out; |
| 4326 | |
Peter Zijlstra | 3e34950 | 2016-01-08 10:01:18 +0100 | [diff] [blame] | 4327 | cpuctx = __get_cpu_context(ctx); |
| 4328 | perf_ctx_lock(cpuctx, ctx); |
Peter Zijlstra | 7fce250 | 2016-02-24 18:45:48 +0100 | [diff] [blame] | 4329 | ctx_sched_out(ctx, cpuctx, EVENT_TIME); |
Alexander Shishkin | 487f05e | 2017-01-19 18:43:30 +0200 | [diff] [blame] | 4330 | list_for_each_entry(event, &ctx->event_list, event_entry) { |
Peter Zijlstra | 3e34950 | 2016-01-08 10:01:18 +0100 | [diff] [blame] | 4331 | enabled |= event_enable_on_exec(event, ctx); |
Alexander Shishkin | 487f05e | 2017-01-19 18:43:30 +0200 | [diff] [blame] | 4332 | event_type |= get_event_type(event); |
| 4333 | } |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4334 | |
| 4335 | /* |
Peter Zijlstra | 3e34950 | 2016-01-08 10:01:18 +0100 | [diff] [blame] | 4336 | * Unclone and reschedule this context if we enabled any event. |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4337 | */ |
Peter Zijlstra | 3e34950 | 2016-01-08 10:01:18 +0100 | [diff] [blame] | 4338 | if (enabled) { |
Peter Zijlstra | 211de6e | 2014-09-30 19:23:08 +0200 | [diff] [blame] | 4339 | clone_ctx = unclone_ctx(ctx); |
Alexander Shishkin | 487f05e | 2017-01-19 18:43:30 +0200 | [diff] [blame] | 4340 | ctx_resched(cpuctx, ctx, event_type); |
Peter Zijlstra | 7bbba0e | 2017-02-15 16:12:20 +0100 | [diff] [blame] | 4341 | } else { |
| 4342 | ctx_sched_in(ctx, cpuctx, EVENT_TIME, current); |
Peter Zijlstra | 3e34950 | 2016-01-08 10:01:18 +0100 | [diff] [blame] | 4343 | } |
| 4344 | perf_ctx_unlock(cpuctx, ctx); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4345 | |
Peter Zijlstra | 9ed6060 | 2010-06-11 17:36:35 +0200 | [diff] [blame] | 4346 | out: |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4347 | local_irq_restore(flags); |
Peter Zijlstra | 211de6e | 2014-09-30 19:23:08 +0200 | [diff] [blame] | 4348 | |
| 4349 | if (clone_ctx) |
| 4350 | put_ctx(clone_ctx); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4351 | } |
| 4352 | |
Marco Elver | 2e498d0 | 2021-04-08 12:35:59 +0200 | [diff] [blame] | 4353 | static void perf_remove_from_owner(struct perf_event *event); |
| 4354 | static void perf_event_exit_event(struct perf_event *event, |
| 4355 | struct perf_event_context *ctx); |
| 4356 | |
| 4357 | /* |
| 4358 | * Removes all events from the current task that have been marked |
| 4359 | * remove-on-exec, and feeds their values back to parent events. |
| 4360 | */ |
| 4361 | static void perf_event_remove_on_exec(int ctxn) |
| 4362 | { |
| 4363 | struct perf_event_context *ctx, *clone_ctx = NULL; |
| 4364 | struct perf_event *event, *next; |
| 4365 | LIST_HEAD(free_list); |
| 4366 | unsigned long flags; |
| 4367 | bool modified = false; |
| 4368 | |
| 4369 | ctx = perf_pin_task_context(current, ctxn); |
| 4370 | if (!ctx) |
| 4371 | return; |
| 4372 | |
| 4373 | mutex_lock(&ctx->mutex); |
| 4374 | |
| 4375 | if (WARN_ON_ONCE(ctx->task != current)) |
| 4376 | goto unlock; |
| 4377 | |
| 4378 | list_for_each_entry_safe(event, next, &ctx->event_list, event_entry) { |
| 4379 | if (!event->attr.remove_on_exec) |
| 4380 | continue; |
| 4381 | |
| 4382 | if (!is_kernel_event(event)) |
| 4383 | perf_remove_from_owner(event); |
| 4384 | |
| 4385 | modified = true; |
| 4386 | |
| 4387 | perf_event_exit_event(event, ctx); |
| 4388 | } |
| 4389 | |
| 4390 | raw_spin_lock_irqsave(&ctx->lock, flags); |
| 4391 | if (modified) |
| 4392 | clone_ctx = unclone_ctx(ctx); |
| 4393 | --ctx->pin_count; |
| 4394 | raw_spin_unlock_irqrestore(&ctx->lock, flags); |
| 4395 | |
| 4396 | unlock: |
| 4397 | mutex_unlock(&ctx->mutex); |
| 4398 | |
| 4399 | put_ctx(ctx); |
| 4400 | if (clone_ctx) |
| 4401 | put_ctx(clone_ctx); |
| 4402 | } |
| 4403 | |
Peter Zijlstra | 0492d4c | 2015-09-03 20:07:48 -0700 | [diff] [blame] | 4404 | struct perf_read_data { |
| 4405 | struct perf_event *event; |
| 4406 | bool group; |
Sukadev Bhattiprolu | 7d88962 | 2015-09-03 20:07:50 -0700 | [diff] [blame] | 4407 | int ret; |
Peter Zijlstra | 0492d4c | 2015-09-03 20:07:48 -0700 | [diff] [blame] | 4408 | }; |
| 4409 | |
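/*
 * Events with PERF_EV_CAP_READ_ACTIVE_PKG can be read from any CPU in
 * the package they count on; when the local CPU is in the same package,
 * read locally and spare the cross-package IPI.
 */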
Peter Zijlstra | 451d24d | 2017-01-31 11:27:10 +0100 | [diff] [blame] | 4410 | static int __perf_event_read_cpu(struct perf_event *event, int event_cpu) |
David Carrillo-Cisneros | d6a2f903 | 2016-08-17 13:55:06 -0700 | [diff] [blame] | 4411 | { |
David Carrillo-Cisneros | d6a2f903 | 2016-08-17 13:55:06 -0700 | [diff] [blame] | 4412 | u16 local_pkg, event_pkg; |
| 4413 | |
| 4414 | if (event->group_caps & PERF_EV_CAP_READ_ACTIVE_PKG) { |
Peter Zijlstra | 451d24d | 2017-01-31 11:27:10 +0100 | [diff] [blame] | 4415 | int local_cpu = smp_processor_id(); |
| 4416 | |
| 4417 | event_pkg = topology_physical_package_id(event_cpu); |
| 4418 | local_pkg = topology_physical_package_id(local_cpu); |
David Carrillo-Cisneros | d6a2f903 | 2016-08-17 13:55:06 -0700 | [diff] [blame] | 4419 | |
| 4420 | if (event_pkg == local_pkg) |
| 4421 | return local_cpu; |
| 4422 | } |
| 4423 | |
| 4424 | return event_cpu; |
| 4425 | } |
| 4426 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4427 | /* |
| 4428 | * Cross CPU call to read the hardware event |
| 4429 | */ |
| 4430 | static void __perf_event_read(void *info) |
| 4431 | { |
Peter Zijlstra | 0492d4c | 2015-09-03 20:07:48 -0700 | [diff] [blame] | 4432 | struct perf_read_data *data = info; |
| 4433 | struct perf_event *sub, *event = data->event; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4434 | struct perf_event_context *ctx = event->ctx; |
Peter Zijlstra | 108b02c | 2010-09-06 14:32:03 +0200 | [diff] [blame] | 4435 | struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); |
Sukadev Bhattiprolu | 4a00c16 | 2015-09-03 20:07:51 -0700 | [diff] [blame] | 4436 | struct pmu *pmu = event->pmu; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4437 | |
| 4438 | /* |
| 4439 | * If this is a task context, we need to check whether it is |
 | 4440 | * the current task context of this cpu. If not, it has been |
| 4441 | * scheduled out before the smp call arrived. In that case |
| 4442 | * event->count would have been updated to a recent sample |
| 4443 | * when the event was scheduled out. |
| 4444 | */ |
| 4445 | if (ctx->task && cpuctx->task_ctx != ctx) |
| 4446 | return; |
| 4447 | |
Thomas Gleixner | e625cce1 | 2009-11-17 18:02:06 +0100 | [diff] [blame] | 4448 | raw_spin_lock(&ctx->lock); |
Peter Zijlstra | 0c1cbc1 | 2017-09-05 16:26:44 +0200 | [diff] [blame] | 4449 | if (ctx->is_active & EVENT_TIME) { |
Peter Zijlstra | 542e72f | 2011-01-26 15:38:35 +0100 | [diff] [blame] | 4450 | update_context_time(ctx); |
Stephane Eranian | e5d1367 | 2011-02-14 11:20:01 +0200 | [diff] [blame] | 4451 | update_cgrp_time_from_event(event); |
| 4452 | } |
Peter Zijlstra | 0492d4c | 2015-09-03 20:07:48 -0700 | [diff] [blame] | 4453 | |
Peter Zijlstra | 0d3d73a | 2017-09-05 14:16:28 +0200 | [diff] [blame] | 4454 | perf_event_update_time(event); |
| 4455 | if (data->group) |
| 4456 | perf_event_update_sibling_time(event); |
Peter Zijlstra | 0c1cbc1 | 2017-09-05 16:26:44 +0200 | [diff] [blame] | 4457 | |
Sukadev Bhattiprolu | 4a00c16 | 2015-09-03 20:07:51 -0700 | [diff] [blame] | 4458 | if (event->state != PERF_EVENT_STATE_ACTIVE) |
Peter Zijlstra | 0492d4c | 2015-09-03 20:07:48 -0700 | [diff] [blame] | 4459 | goto unlock; |
| 4460 | |
Sukadev Bhattiprolu | 4a00c16 | 2015-09-03 20:07:51 -0700 | [diff] [blame] | 4461 | if (!data->group) { |
| 4462 | pmu->read(event); |
| 4463 | data->ret = 0; |
| 4464 | goto unlock; |
| 4465 | } |
| 4466 | |
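/*
 * Group read: wrap the leader and sibling reads in a read transaction
 * so a PMU that supports it can fetch all the values in one go;
 * ->commit_txn() then completes (or fails) the whole batch.
 */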
| 4467 | pmu->start_txn(pmu, PERF_PMU_TXN_READ); |
| 4468 | |
| 4469 | pmu->read(event); |
| 4470 | |
Peter Zijlstra | edb3959 | 2018-03-15 17:36:56 +0100 | [diff] [blame] | 4471 | for_each_sibling_event(sub, event) { |
Sukadev Bhattiprolu | 4a00c16 | 2015-09-03 20:07:51 -0700 | [diff] [blame] | 4472 | if (sub->state == PERF_EVENT_STATE_ACTIVE) { |
| 4473 | /* |
| 4474 | * Use sibling's PMU rather than @event's since |
| 4475 | * sibling could be on different (eg: software) PMU. |
| 4476 | */ |
Peter Zijlstra | 0492d4c | 2015-09-03 20:07:48 -0700 | [diff] [blame] | 4477 | sub->pmu->read(sub); |
Sukadev Bhattiprolu | 4a00c16 | 2015-09-03 20:07:51 -0700 | [diff] [blame] | 4478 | } |
Peter Zijlstra | 0492d4c | 2015-09-03 20:07:48 -0700 | [diff] [blame] | 4479 | } |
Sukadev Bhattiprolu | 4a00c16 | 2015-09-03 20:07:51 -0700 | [diff] [blame] | 4480 | |
| 4481 | data->ret = pmu->commit_txn(pmu); |
Peter Zijlstra | 0492d4c | 2015-09-03 20:07:48 -0700 | [diff] [blame] | 4482 | |
| 4483 | unlock: |
Thomas Gleixner | e625cce1 | 2009-11-17 18:02:06 +0100 | [diff] [blame] | 4484 | raw_spin_unlock(&ctx->lock); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4485 | } |
| 4486 | |
Peter Zijlstra | b5e5879 | 2010-05-21 14:43:12 +0200 | [diff] [blame] | 4487 | static inline u64 perf_event_count(struct perf_event *event) |
| 4488 | { |
Vikas Shivappa | c39a0e2 | 2017-07-25 14:14:20 -0700 | [diff] [blame] | 4489 | return local64_read(&event->count) + atomic64_read(&event->child_count); |
Peter Zijlstra | b5e5879 | 2010-05-21 14:43:12 +0200 | [diff] [blame] | 4490 | } |
| 4491 | |
Peter Zijlstra | 09f5e7d | 2021-12-20 13:19:52 +0100 | [diff] [blame] | 4492 | static void calc_timer_values(struct perf_event *event, |
| 4493 | u64 *now, |
| 4494 | u64 *enabled, |
| 4495 | u64 *running) |
| 4496 | { |
| 4497 | u64 ctx_time; |
| 4498 | |
| 4499 | *now = perf_clock(); |
| 4500 | ctx_time = perf_event_time_now(event, *now); |
| 4501 | __perf_update_times(event, ctx_time, enabled, running); |
| 4502 | } |
| 4503 | |
Kaixu Xia | ffe8690 | 2015-08-06 07:02:32 +0000 | [diff] [blame] | 4504 | /* |
| 4505 | * NMI-safe method to read a local event, that is an event that |
| 4506 | * is: |
| 4507 | * - either for the current task, or for this CPU |
| 4508 | * - does not have inherit set, for inherited task events |
| 4509 | * will not be local and we cannot read them atomically |
| 4510 | * - must not have a pmu::count method |
| 4511 | */ |
Yonghong Song | 7d9285e | 2017-10-05 09:19:19 -0700 | [diff] [blame] | 4512 | int perf_event_read_local(struct perf_event *event, u64 *value, |
| 4513 | u64 *enabled, u64 *running) |
Kaixu Xia | ffe8690 | 2015-08-06 07:02:32 +0000 | [diff] [blame] | 4514 | { |
| 4515 | unsigned long flags; |
Alexei Starovoitov | f91840a | 2017-06-02 21:03:52 -0700 | [diff] [blame] | 4516 | int ret = 0; |
Kaixu Xia | ffe8690 | 2015-08-06 07:02:32 +0000 | [diff] [blame] | 4517 | |
| 4518 | /* |
| 4519 | * Disabling interrupts avoids all counter scheduling (context |
| 4520 | * switches, timer based rotation and IPIs). |
| 4521 | */ |
| 4522 | local_irq_save(flags); |
| 4523 | |
Kaixu Xia | ffe8690 | 2015-08-06 07:02:32 +0000 | [diff] [blame] | 4524 | /* |
 | 4525 | * It must not be an event with inherit set; we cannot read |
| 4526 | * all child counters from atomic context. |
| 4527 | */ |
Alexei Starovoitov | f91840a | 2017-06-02 21:03:52 -0700 | [diff] [blame] | 4528 | if (event->attr.inherit) { |
| 4529 | ret = -EOPNOTSUPP; |
| 4530 | goto out; |
| 4531 | } |
Kaixu Xia | ffe8690 | 2015-08-06 07:02:32 +0000 | [diff] [blame] | 4532 | |
Alexei Starovoitov | f91840a | 2017-06-02 21:03:52 -0700 | [diff] [blame] | 4533 | /* If this is a per-task event, it must be for current */ |
| 4534 | if ((event->attach_state & PERF_ATTACH_TASK) && |
| 4535 | event->hw.target != current) { |
| 4536 | ret = -EINVAL; |
| 4537 | goto out; |
| 4538 | } |
| 4539 | |
| 4540 | /* If this is a per-CPU event, it must be for this CPU */ |
| 4541 | if (!(event->attach_state & PERF_ATTACH_TASK) && |
| 4542 | event->cpu != smp_processor_id()) { |
| 4543 | ret = -EINVAL; |
| 4544 | goto out; |
| 4545 | } |
Kaixu Xia | ffe8690 | 2015-08-06 07:02:32 +0000 | [diff] [blame] | 4546 | |
Reinette Chatre | befb1b3 | 2018-09-19 10:29:06 -0700 | [diff] [blame] | 4547 | /* If this is a pinned event it must be running on this CPU */ |
| 4548 | if (event->attr.pinned && event->oncpu != smp_processor_id()) { |
| 4549 | ret = -EBUSY; |
| 4550 | goto out; |
| 4551 | } |
| 4552 | |
Kaixu Xia | ffe8690 | 2015-08-06 07:02:32 +0000 | [diff] [blame] | 4553 | /* |
 | 4554 | * If the event is currently on this CPU, it's either a per-task event, |
 | 4555 | * or local to this CPU. Furthermore it means it's ACTIVE (otherwise |
| 4556 | * oncpu == -1). |
| 4557 | */ |
| 4558 | if (event->oncpu == smp_processor_id()) |
| 4559 | event->pmu->read(event); |
| 4560 | |
Alexei Starovoitov | f91840a | 2017-06-02 21:03:52 -0700 | [diff] [blame] | 4561 | *value = local64_read(&event->count); |
Peter Zijlstra | 0d3d73a | 2017-09-05 14:16:28 +0200 | [diff] [blame] | 4562 | if (enabled || running) { |
Peter Zijlstra | 09f5e7d | 2021-12-20 13:19:52 +0100 | [diff] [blame] | 4563 | u64 __enabled, __running, __now; |
Peter Zijlstra | 0d3d73a | 2017-09-05 14:16:28 +0200 | [diff] [blame] | 4564 | |
Peter Zijlstra | 09f5e7d | 2021-12-20 13:19:52 +0100 | [diff] [blame] | 4565 | calc_timer_values(event, &__now, &__enabled, &__running); |
Peter Zijlstra | 0d3d73a | 2017-09-05 14:16:28 +0200 | [diff] [blame] | 4566 | if (enabled) |
| 4567 | *enabled = __enabled; |
| 4568 | if (running) |
| 4569 | *running = __running; |
| 4570 | } |
Alexei Starovoitov | f91840a | 2017-06-02 21:03:52 -0700 | [diff] [blame] | 4571 | out: |
Kaixu Xia | ffe8690 | 2015-08-06 07:02:32 +0000 | [diff] [blame] | 4572 | local_irq_restore(flags); |
| 4573 | |
Alexei Starovoitov | f91840a | 2017-06-02 21:03:52 -0700 | [diff] [blame] | 4574 | return ret; |
Kaixu Xia | ffe8690 | 2015-08-06 07:02:32 +0000 | [diff] [blame] | 4575 | } |
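/*
 * Usage sketch (hypothetical caller, not from this file; the
 * do_something_with() helper is a made-up stand-in): read a counter
 * attached to the current task, ignoring the enabled/running times:
 *
 *	u64 count;
 *
 *	if (!perf_event_read_local(event, &count, NULL, NULL))
 *		do_something_with(count);
 */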
| 4576 | |
Sukadev Bhattiprolu | 7d88962 | 2015-09-03 20:07:50 -0700 | [diff] [blame] | 4577 | static int perf_event_read(struct perf_event *event, bool group) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4578 | { |
Peter Zijlstra | 0c1cbc1 | 2017-09-05 16:26:44 +0200 | [diff] [blame] | 4579 | enum perf_event_state state = READ_ONCE(event->state); |
Peter Zijlstra | 451d24d | 2017-01-31 11:27:10 +0100 | [diff] [blame] | 4580 | int event_cpu, ret = 0; |
Sukadev Bhattiprolu | 7d88962 | 2015-09-03 20:07:50 -0700 | [diff] [blame] | 4581 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4582 | /* |
| 4583 | * If event is enabled and currently active on a CPU, update the |
| 4584 | * value in the event structure: |
| 4585 | */ |
Peter Zijlstra | 0c1cbc1 | 2017-09-05 16:26:44 +0200 | [diff] [blame] | 4586 | again: |
| 4587 | if (state == PERF_EVENT_STATE_ACTIVE) { |
| 4588 | struct perf_read_data data; |
| 4589 | |
| 4590 | /* |
| 4591 | * Orders the ->state and ->oncpu loads such that if we see |
| 4592 | * ACTIVE we must also see the right ->oncpu. |
| 4593 | * |
| 4594 | * Matches the smp_wmb() from event_sched_in(). |
| 4595 | */ |
| 4596 | smp_rmb(); |
David Carrillo-Cisneros | d6a2f903 | 2016-08-17 13:55:06 -0700 | [diff] [blame] | 4597 | |
Peter Zijlstra | 451d24d | 2017-01-31 11:27:10 +0100 | [diff] [blame] | 4598 | event_cpu = READ_ONCE(event->oncpu); |
| 4599 | if ((unsigned)event_cpu >= nr_cpu_ids) |
| 4600 | return 0; |
| 4601 | |
Peter Zijlstra | 0c1cbc1 | 2017-09-05 16:26:44 +0200 | [diff] [blame] | 4602 | data = (struct perf_read_data){ |
| 4603 | .event = event, |
| 4604 | .group = group, |
| 4605 | .ret = 0, |
| 4606 | }; |
| 4607 | |
Peter Zijlstra | 451d24d | 2017-01-31 11:27:10 +0100 | [diff] [blame] | 4608 | preempt_disable(); |
| 4609 | event_cpu = __perf_event_read_cpu(event, event_cpu); |
David Carrillo-Cisneros | d6a2f903 | 2016-08-17 13:55:06 -0700 | [diff] [blame] | 4610 | |
Peter Zijlstra | 5876314 | 2016-08-30 10:15:03 +0200 | [diff] [blame] | 4611 | /* |
| 4612 | * Purposely ignore the smp_call_function_single() return |
| 4613 | * value. |
| 4614 | * |
Peter Zijlstra | 451d24d | 2017-01-31 11:27:10 +0100 | [diff] [blame] | 4615 | * If event_cpu isn't a valid CPU it means the event got |
Peter Zijlstra | 5876314 | 2016-08-30 10:15:03 +0200 | [diff] [blame] | 4616 | * scheduled out and that will have updated the event count. |
| 4617 | * |
| 4618 | * Therefore, either way, we'll have an up-to-date event count |
| 4619 | * after this. |
| 4620 | */ |
Peter Zijlstra | 451d24d | 2017-01-31 11:27:10 +0100 | [diff] [blame] | 4621 | (void)smp_call_function_single(event_cpu, __perf_event_read, &data, 1); |
| 4622 | preempt_enable(); |
Peter Zijlstra | 5876314 | 2016-08-30 10:15:03 +0200 | [diff] [blame] | 4623 | ret = data.ret; |
Peter Zijlstra | 0c1cbc1 | 2017-09-05 16:26:44 +0200 | [diff] [blame] | 4624 | |
| 4625 | } else if (state == PERF_EVENT_STATE_INACTIVE) { |
Peter Zijlstra | 2b8988c | 2009-11-20 22:19:54 +0100 | [diff] [blame] | 4626 | struct perf_event_context *ctx = event->ctx; |
| 4627 | unsigned long flags; |
| 4628 | |
Thomas Gleixner | e625cce1 | 2009-11-17 18:02:06 +0100 | [diff] [blame] | 4629 | raw_spin_lock_irqsave(&ctx->lock, flags); |
Peter Zijlstra | 0c1cbc1 | 2017-09-05 16:26:44 +0200 | [diff] [blame] | 4630 | state = event->state; |
| 4631 | if (state != PERF_EVENT_STATE_INACTIVE) { |
| 4632 | raw_spin_unlock_irqrestore(&ctx->lock, flags); |
| 4633 | goto again; |
| 4634 | } |
| 4635 | |
Stephane Eranian | c530ccd | 2010-10-15 15:26:01 +0200 | [diff] [blame] | 4636 | /* |
Peter Zijlstra | 0c1cbc1 | 2017-09-05 16:26:44 +0200 | [diff] [blame] | 4637 | * May read while context is not active (e.g., thread is |
 | 4638 | * blocked); in that case we cannot update the context time |
Stephane Eranian | c530ccd | 2010-10-15 15:26:01 +0200 | [diff] [blame] | 4639 | */ |
Peter Zijlstra | 0c1cbc1 | 2017-09-05 16:26:44 +0200 | [diff] [blame] | 4640 | if (ctx->is_active & EVENT_TIME) { |
Stephane Eranian | c530ccd | 2010-10-15 15:26:01 +0200 | [diff] [blame] | 4641 | update_context_time(ctx); |
Stephane Eranian | e5d1367 | 2011-02-14 11:20:01 +0200 | [diff] [blame] | 4642 | update_cgrp_time_from_event(event); |
| 4643 | } |
Peter Zijlstra | 0c1cbc1 | 2017-09-05 16:26:44 +0200 | [diff] [blame] | 4644 | |
Peter Zijlstra | 0d3d73a | 2017-09-05 14:16:28 +0200 | [diff] [blame] | 4645 | perf_event_update_time(event); |
Peter Zijlstra | 0492d4c | 2015-09-03 20:07:48 -0700 | [diff] [blame] | 4646 | if (group) |
Peter Zijlstra | 0d3d73a | 2017-09-05 14:16:28 +0200 | [diff] [blame] | 4647 | perf_event_update_sibling_time(event); |
Thomas Gleixner | e625cce1 | 2009-11-17 18:02:06 +0100 | [diff] [blame] | 4648 | raw_spin_unlock_irqrestore(&ctx->lock, flags); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4649 | } |
Sukadev Bhattiprolu | 7d88962 | 2015-09-03 20:07:50 -0700 | [diff] [blame] | 4650 | |
| 4651 | return ret; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4652 | } |
| 4653 | |
| 4654 | /* |
| 4655 | * Initialize the perf_event context in a task_struct: |
| 4656 | */ |
Peter Zijlstra | eb18447 | 2010-09-07 15:55:13 +0200 | [diff] [blame] | 4657 | static void __perf_event_init_context(struct perf_event_context *ctx) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4658 | { |
Thomas Gleixner | e625cce1 | 2009-11-17 18:02:06 +0100 | [diff] [blame] | 4659 | raw_spin_lock_init(&ctx->lock); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4660 | mutex_init(&ctx->mutex); |
Mark Rutland | 2fde4f9 | 2015-01-07 15:01:54 +0000 | [diff] [blame] | 4661 | INIT_LIST_HEAD(&ctx->active_ctx_list); |
Alexey Budankov | 8e1a203 | 2017-09-08 11:47:03 +0300 | [diff] [blame] | 4662 | perf_event_groups_init(&ctx->pinned_groups); |
| 4663 | perf_event_groups_init(&ctx->flexible_groups); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4664 | INIT_LIST_HEAD(&ctx->event_list); |
Peter Zijlstra | 6668128 | 2017-11-13 14:28:38 +0100 | [diff] [blame] | 4665 | INIT_LIST_HEAD(&ctx->pinned_active); |
| 4666 | INIT_LIST_HEAD(&ctx->flexible_active); |
Elena Reshetova | 8c94abb | 2019-01-28 14:27:26 +0200 | [diff] [blame] | 4667 | refcount_set(&ctx->refcount, 1); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4668 | } |
| 4669 | |
Peter Zijlstra | eb18447 | 2010-09-07 15:55:13 +0200 | [diff] [blame] | 4670 | static struct perf_event_context * |
| 4671 | alloc_perf_context(struct pmu *pmu, struct task_struct *task) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4672 | { |
| 4673 | struct perf_event_context *ctx; |
Peter Zijlstra | eb18447 | 2010-09-07 15:55:13 +0200 | [diff] [blame] | 4674 | |
| 4675 | ctx = kzalloc(sizeof(struct perf_event_context), GFP_KERNEL); |
| 4676 | if (!ctx) |
| 4677 | return NULL; |
| 4678 | |
| 4679 | __perf_event_init_context(ctx); |
Matthew Wilcox (Oracle) | 7b3c92b | 2019-07-04 15:13:23 -0700 | [diff] [blame] | 4680 | if (task) |
| 4681 | ctx->task = get_task_struct(task); |
Peter Zijlstra | eb18447 | 2010-09-07 15:55:13 +0200 | [diff] [blame] | 4682 | ctx->pmu = pmu; |
| 4683 | |
| 4684 | return ctx; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4685 | } |
| 4686 | |
Matt Helsley | 2ebd4ff | 2010-09-13 13:01:19 -0700 | [diff] [blame] | 4687 | static struct task_struct * |
| 4688 | find_lively_task_by_vpid(pid_t vpid) |
| 4689 | { |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4690 | struct task_struct *task; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4691 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4692 | rcu_read_lock(); |
Matt Helsley | 2ebd4ff | 2010-09-13 13:01:19 -0700 | [diff] [blame] | 4693 | if (!vpid) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4694 | task = current; |
| 4695 | else |
Matt Helsley | 2ebd4ff | 2010-09-13 13:01:19 -0700 | [diff] [blame] | 4696 | task = find_task_by_vpid(vpid); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4697 | if (task) |
| 4698 | get_task_struct(task); |
| 4699 | rcu_read_unlock(); |
| 4700 | |
| 4701 | if (!task) |
| 4702 | return ERR_PTR(-ESRCH); |
| 4703 | |
Matt Helsley | 2ebd4ff | 2010-09-13 13:01:19 -0700 | [diff] [blame] | 4704 | return task; |
Matt Helsley | 2ebd4ff | 2010-09-13 13:01:19 -0700 | [diff] [blame] | 4705 | } |
| 4706 | |
Peter Zijlstra | fe4b04f | 2011-02-02 13:19:09 +0100 | [diff] [blame] | 4707 | /* |
| 4708 | * Returns a matching context with refcount and pincount. |
| 4709 | */ |
Peter Zijlstra | 108b02c | 2010-09-06 14:32:03 +0200 | [diff] [blame] | 4710 | static struct perf_event_context * |
Yan, Zheng | 4af57ef | 2014-11-04 21:56:01 -0500 | [diff] [blame] | 4711 | find_get_context(struct pmu *pmu, struct task_struct *task, |
| 4712 | struct perf_event *event) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4713 | { |
Peter Zijlstra | 211de6e | 2014-09-30 19:23:08 +0200 | [diff] [blame] | 4714 | struct perf_event_context *ctx, *clone_ctx = NULL; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4715 | struct perf_cpu_context *cpuctx; |
Yan, Zheng | 4af57ef | 2014-11-04 21:56:01 -0500 | [diff] [blame] | 4716 | void *task_ctx_data = NULL; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4717 | unsigned long flags; |
Peter Zijlstra | 8dc85d547 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 4718 | int ctxn, err; |
Yan, Zheng | 4af57ef | 2014-11-04 21:56:01 -0500 | [diff] [blame] | 4719 | int cpu = event->cpu; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4720 | |
Oleg Nesterov | 22a4ec7 | 2011-01-18 17:10:08 +0100 | [diff] [blame] | 4721 | if (!task) { |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4722 | /* Must be root to operate on a CPU event: */ |
Joel Fernandes (Google) | da97e18 | 2019-10-14 13:03:08 -0400 | [diff] [blame] | 4723 | err = perf_allow_cpu(&event->attr); |
| 4724 | if (err) |
| 4725 | return ERR_PTR(err); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4726 | |
Peter Zijlstra | 108b02c | 2010-09-06 14:32:03 +0200 | [diff] [blame] | 4727 | cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4728 | ctx = &cpuctx->ctx; |
| 4729 | get_ctx(ctx); |
Marco Elver | 6c605f8 | 2021-05-27 12:47:11 +0200 | [diff] [blame] | 4730 | raw_spin_lock_irqsave(&ctx->lock, flags); |
Peter Zijlstra | fe4b04f | 2011-02-02 13:19:09 +0100 | [diff] [blame] | 4731 | ++ctx->pin_count; |
Marco Elver | 6c605f8 | 2021-05-27 12:47:11 +0200 | [diff] [blame] | 4732 | raw_spin_unlock_irqrestore(&ctx->lock, flags); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4733 | |
| 4734 | return ctx; |
| 4735 | } |
| 4736 | |
Peter Zijlstra | 8dc85d547 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 4737 | err = -EINVAL; |
| 4738 | ctxn = pmu->task_ctx_nr; |
| 4739 | if (ctxn < 0) |
| 4740 | goto errout; |
| 4741 | |
Yan, Zheng | 4af57ef | 2014-11-04 21:56:01 -0500 | [diff] [blame] | 4742 | if (event->attach_state & PERF_ATTACH_TASK_DATA) { |
Kan Liang | ff9ff92 | 2020-07-03 05:49:21 -0700 | [diff] [blame] | 4743 | task_ctx_data = alloc_task_ctx_data(pmu); |
Yan, Zheng | 4af57ef | 2014-11-04 21:56:01 -0500 | [diff] [blame] | 4744 | if (!task_ctx_data) { |
| 4745 | err = -ENOMEM; |
| 4746 | goto errout; |
| 4747 | } |
| 4748 | } |
| 4749 | |
Peter Zijlstra | 9ed6060 | 2010-06-11 17:36:35 +0200 | [diff] [blame] | 4750 | retry: |
Peter Zijlstra | 8dc85d547 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 4751 | ctx = perf_lock_task_context(task, ctxn, &flags); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4752 | if (ctx) { |
Peter Zijlstra | 211de6e | 2014-09-30 19:23:08 +0200 | [diff] [blame] | 4753 | clone_ctx = unclone_ctx(ctx); |
Peter Zijlstra | fe4b04f | 2011-02-02 13:19:09 +0100 | [diff] [blame] | 4754 | ++ctx->pin_count; |
Yan, Zheng | 4af57ef | 2014-11-04 21:56:01 -0500 | [diff] [blame] | 4755 | |
| 4756 | if (task_ctx_data && !ctx->task_ctx_data) { |
| 4757 | ctx->task_ctx_data = task_ctx_data; |
| 4758 | task_ctx_data = NULL; |
| 4759 | } |
Thomas Gleixner | e625cce1 | 2009-11-17 18:02:06 +0100 | [diff] [blame] | 4760 | raw_spin_unlock_irqrestore(&ctx->lock, flags); |
Peter Zijlstra | 211de6e | 2014-09-30 19:23:08 +0200 | [diff] [blame] | 4761 | |
| 4762 | if (clone_ctx) |
| 4763 | put_ctx(clone_ctx); |
Peter Zijlstra | 9137fb2 | 2011-04-09 21:17:41 +0200 | [diff] [blame] | 4764 | } else { |
Peter Zijlstra | eb18447 | 2010-09-07 15:55:13 +0200 | [diff] [blame] | 4765 | ctx = alloc_perf_context(pmu, task); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4766 | err = -ENOMEM; |
| 4767 | if (!ctx) |
| 4768 | goto errout; |
Peter Zijlstra | eb18447 | 2010-09-07 15:55:13 +0200 | [diff] [blame] | 4769 | |
Yan, Zheng | 4af57ef | 2014-11-04 21:56:01 -0500 | [diff] [blame] | 4770 | if (task_ctx_data) { |
| 4771 | ctx->task_ctx_data = task_ctx_data; |
| 4772 | task_ctx_data = NULL; |
| 4773 | } |
| 4774 | |
Oleg Nesterov | dbe08d8 | 2011-01-19 19:22:07 +0100 | [diff] [blame] | 4775 | err = 0; |
| 4776 | mutex_lock(&task->perf_event_mutex); |
| 4777 | /* |
 | 4778 | * If it has already passed perf_event_exit_task(), |
 | 4779 | * we must see PF_EXITING; it takes this mutex too. |
| 4780 | */ |
| 4781 | if (task->flags & PF_EXITING) |
| 4782 | err = -ESRCH; |
| 4783 | else if (task->perf_event_ctxp[ctxn]) |
| 4784 | err = -EAGAIN; |
Peter Zijlstra | fe4b04f | 2011-02-02 13:19:09 +0100 | [diff] [blame] | 4785 | else { |
Peter Zijlstra | 9137fb2 | 2011-04-09 21:17:41 +0200 | [diff] [blame] | 4786 | get_ctx(ctx); |
Peter Zijlstra | fe4b04f | 2011-02-02 13:19:09 +0100 | [diff] [blame] | 4787 | ++ctx->pin_count; |
Oleg Nesterov | dbe08d8 | 2011-01-19 19:22:07 +0100 | [diff] [blame] | 4788 | rcu_assign_pointer(task->perf_event_ctxp[ctxn], ctx); |
Peter Zijlstra | fe4b04f | 2011-02-02 13:19:09 +0100 | [diff] [blame] | 4789 | } |
Oleg Nesterov | dbe08d8 | 2011-01-19 19:22:07 +0100 | [diff] [blame] | 4790 | mutex_unlock(&task->perf_event_mutex); |
| 4791 | |
| 4792 | if (unlikely(err)) { |
Peter Zijlstra | 9137fb2 | 2011-04-09 21:17:41 +0200 | [diff] [blame] | 4793 | put_ctx(ctx); |
Oleg Nesterov | dbe08d8 | 2011-01-19 19:22:07 +0100 | [diff] [blame] | 4794 | |
| 4795 | if (err == -EAGAIN) |
| 4796 | goto retry; |
| 4797 | goto errout; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4798 | } |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4799 | } |
| 4800 | |
Kan Liang | ff9ff92 | 2020-07-03 05:49:21 -0700 | [diff] [blame] | 4801 | free_task_ctx_data(pmu, task_ctx_data); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4802 | return ctx; |
| 4803 | |
Peter Zijlstra | 9ed6060 | 2010-06-11 17:36:35 +0200 | [diff] [blame] | 4804 | errout: |
Kan Liang | ff9ff92 | 2020-07-03 05:49:21 -0700 | [diff] [blame] | 4805 | free_task_ctx_data(pmu, task_ctx_data); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4806 | return ERR_PTR(err); |
| 4807 | } |
| 4808 | |
Li Zefan | 6fb2915 | 2009-10-15 11:21:42 +0800 | [diff] [blame] | 4809 | static void perf_event_free_filter(struct perf_event *event); |
| 4810 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4811 | static void free_event_rcu(struct rcu_head *head) |
| 4812 | { |
| 4813 | struct perf_event *event; |
| 4814 | |
| 4815 | event = container_of(head, struct perf_event, rcu_head); |
| 4816 | if (event->ns) |
| 4817 | put_pid_ns(event->ns); |
Li Zefan | 6fb2915 | 2009-10-15 11:21:42 +0800 | [diff] [blame] | 4818 | perf_event_free_filter(event); |
Namhyung Kim | bdacfaf | 2021-03-11 20:54:12 +0900 | [diff] [blame] | 4819 | kmem_cache_free(perf_event_cache, event); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4820 | } |
| 4821 | |
Peter Zijlstra | b69cf53 | 2014-03-14 10:50:33 +0100 | [diff] [blame] | 4822 | static void ring_buffer_attach(struct perf_event *event, |
Steven Rostedt (VMware) | 56de4e8 | 2019-12-13 13:21:30 -0500 | [diff] [blame] | 4823 | struct perf_buffer *rb); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 4824 | |
Kan Liang | f2fb6be | 2016-03-23 11:24:37 -0700 | [diff] [blame] | 4825 | static void detach_sb_event(struct perf_event *event) |
| 4826 | { |
| 4827 | struct pmu_event_list *pel = per_cpu_ptr(&pmu_sb_events, event->cpu); |
| 4828 | |
| 4829 | raw_spin_lock(&pel->lock); |
| 4830 | list_del_rcu(&event->sb_list); |
| 4831 | raw_spin_unlock(&pel->lock); |
| 4832 | } |
| 4833 | |
David Carrillo-Cisneros | a4f144e | 2016-06-01 12:33:05 -0700 | [diff] [blame] | 4834 | static bool is_sb_event(struct perf_event *event) |
Kan Liang | f2fb6be | 2016-03-23 11:24:37 -0700 | [diff] [blame] | 4835 | { |
David Carrillo-Cisneros | a4f144e | 2016-06-01 12:33:05 -0700 | [diff] [blame] | 4836 | struct perf_event_attr *attr = &event->attr; |
| 4837 | |
Kan Liang | f2fb6be | 2016-03-23 11:24:37 -0700 | [diff] [blame] | 4838 | if (event->parent) |
David Carrillo-Cisneros | a4f144e | 2016-06-01 12:33:05 -0700 | [diff] [blame] | 4839 | return false; |
Kan Liang | f2fb6be | 2016-03-23 11:24:37 -0700 | [diff] [blame] | 4840 | |
| 4841 | if (event->attach_state & PERF_ATTACH_TASK) |
David Carrillo-Cisneros | a4f144e | 2016-06-01 12:33:05 -0700 | [diff] [blame] | 4842 | return false; |
Kan Liang | f2fb6be | 2016-03-23 11:24:37 -0700 | [diff] [blame] | 4843 | |
David Carrillo-Cisneros | a4f144e | 2016-06-01 12:33:05 -0700 | [diff] [blame] | 4844 | if (attr->mmap || attr->mmap_data || attr->mmap2 || |
| 4845 | attr->comm || attr->comm_exec || |
Song Liu | 76193a9 | 2019-01-17 08:15:13 -0800 | [diff] [blame] | 4846 | attr->task || attr->ksymbol || |
Adrian Hunter | e17d43b | 2020-05-12 15:19:08 +0300 | [diff] [blame] | 4847 | attr->context_switch || attr->text_poke || |
Song Liu | 21038f2 | 2019-02-25 16:20:05 -0800 | [diff] [blame] | 4848 | attr->bpf_event) |
David Carrillo-Cisneros | a4f144e | 2016-06-01 12:33:05 -0700 | [diff] [blame] | 4849 | return true; |
| 4850 | return false; |
| 4851 | } |
| 4852 | |
| 4853 | static void unaccount_pmu_sb_event(struct perf_event *event) |
| 4854 | { |
| 4855 | if (is_sb_event(event)) |
| 4856 | detach_sb_event(event); |
Kan Liang | f2fb6be | 2016-03-23 11:24:37 -0700 | [diff] [blame] | 4857 | } |
| 4858 | |
Frederic Weisbecker | 4beb31f | 2013-07-23 02:31:02 +0200 | [diff] [blame] | 4859 | static void unaccount_event_cpu(struct perf_event *event, int cpu) |
| 4860 | { |
| 4861 | if (event->parent) |
| 4862 | return; |
| 4863 | |
Frederic Weisbecker | 4beb31f | 2013-07-23 02:31:02 +0200 | [diff] [blame] | 4864 | if (is_cgroup_event(event)) |
| 4865 | atomic_dec(&per_cpu(perf_cgroup_events, cpu)); |
| 4866 | } |
| 4867 | |
Frederic Weisbecker | 555e0c1 | 2015-07-16 17:42:29 +0200 | [diff] [blame] | 4868 | #ifdef CONFIG_NO_HZ_FULL |
| 4869 | static DEFINE_SPINLOCK(nr_freq_lock); |
| 4870 | #endif |
| 4871 | |
| 4872 | static void unaccount_freq_event_nohz(void) |
| 4873 | { |
| 4874 | #ifdef CONFIG_NO_HZ_FULL |
| 4875 | spin_lock(&nr_freq_lock); |
| 4876 | if (atomic_dec_and_test(&nr_freq_events)) |
| 4877 | tick_nohz_dep_clear(TICK_DEP_BIT_PERF_EVENTS); |
| 4878 | spin_unlock(&nr_freq_lock); |
| 4879 | #endif |
| 4880 | } |
| 4881 | |
| 4882 | static void unaccount_freq_event(void) |
| 4883 | { |
| 4884 | if (tick_nohz_full_enabled()) |
| 4885 | unaccount_freq_event_nohz(); |
| 4886 | else |
| 4887 | atomic_dec(&nr_freq_events); |
| 4888 | } |
| 4889 | |
Frederic Weisbecker | 4beb31f | 2013-07-23 02:31:02 +0200 | [diff] [blame] | 4890 | static void unaccount_event(struct perf_event *event) |
| 4891 | { |
Peter Zijlstra | 25432ae | 2016-01-08 11:05:09 +0100 | [diff] [blame] | 4892 | bool dec = false; |
| 4893 | |
Frederic Weisbecker | 4beb31f | 2013-07-23 02:31:02 +0200 | [diff] [blame] | 4894 | if (event->parent) |
| 4895 | return; |
| 4896 | |
Kan Liang | a5398bf | 2020-11-30 11:38:40 -0800 | [diff] [blame] | 4897 | if (event->attach_state & (PERF_ATTACH_TASK | PERF_ATTACH_SCHED_CB)) |
Peter Zijlstra | 25432ae | 2016-01-08 11:05:09 +0100 | [diff] [blame] | 4898 | dec = true; |
Frederic Weisbecker | 4beb31f | 2013-07-23 02:31:02 +0200 | [diff] [blame] | 4899 | if (event->attr.mmap || event->attr.mmap_data) |
| 4900 | atomic_dec(&nr_mmap_events); |
Jiri Olsa | 88a16a1 | 2021-01-14 14:40:44 +0100 | [diff] [blame] | 4901 | if (event->attr.build_id) |
| 4902 | atomic_dec(&nr_build_id_events); |
Frederic Weisbecker | 4beb31f | 2013-07-23 02:31:02 +0200 | [diff] [blame] | 4903 | if (event->attr.comm) |
| 4904 | atomic_dec(&nr_comm_events); |
Hari Bathini | e422267 | 2017-03-08 02:11:36 +0530 | [diff] [blame] | 4905 | if (event->attr.namespaces) |
| 4906 | atomic_dec(&nr_namespaces_events); |
Namhyung Kim | 96aaab6 | 2020-03-25 21:45:28 +0900 | [diff] [blame] | 4907 | if (event->attr.cgroup) |
| 4908 | atomic_dec(&nr_cgroup_events); |
Frederic Weisbecker | 4beb31f | 2013-07-23 02:31:02 +0200 | [diff] [blame] | 4909 | if (event->attr.task) |
| 4910 | atomic_dec(&nr_task_events); |
Frederic Weisbecker | 948b26b | 2013-08-02 18:29:55 +0200 | [diff] [blame] | 4911 | if (event->attr.freq) |
Frederic Weisbecker | 555e0c1 | 2015-07-16 17:42:29 +0200 | [diff] [blame] | 4912 | unaccount_freq_event(); |
Adrian Hunter | 45ac140 | 2015-07-21 12:44:02 +0300 | [diff] [blame] | 4913 | if (event->attr.context_switch) { |
Peter Zijlstra | 25432ae | 2016-01-08 11:05:09 +0100 | [diff] [blame] | 4914 | dec = true; |
Adrian Hunter | 45ac140 | 2015-07-21 12:44:02 +0300 | [diff] [blame] | 4915 | atomic_dec(&nr_switch_events); |
| 4916 | } |
Frederic Weisbecker | 4beb31f | 2013-07-23 02:31:02 +0200 | [diff] [blame] | 4917 | if (is_cgroup_event(event)) |
Peter Zijlstra | 25432ae | 2016-01-08 11:05:09 +0100 | [diff] [blame] | 4918 | dec = true; |
Frederic Weisbecker | 4beb31f | 2013-07-23 02:31:02 +0200 | [diff] [blame] | 4919 | if (has_branch_stack(event)) |
Peter Zijlstra | 25432ae | 2016-01-08 11:05:09 +0100 | [diff] [blame] | 4920 | dec = true; |
Song Liu | 76193a9 | 2019-01-17 08:15:13 -0800 | [diff] [blame] | 4921 | if (event->attr.ksymbol) |
| 4922 | atomic_dec(&nr_ksymbol_events); |
Song Liu | 6ee52e2 | 2019-01-17 08:15:15 -0800 | [diff] [blame] | 4923 | if (event->attr.bpf_event) |
| 4924 | atomic_dec(&nr_bpf_events); |
Adrian Hunter | e17d43b | 2020-05-12 15:19:08 +0300 | [diff] [blame] | 4925 | if (event->attr.text_poke) |
| 4926 | atomic_dec(&nr_text_poke_events); |
Peter Zijlstra | 25432ae | 2016-01-08 11:05:09 +0100 | [diff] [blame] | 4927 | |
Peter Zijlstra | 9107c89 | 2016-02-24 18:45:45 +0100 | [diff] [blame] | 4928 | if (dec) { |
| 4929 | if (!atomic_add_unless(&perf_sched_count, -1, 1)) |
| 4930 | schedule_delayed_work(&perf_sched_work, HZ); |
| 4931 | } |
Frederic Weisbecker | 4beb31f | 2013-07-23 02:31:02 +0200 | [diff] [blame] | 4932 | |
| 4933 | unaccount_event_cpu(event, event->cpu); |
Kan Liang | f2fb6be | 2016-03-23 11:24:37 -0700 | [diff] [blame] | 4934 | |
| 4935 | unaccount_pmu_sb_event(event); |
Frederic Weisbecker | 4beb31f | 2013-07-23 02:31:02 +0200 | [diff] [blame] | 4936 | } |
| 4937 | |
Peter Zijlstra | 9107c89 | 2016-02-24 18:45:45 +0100 | [diff] [blame] | 4938 | static void perf_sched_delayed(struct work_struct *work) |
| 4939 | { |
| 4940 | mutex_lock(&perf_sched_mutex); |
| 4941 | if (atomic_dec_and_test(&perf_sched_count)) |
| 4942 | static_branch_disable(&perf_sched_events); |
| 4943 | mutex_unlock(&perf_sched_mutex); |
| 4944 | } |
| 4945 | |
Alexander Shishkin | bed5b25 | 2015-01-30 12:31:06 +0200 | [diff] [blame] | 4946 | /* |
| 4947 | * The following implement mutual exclusion of events on "exclusive" pmus |
| 4948 | * (PERF_PMU_CAP_EXCLUSIVE). Such pmus can only have one event scheduled |
| 4949 | * at a time, so we disallow creating events that might conflict, namely: |
| 4950 | * |
| 4951 | * 1) cpu-wide events in the presence of per-task events, |
| 4952 | * 2) per-task events in the presence of cpu-wide events, |
| 4953 | * 3) two matching events on the same context. |
| 4954 | * |
| 4955 | * The former two cases are handled in the allocation path (perf_event_alloc(), |
Peter Zijlstra | a0733e6 | 2016-01-26 12:14:40 +0100 | [diff] [blame] | 4956 | * _free_event()), the latter -- before the first perf_install_in_context(). |
Alexander Shishkin | bed5b25 | 2015-01-30 12:31:06 +0200 | [diff] [blame] | 4957 | */ |
| 4958 | static int exclusive_event_init(struct perf_event *event) |
| 4959 | { |
| 4960 | struct pmu *pmu = event->pmu; |
| 4961 | |
Alexander Shishkin | 8a58dda | 2019-07-01 14:07:55 +0300 | [diff] [blame] | 4962 | if (!is_exclusive_pmu(pmu)) |
Alexander Shishkin | bed5b25 | 2015-01-30 12:31:06 +0200 | [diff] [blame] | 4963 | return 0; |
| 4964 | |
| 4965 | /* |
| 4966 | * Prevent co-existence of per-task and cpu-wide events on the |
| 4967 | * same exclusive pmu. |
| 4968 | * |
| 4969 | * Negative pmu::exclusive_cnt means there are cpu-wide |
| 4970 | * events on this "exclusive" pmu, positive means there are |
| 4971 | * per-task events. |
| 4972 | * |
| 4973 | * Since this is called in perf_event_alloc() path, event::ctx |
| 4974 | * doesn't exist yet; it is, however, safe to use PERF_ATTACH_TASK |
| 4975 | * to mean "per-task event", because unlike other attach states it |
| 4976 | * never gets cleared. |
| 4977 | */ |
| 4978 | if (event->attach_state & PERF_ATTACH_TASK) { |
| 4979 | if (!atomic_inc_unless_negative(&pmu->exclusive_cnt)) |
| 4980 | return -EBUSY; |
| 4981 | } else { |
| 4982 | if (!atomic_dec_unless_positive(&pmu->exclusive_cnt)) |
| 4983 | return -EBUSY; |
| 4984 | } |
| 4985 | |
| 4986 | return 0; |
| 4987 | } |
| 4988 | |
| 4989 | static void exclusive_event_destroy(struct perf_event *event) |
| 4990 | { |
| 4991 | struct pmu *pmu = event->pmu; |
| 4992 | |
Alexander Shishkin | 8a58dda | 2019-07-01 14:07:55 +0300 | [diff] [blame] | 4993 | if (!is_exclusive_pmu(pmu)) |
Alexander Shishkin | bed5b25 | 2015-01-30 12:31:06 +0200 | [diff] [blame] | 4994 | return; |
| 4995 | |
| 4996 | /* see comment in exclusive_event_init() */ |
| 4997 | if (event->attach_state & PERF_ATTACH_TASK) |
| 4998 | atomic_dec(&pmu->exclusive_cnt); |
| 4999 | else |
| 5000 | atomic_inc(&pmu->exclusive_cnt); |
| 5001 | } |
| 5002 | |
| 5003 | static bool exclusive_event_match(struct perf_event *e1, struct perf_event *e2) |
| 5004 | { |
Alexander Shishkin | 3bf6215 | 2016-09-20 18:48:11 +0300 | [diff] [blame] | 5005 | if ((e1->pmu == e2->pmu) && |
Alexander Shishkin | bed5b25 | 2015-01-30 12:31:06 +0200 | [diff] [blame] | 5006 | (e1->cpu == e2->cpu || |
| 5007 | e1->cpu == -1 || |
| 5008 | e2->cpu == -1)) |
| 5009 | return true; |
| 5010 | return false; |
| 5011 | } |
| 5012 | |
Alexander Shishkin | bed5b25 | 2015-01-30 12:31:06 +0200 | [diff] [blame] | 5013 | static bool exclusive_event_installable(struct perf_event *event, |
| 5014 | struct perf_event_context *ctx) |
| 5015 | { |
| 5016 | struct perf_event *iter_event; |
| 5017 | struct pmu *pmu = event->pmu; |
| 5018 | |
Alexander Shishkin | 8a58dda | 2019-07-01 14:07:55 +0300 | [diff] [blame] | 5019 | lockdep_assert_held(&ctx->mutex); |
| 5020 | |
| 5021 | if (!is_exclusive_pmu(pmu)) |
Alexander Shishkin | bed5b25 | 2015-01-30 12:31:06 +0200 | [diff] [blame] | 5022 | return true; |
| 5023 | |
| 5024 | list_for_each_entry(iter_event, &ctx->event_list, event_entry) { |
| 5025 | if (exclusive_event_match(iter_event, event)) |
| 5026 | return false; |
| 5027 | } |
| 5028 | |
| 5029 | return true; |
| 5030 | } |
| 5031 | |
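The sign convention used by pmu::exclusive_cnt above (negative means only cpu-wide events are attached, positive means only per-task events) is easier to picture in isolation. The following is a small user-space model of the same rule; it is a sketch only, with hand-rolled inc_unless_negative()/dec_unless_positive() helpers standing in for the kernel's atomic_inc_unless_negative() and atomic_dec_unless_positive(), and the names are illustrative rather than taken from this file.

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>

	static atomic_int exclusive_cnt;	/* models pmu->exclusive_cnt */

	/* Increment only while the counter is not negative (per-task side). */
	static bool inc_unless_negative(atomic_int *v)
	{
		int old = atomic_load(v);

		do {
			if (old < 0)
				return false;
		} while (!atomic_compare_exchange_weak(v, &old, old + 1));
		return true;
	}

	/* Decrement only while the counter is not positive (cpu-wide side). */
	static bool dec_unless_positive(atomic_int *v)
	{
		int old = atomic_load(v);

		do {
			if (old > 0)
				return false;
		} while (!atomic_compare_exchange_weak(v, &old, old - 1));
		return true;
	}

	int main(void)
	{
		printf("per-task A: %d\n", inc_unless_negative(&exclusive_cnt)); /* 1: allowed */
		printf("per-task B: %d\n", inc_unless_negative(&exclusive_cnt)); /* 1: allowed */
		printf("cpu-wide:   %d\n", dec_unless_positive(&exclusive_cnt)); /* 0: refused */
		return 0;
	}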
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 5032 | static void perf_addr_filters_splice(struct perf_event *event, |
| 5033 | struct list_head *head); |
| 5034 | |
Peter Zijlstra | 683ede4 | 2014-05-05 12:11:24 +0200 | [diff] [blame] | 5035 | static void _free_event(struct perf_event *event) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5036 | { |
Peter Zijlstra | e360adb | 2010-10-14 14:01:34 +0800 | [diff] [blame] | 5037 | irq_work_sync(&event->pending); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5038 | |
Frederic Weisbecker | 4beb31f | 2013-07-23 02:31:02 +0200 | [diff] [blame] | 5039 | unaccount_event(event); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5040 | |
Joel Fernandes (Google) | da97e18 | 2019-10-14 13:03:08 -0400 | [diff] [blame] | 5041 | security_perf_event_free(event); |
| 5042 | |
Frederic Weisbecker | 7636913 | 2011-05-19 19:55:04 +0200 | [diff] [blame] | 5043 | if (event->rb) { |
Peter Zijlstra | 9bb5d40 | 2013-06-04 10:44:21 +0200 | [diff] [blame] | 5044 | /* |
| 5045 | * Can happen when we close an event with re-directed output. |
| 5046 | * |
| 5047 | * Since we have a 0 refcount, perf_mmap_close() will skip |
| 5048 | * over us; possibly making our ring_buffer_put() the last. |
| 5049 | */ |
| 5050 | mutex_lock(&event->mmap_mutex); |
Peter Zijlstra | b69cf53 | 2014-03-14 10:50:33 +0100 | [diff] [blame] | 5051 | ring_buffer_attach(event, NULL); |
Peter Zijlstra | 9bb5d40 | 2013-06-04 10:44:21 +0200 | [diff] [blame] | 5052 | mutex_unlock(&event->mmap_mutex); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5053 | } |
| 5054 | |
Stephane Eranian | e5d1367 | 2011-02-14 11:20:01 +0200 | [diff] [blame] | 5055 | if (is_cgroup_event(event)) |
| 5056 | perf_detach_cgroup(event); |
| 5057 | |
Peter Zijlstra | a0733e6 | 2016-01-26 12:14:40 +0100 | [diff] [blame] | 5058 | if (!event->parent) { |
| 5059 | if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) |
| 5060 | put_callchain_buffers(); |
| 5061 | } |
| 5062 | |
| 5063 | perf_event_free_bpf_prog(event); |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 5064 | perf_addr_filters_splice(event, NULL); |
Alexander Shishkin | c60f83b | 2019-02-15 13:56:55 +0200 | [diff] [blame] | 5065 | kfree(event->addr_filter_ranges); |
Peter Zijlstra | a0733e6 | 2016-01-26 12:14:40 +0100 | [diff] [blame] | 5066 | |
| 5067 | if (event->destroy) |
| 5068 | event->destroy(event); |
| 5069 | |
Peter Zijlstra | 1cf8dfe | 2019-07-13 11:21:25 +0200 | [diff] [blame] | 5070 | /* |
| 5071 | * Must be after ->destroy(), due to uprobe_perf_close() using |
| 5072 | * hw.target. |
| 5073 | */ |
Prashant Bhole | 621b6d2 | 2018-04-09 19:03:46 +0900 | [diff] [blame] | 5074 | if (event->hw.target) |
| 5075 | put_task_struct(event->hw.target); |
| 5076 | |
Peter Zijlstra | 1cf8dfe | 2019-07-13 11:21:25 +0200 | [diff] [blame] | 5077 | /* |
| 5078 | * perf_event_free_task() relies on put_ctx() being 'last', in particular |
| 5079 | * all task references must be cleaned up. |
| 5080 | */ |
| 5081 | if (event->ctx) |
| 5082 | put_ctx(event->ctx); |
| 5083 | |
Alexander Shishkin | 62a92c8 | 2016-06-07 15:44:15 +0300 | [diff] [blame] | 5084 | exclusive_event_destroy(event); |
| 5085 | module_put(event->pmu->module); |
Peter Zijlstra | a0733e6 | 2016-01-26 12:14:40 +0100 | [diff] [blame] | 5086 | |
| 5087 | call_rcu(&event->rcu_head, free_event_rcu); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5088 | } |
| 5089 | |
Peter Zijlstra | 683ede4 | 2014-05-05 12:11:24 +0200 | [diff] [blame] | 5090 | /* |
| 5091 | * Used to free events which have a known refcount of 1, such as error-path |
| 5092 | * events that were never exposed and inherited events. |
| 5093 | */ |
| 5094 | static void free_event(struct perf_event *event) |
Arjan van de Ven | fb0459d | 2009-09-25 12:25:56 +0200 | [diff] [blame] | 5095 | { |
Peter Zijlstra | 683ede4 | 2014-05-05 12:11:24 +0200 | [diff] [blame] | 5096 | if (WARN(atomic_long_cmpxchg(&event->refcount, 1, 0) != 1, |
| 5097 | "unexpected event refcount: %ld; ptr=%p\n", |
| 5098 | atomic_long_read(&event->refcount), event)) { |
| 5099 | /* leak to avoid use-after-free */ |
| 5100 | return; |
| 5101 | } |
Arjan van de Ven | fb0459d | 2009-09-25 12:25:56 +0200 | [diff] [blame] | 5102 | |
Peter Zijlstra | 683ede4 | 2014-05-05 12:11:24 +0200 | [diff] [blame] | 5103 | _free_event(event); |
Arjan van de Ven | fb0459d | 2009-09-25 12:25:56 +0200 | [diff] [blame] | 5104 | } |
Arjan van de Ven | fb0459d | 2009-09-25 12:25:56 +0200 | [diff] [blame] | 5105 | |
Peter Zijlstra | a66a305 | 2009-11-23 11:37:23 +0100 | [diff] [blame] | 5106 | /* |
Jiri Olsa | f869776 | 2014-08-01 14:33:01 +0200 | [diff] [blame] | 5107 | * Remove user event from the owner task. |
Peter Zijlstra | a66a305 | 2009-11-23 11:37:23 +0100 | [diff] [blame] | 5108 | */ |
Jiri Olsa | f869776 | 2014-08-01 14:33:01 +0200 | [diff] [blame] | 5109 | static void perf_remove_from_owner(struct perf_event *event) |
Peter Zijlstra | a66a305 | 2009-11-23 11:37:23 +0100 | [diff] [blame] | 5110 | { |
Peter Zijlstra | 8882135 | 2010-11-09 19:01:43 +0100 | [diff] [blame] | 5111 | struct task_struct *owner; |
Peter Zijlstra | a66a305 | 2009-11-23 11:37:23 +0100 | [diff] [blame] | 5112 | |
Peter Zijlstra | 8882135 | 2010-11-09 19:01:43 +0100 | [diff] [blame] | 5113 | rcu_read_lock(); |
Peter Zijlstra | 8882135 | 2010-11-09 19:01:43 +0100 | [diff] [blame] | 5114 | /* |
Peter Zijlstra | f47c02c | 2016-01-26 12:30:14 +0100 | [diff] [blame] | 5115 | * Matches the smp_store_release() in perf_event_exit_task(). If we |
| 5116 | * observe !owner it means the list deletion is complete and we can |
| 5117 | * indeed free this event, otherwise we need to serialize on |
Peter Zijlstra | 8882135 | 2010-11-09 19:01:43 +0100 | [diff] [blame] | 5118 | * owner->perf_event_mutex. |
| 5119 | */ |
Will Deacon | 506458e | 2017-10-24 11:22:48 +0100 | [diff] [blame] | 5120 | owner = READ_ONCE(event->owner); |
Peter Zijlstra | 8882135 | 2010-11-09 19:01:43 +0100 | [diff] [blame] | 5121 | if (owner) { |
| 5122 | /* |
| 5123 | * Since delayed_put_task_struct() also drops the last |
| 5124 | * task reference we can safely take a new reference |
| 5125 | * while holding the rcu_read_lock(). |
| 5126 | */ |
| 5127 | get_task_struct(owner); |
| 5128 | } |
| 5129 | rcu_read_unlock(); |
| 5130 | |
| 5131 | if (owner) { |
Peter Zijlstra | f63a8da | 2015-01-23 12:24:14 +0100 | [diff] [blame] | 5132 | /* |
| 5133 | * If we're here through perf_event_exit_task() we're already |
| 5134 | * holding ctx->mutex which would be an inversion wrt. the |
| 5135 | * normal lock order. |
| 5136 | * |
| 5137 | * However we can safely take this lock because it's the child |
| 5138 | * ctx->mutex. |
| 5139 | */ |
| 5140 | mutex_lock_nested(&owner->perf_event_mutex, SINGLE_DEPTH_NESTING); |
| 5141 | |
Peter Zijlstra | 8882135 | 2010-11-09 19:01:43 +0100 | [diff] [blame] | 5142 | /* |
| 5143 | * We have to re-check the event->owner field, if it is cleared |
| 5144 | * we raced with perf_event_exit_task(), acquiring the mutex |
| 5145 | * ensured they're done, and we can proceed with freeing the |
| 5146 | * event. |
| 5147 | */ |
Peter Zijlstra | f47c02c | 2016-01-26 12:30:14 +0100 | [diff] [blame] | 5148 | if (event->owner) { |
Peter Zijlstra | 8882135 | 2010-11-09 19:01:43 +0100 | [diff] [blame] | 5149 | list_del_init(&event->owner_entry); |
Peter Zijlstra | f47c02c | 2016-01-26 12:30:14 +0100 | [diff] [blame] | 5150 | smp_store_release(&event->owner, NULL); |
| 5151 | } |
Peter Zijlstra | 8882135 | 2010-11-09 19:01:43 +0100 | [diff] [blame] | 5152 | mutex_unlock(&owner->perf_event_mutex); |
| 5153 | put_task_struct(owner); |
| 5154 | } |
Jiri Olsa | f869776 | 2014-08-01 14:33:01 +0200 | [diff] [blame] | 5155 | } |
| 5156 | |
Jiri Olsa | f869776 | 2014-08-01 14:33:01 +0200 | [diff] [blame] | 5157 | static void put_event(struct perf_event *event) |
| 5158 | { |
Jiri Olsa | f869776 | 2014-08-01 14:33:01 +0200 | [diff] [blame] | 5159 | if (!atomic_long_dec_and_test(&event->refcount)) |
| 5160 | return; |
| 5161 | |
Peter Zijlstra | 683ede4 | 2014-05-05 12:11:24 +0200 | [diff] [blame] | 5162 | _free_event(event); |
Al Viro | a6fa941 | 2012-08-20 14:59:25 +0100 | [diff] [blame] | 5163 | } |
| 5164 | |
Peter Zijlstra | c6e5b73 | 2016-01-15 16:07:41 +0200 | [diff] [blame] | 5165 | /* |
| 5166 | * Kill an event dead; while event:refcount will preserve the event |
| 5167 | * object, it will not preserve its functionality. Once the last 'user' |
| 5168 | * gives up the object, we'll destroy the thing. |
| 5169 | */ |
Peter Zijlstra | 683ede4 | 2014-05-05 12:11:24 +0200 | [diff] [blame] | 5170 | int perf_event_release_kernel(struct perf_event *event) |
| 5171 | { |
Peter Zijlstra | a4f4bb6 | 2016-02-24 18:45:42 +0100 | [diff] [blame] | 5172 | struct perf_event_context *ctx = event->ctx; |
Peter Zijlstra | c6e5b73 | 2016-01-15 16:07:41 +0200 | [diff] [blame] | 5173 | struct perf_event *child, *tmp; |
Peter Zijlstra | 82d9485 | 2018-01-09 13:10:30 +0100 | [diff] [blame] | 5174 | LIST_HEAD(free_list); |
Peter Zijlstra | c6e5b73 | 2016-01-15 16:07:41 +0200 | [diff] [blame] | 5175 | |
Peter Zijlstra | a4f4bb6 | 2016-02-24 18:45:42 +0100 | [diff] [blame] | 5176 | /* |
| 5177 | * If we got here through err_file: fput(event_file); we will not have |
| 5178 | * attached to a context yet. |
| 5179 | */ |
| 5180 | if (!ctx) { |
| 5181 | WARN_ON_ONCE(event->attach_state & |
| 5182 | (PERF_ATTACH_CONTEXT|PERF_ATTACH_GROUP)); |
| 5183 | goto no_ctx; |
| 5184 | } |
| 5185 | |
Peter Zijlstra | 8882135 | 2010-11-09 19:01:43 +0100 | [diff] [blame] | 5186 | if (!is_kernel_event(event)) |
| 5187 | perf_remove_from_owner(event); |
| 5188 | |
Peter Zijlstra | 5fa7c8e | 2016-01-26 15:25:15 +0100 | [diff] [blame] | 5189 | ctx = perf_event_ctx_lock(event); |
Peter Zijlstra | 683ede4 | 2014-05-05 12:11:24 +0200 | [diff] [blame] | 5190 | WARN_ON_ONCE(ctx->parent_ctx); |
Peter Zijlstra | a69b0ca | 2016-02-24 18:45:44 +0100 | [diff] [blame] | 5191 | perf_remove_from_context(event, DETACH_GROUP); |
Peter Zijlstra | 8882135 | 2010-11-09 19:01:43 +0100 | [diff] [blame] | 5192 | |
Peter Zijlstra | a69b0ca | 2016-02-24 18:45:44 +0100 | [diff] [blame] | 5193 | raw_spin_lock_irq(&ctx->lock); |
Peter Zijlstra | 60beda8 | 2016-01-26 14:55:02 +0100 | [diff] [blame] | 5194 | /* |
Peter Zijlstra | d8a8cfc | 2017-03-16 13:47:51 +0100 | [diff] [blame] | 5195 | * Mark this event as STATE_DEAD; there is no external reference to it |
Peter Zijlstra | a69b0ca | 2016-02-24 18:45:44 +0100 | [diff] [blame] | 5196 | * anymore. |
Peter Zijlstra | c6e5b73 | 2016-01-15 16:07:41 +0200 | [diff] [blame] | 5197 | * |
Peter Zijlstra | a69b0ca | 2016-02-24 18:45:44 +0100 | [diff] [blame] | 5198 | * Anybody acquiring event->child_mutex after the below loop _must_ |
| 5199 | * also see this, most importantly inherit_event() which will avoid |
| 5200 | * placing more children on the list. |
Peter Zijlstra | c6e5b73 | 2016-01-15 16:07:41 +0200 | [diff] [blame] | 5201 | * |
| 5202 | * Thus this guarantees that we will in fact observe and kill _ALL_ |
| 5203 | * child events. |
Peter Zijlstra | 60beda8 | 2016-01-26 14:55:02 +0100 | [diff] [blame] | 5204 | */ |
Peter Zijlstra | a69b0ca | 2016-02-24 18:45:44 +0100 | [diff] [blame] | 5205 | event->state = PERF_EVENT_STATE_DEAD; |
| 5206 | raw_spin_unlock_irq(&ctx->lock); |
| 5207 | |
| 5208 | perf_event_ctx_unlock(event, ctx); |
Peter Zijlstra | 60beda8 | 2016-01-26 14:55:02 +0100 | [diff] [blame] | 5209 | |
Peter Zijlstra | c6e5b73 | 2016-01-15 16:07:41 +0200 | [diff] [blame] | 5210 | again: |
| 5211 | mutex_lock(&event->child_mutex); |
| 5212 | list_for_each_entry(child, &event->child_list, child_list) { |
Al Viro | a6fa941 | 2012-08-20 14:59:25 +0100 | [diff] [blame] | 5213 | |
Peter Zijlstra | c6e5b73 | 2016-01-15 16:07:41 +0200 | [diff] [blame] | 5214 | /* |
| 5215 | * Cannot change, child events are not migrated, see the |
| 5216 | * comment with perf_event_ctx_lock_nested(). |
| 5217 | */ |
Will Deacon | 506458e | 2017-10-24 11:22:48 +0100 | [diff] [blame] | 5218 | ctx = READ_ONCE(child->ctx); |
Peter Zijlstra | c6e5b73 | 2016-01-15 16:07:41 +0200 | [diff] [blame] | 5219 | /* |
| 5220 | * Since child_mutex nests inside ctx::mutex, we must jump |
| 5221 | * through hoops. We start by grabbing a reference on the ctx. |
| 5222 | * |
| 5223 | * Since the event cannot get freed while we hold the |
| 5224 | * child_mutex, the context must also exist and have a !0 |
| 5225 | * reference count. |
| 5226 | */ |
| 5227 | get_ctx(ctx); |
| 5228 | |
| 5229 | /* |
| 5230 | * Now that we have a ctx ref, we can drop child_mutex, and |
| 5231 | * acquire ctx::mutex without fear of it going away. Then we |
| 5232 | * can re-acquire child_mutex. |
| 5233 | */ |
| 5234 | mutex_unlock(&event->child_mutex); |
| 5235 | mutex_lock(&ctx->mutex); |
| 5236 | mutex_lock(&event->child_mutex); |
| 5237 | |
| 5238 | /* |
| 5239 | * Now that we hold ctx::mutex and child_mutex, revalidate our |
| 5240 | * state, if child is still the first entry, it didn't get freed |
| 5241 | * and we can continue doing so. |
| 5242 | */ |
| 5243 | tmp = list_first_entry_or_null(&event->child_list, |
| 5244 | struct perf_event, child_list); |
| 5245 | if (tmp == child) { |
| 5246 | perf_remove_from_context(child, DETACH_GROUP); |
Peter Zijlstra | 82d9485 | 2018-01-09 13:10:30 +0100 | [diff] [blame] | 5247 | list_move(&child->child_list, &free_list); |
Peter Zijlstra | c6e5b73 | 2016-01-15 16:07:41 +0200 | [diff] [blame] | 5248 | /* |
| 5249 | * This matches the refcount bump in inherit_event(); |
| 5250 | * this can't be the last reference. |
| 5251 | */ |
| 5252 | put_event(event); |
| 5253 | } |
| 5254 | |
| 5255 | mutex_unlock(&event->child_mutex); |
| 5256 | mutex_unlock(&ctx->mutex); |
| 5257 | put_ctx(ctx); |
| 5258 | goto again; |
| 5259 | } |
| 5260 | mutex_unlock(&event->child_mutex); |
| 5261 | |
Peter Zijlstra | 82d9485 | 2018-01-09 13:10:30 +0100 | [diff] [blame] | 5262 | list_for_each_entry_safe(child, tmp, &free_list, child_list) { |
Peter Zijlstra | 1cf8dfe | 2019-07-13 11:21:25 +0200 | [diff] [blame] | 5263 | void *var = &child->ctx->refcount; |
| 5264 | |
Peter Zijlstra | 82d9485 | 2018-01-09 13:10:30 +0100 | [diff] [blame] | 5265 | list_del(&child->child_list); |
| 5266 | free_event(child); |
Peter Zijlstra | 1cf8dfe | 2019-07-13 11:21:25 +0200 | [diff] [blame] | 5267 | |
| 5268 | /* |
| 5269 | * Wake any perf_event_free_task() waiting for this event to be |
| 5270 | * freed. |
| 5271 | */ |
| 5272 | smp_mb(); /* pairs with wait_var_event() */ |
| 5273 | wake_up_var(var); |
Peter Zijlstra | 82d9485 | 2018-01-09 13:10:30 +0100 | [diff] [blame] | 5274 | } |
| 5275 | |
Peter Zijlstra | a4f4bb6 | 2016-02-24 18:45:42 +0100 | [diff] [blame] | 5276 | no_ctx: |
| 5277 | put_event(event); /* Must be the 'last' reference */ |
Peter Zijlstra | 683ede4 | 2014-05-05 12:11:24 +0200 | [diff] [blame] | 5278 | return 0; |
| 5279 | } |
| 5280 | EXPORT_SYMBOL_GPL(perf_event_release_kernel); |
| 5281 | |
Peter Zijlstra | 8b10c5e | 2015-05-01 16:08:46 +0200 | [diff] [blame] | 5282 | /* |
| 5283 | * Called when the last reference to the file is gone. |
| 5284 | */ |
Al Viro | a6fa941 | 2012-08-20 14:59:25 +0100 | [diff] [blame] | 5285 | static int perf_release(struct inode *inode, struct file *file) |
| 5286 | { |
Peter Zijlstra | c6e5b73 | 2016-01-15 16:07:41 +0200 | [diff] [blame] | 5287 | perf_event_release_kernel(file->private_data); |
Al Viro | a6fa941 | 2012-08-20 14:59:25 +0100 | [diff] [blame] | 5288 | return 0; |
Peter Zijlstra | a66a305 | 2009-11-23 11:37:23 +0100 | [diff] [blame] | 5289 | } |
| 5290 | |
Peter Zijlstra | ca0dd44 | 2017-09-05 13:23:44 +0200 | [diff] [blame] | 5291 | static u64 __perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5292 | { |
| 5293 | struct perf_event *child; |
| 5294 | u64 total = 0; |
| 5295 | |
Peter Zijlstra | 59ed446 | 2009-11-20 22:19:55 +0100 | [diff] [blame] | 5296 | *enabled = 0; |
| 5297 | *running = 0; |
| 5298 | |
Peter Zijlstra | 6f10581 | 2009-11-20 22:19:56 +0100 | [diff] [blame] | 5299 | mutex_lock(&event->child_mutex); |
Sukadev Bhattiprolu | 01add3e | 2015-09-03 20:07:46 -0700 | [diff] [blame] | 5300 | |
Sukadev Bhattiprolu | 7d88962 | 2015-09-03 20:07:50 -0700 | [diff] [blame] | 5301 | (void)perf_event_read(event, false); |
Sukadev Bhattiprolu | 01add3e | 2015-09-03 20:07:46 -0700 | [diff] [blame] | 5302 | total += perf_event_count(event); |
| 5303 | |
Peter Zijlstra | 59ed446 | 2009-11-20 22:19:55 +0100 | [diff] [blame] | 5304 | *enabled += event->total_time_enabled + |
| 5305 | atomic64_read(&event->child_total_time_enabled); |
| 5306 | *running += event->total_time_running + |
| 5307 | atomic64_read(&event->child_total_time_running); |
| 5308 | |
| 5309 | list_for_each_entry(child, &event->child_list, child_list) { |
Sukadev Bhattiprolu | 7d88962 | 2015-09-03 20:07:50 -0700 | [diff] [blame] | 5310 | (void)perf_event_read(child, false); |
Sukadev Bhattiprolu | 01add3e | 2015-09-03 20:07:46 -0700 | [diff] [blame] | 5311 | total += perf_event_count(child); |
Peter Zijlstra | 59ed446 | 2009-11-20 22:19:55 +0100 | [diff] [blame] | 5312 | *enabled += child->total_time_enabled; |
| 5313 | *running += child->total_time_running; |
| 5314 | } |
Peter Zijlstra | 6f10581 | 2009-11-20 22:19:56 +0100 | [diff] [blame] | 5315 | mutex_unlock(&event->child_mutex); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5316 | |
| 5317 | return total; |
| 5318 | } |
Peter Zijlstra | ca0dd44 | 2017-09-05 13:23:44 +0200 | [diff] [blame] | 5319 | |
| 5320 | u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running) |
| 5321 | { |
| 5322 | struct perf_event_context *ctx; |
| 5323 | u64 count; |
| 5324 | |
| 5325 | ctx = perf_event_ctx_lock(event); |
| 5326 | count = __perf_event_read_value(event, enabled, running); |
| 5327 | perf_event_ctx_unlock(event, ctx); |
| 5328 | |
| 5329 | return count; |
| 5330 | } |
Arjan van de Ven | fb0459d | 2009-09-25 12:25:56 +0200 | [diff] [blame] | 5331 | EXPORT_SYMBOL_GPL(perf_event_read_value); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5332 | |
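perf_event_read_value() is the in-kernel counterpart of reading a counter fd with the TOTAL_TIME_ENABLED/RUNNING formats, and the enabled/running pair is what lets a caller compensate for multiplexing. A hypothetical in-kernel caller might scale a multiplexed count as below; the function name is illustrative, the arithmetic assumes <linux/math64.h> is available, and overflow of count * enabled is ignored for brevity.

	static u64 scaled_count(struct perf_event *event)
	{
		u64 enabled, running;
		u64 count = perf_event_read_value(event, &enabled, &running);

		/* Event was multiplexed: extrapolate to the full enabled time. */
		if (running && running < enabled)
			count = div64_u64(count * enabled, running);

		return count;
	}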
Sukadev Bhattiprolu | 7d88962 | 2015-09-03 20:07:50 -0700 | [diff] [blame] | 5333 | static int __perf_read_group_add(struct perf_event *leader, |
Peter Zijlstra | fa8c269 | 2015-09-03 20:07:49 -0700 | [diff] [blame] | 5334 | u64 read_format, u64 *values) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5335 | { |
Jiri Olsa | 2aeb188 | 2017-07-20 16:14:55 +0200 | [diff] [blame] | 5336 | struct perf_event_context *ctx = leader->ctx; |
Peter Zijlstra | fa8c269 | 2015-09-03 20:07:49 -0700 | [diff] [blame] | 5337 | struct perf_event *sub; |
Jiri Olsa | 2aeb188 | 2017-07-20 16:14:55 +0200 | [diff] [blame] | 5338 | unsigned long flags; |
Peter Zijlstra | fa8c269 | 2015-09-03 20:07:49 -0700 | [diff] [blame] | 5339 | int n = 1; /* skip @nr */ |
Sukadev Bhattiprolu | 7d88962 | 2015-09-03 20:07:50 -0700 | [diff] [blame] | 5340 | int ret; |
Peter Zijlstra | abf4868 | 2009-11-20 22:19:49 +0100 | [diff] [blame] | 5341 | |
Sukadev Bhattiprolu | 7d88962 | 2015-09-03 20:07:50 -0700 | [diff] [blame] | 5342 | ret = perf_event_read(leader, true); |
| 5343 | if (ret) |
| 5344 | return ret; |
Peter Zijlstra | f63a8da | 2015-01-23 12:24:14 +0100 | [diff] [blame] | 5345 | |
Peter Zijlstra | a9cd819 | 2017-09-05 13:38:24 +0200 | [diff] [blame] | 5346 | raw_spin_lock_irqsave(&ctx->lock, flags); |
| 5347 | |
Peter Zijlstra | fa8c269 | 2015-09-03 20:07:49 -0700 | [diff] [blame] | 5348 | /* |
| 5349 | * Since we co-schedule groups, {enabled,running} times of siblings |
| 5350 | * will be identical to those of the leader, so we only publish one |
| 5351 | * set. |
| 5352 | */ |
| 5353 | if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) { |
| 5354 | values[n++] += leader->total_time_enabled + |
| 5355 | atomic64_read(&leader->child_total_time_enabled); |
| 5356 | } |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5357 | |
Peter Zijlstra | fa8c269 | 2015-09-03 20:07:49 -0700 | [diff] [blame] | 5358 | if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) { |
| 5359 | values[n++] += leader->total_time_running + |
| 5360 | atomic64_read(&leader->child_total_time_running); |
| 5361 | } |
| 5362 | |
| 5363 | /* |
| 5364 | * Write {count,id} tuples for every sibling. |
| 5365 | */ |
| 5366 | values[n++] += perf_event_count(leader); |
Peter Zijlstra | abf4868 | 2009-11-20 22:19:49 +0100 | [diff] [blame] | 5367 | if (read_format & PERF_FORMAT_ID) |
| 5368 | values[n++] = primary_event_id(leader); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5369 | |
Peter Zijlstra | edb3959 | 2018-03-15 17:36:56 +0100 | [diff] [blame] | 5370 | for_each_sibling_event(sub, leader) { |
Peter Zijlstra | fa8c269 | 2015-09-03 20:07:49 -0700 | [diff] [blame] | 5371 | values[n++] += perf_event_count(sub); |
Peter Zijlstra | abf4868 | 2009-11-20 22:19:49 +0100 | [diff] [blame] | 5372 | if (read_format & PERF_FORMAT_ID) |
| 5373 | values[n++] = primary_event_id(sub); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5374 | } |
Sukadev Bhattiprolu | 7d88962 | 2015-09-03 20:07:50 -0700 | [diff] [blame] | 5375 | |
Jiri Olsa | 2aeb188 | 2017-07-20 16:14:55 +0200 | [diff] [blame] | 5376 | raw_spin_unlock_irqrestore(&ctx->lock, flags); |
Sukadev Bhattiprolu | 7d88962 | 2015-09-03 20:07:50 -0700 | [diff] [blame] | 5377 | return 0; |
Peter Zijlstra | fa8c269 | 2015-09-03 20:07:49 -0700 | [diff] [blame] | 5378 | } |
| 5379 | |
| 5380 | static int perf_read_group(struct perf_event *event, |
| 5381 | u64 read_format, char __user *buf) |
| 5382 | { |
| 5383 | struct perf_event *leader = event->group_leader, *child; |
| 5384 | struct perf_event_context *ctx = leader->ctx; |
Sukadev Bhattiprolu | 7d88962 | 2015-09-03 20:07:50 -0700 | [diff] [blame] | 5385 | int ret; |
Peter Zijlstra | fa8c269 | 2015-09-03 20:07:49 -0700 | [diff] [blame] | 5386 | u64 *values; |
| 5387 | |
| 5388 | lockdep_assert_held(&ctx->mutex); |
| 5389 | |
| 5390 | values = kzalloc(event->read_size, GFP_KERNEL); |
| 5391 | if (!values) |
| 5392 | return -ENOMEM; |
| 5393 | |
| 5394 | values[0] = 1 + leader->nr_siblings; |
| 5395 | |
| 5396 | /* |
| 5397 | * By locking the child_mutex of the leader we effectively |
| 5398 | * lock the child list of all siblings. XXX: explain how. |
| 5399 | */ |
| 5400 | mutex_lock(&leader->child_mutex); |
| 5401 | |
Sukadev Bhattiprolu | 7d88962 | 2015-09-03 20:07:50 -0700 | [diff] [blame] | 5402 | ret = __perf_read_group_add(leader, read_format, values); |
| 5403 | if (ret) |
| 5404 | goto unlock; |
| 5405 | |
| 5406 | list_for_each_entry(child, &leader->child_list, child_list) { |
| 5407 | ret = __perf_read_group_add(child, read_format, values); |
| 5408 | if (ret) |
| 5409 | goto unlock; |
| 5410 | } |
Peter Zijlstra | fa8c269 | 2015-09-03 20:07:49 -0700 | [diff] [blame] | 5411 | |
| 5412 | mutex_unlock(&leader->child_mutex); |
| 5413 | |
Sukadev Bhattiprolu | 7d88962 | 2015-09-03 20:07:50 -0700 | [diff] [blame] | 5414 | ret = event->read_size; |
Peter Zijlstra | fa8c269 | 2015-09-03 20:07:49 -0700 | [diff] [blame] | 5415 | if (copy_to_user(buf, values, event->read_size)) |
| 5416 | ret = -EFAULT; |
Sukadev Bhattiprolu | 7d88962 | 2015-09-03 20:07:50 -0700 | [diff] [blame] | 5417 | goto out; |
Peter Zijlstra | fa8c269 | 2015-09-03 20:07:49 -0700 | [diff] [blame] | 5418 | |
Sukadev Bhattiprolu | 7d88962 | 2015-09-03 20:07:50 -0700 | [diff] [blame] | 5419 | unlock: |
| 5420 | mutex_unlock(&leader->child_mutex); |
| 5421 | out: |
Peter Zijlstra | fa8c269 | 2015-09-03 20:07:49 -0700 | [diff] [blame] | 5422 | kfree(values); |
Peter Zijlstra | abf4868 | 2009-11-20 22:19:49 +0100 | [diff] [blame] | 5423 | return ret; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5424 | } |
| 5425 | |
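For reference, the values[] buffer filled in by perf_read_group() above is exactly what userspace sees when it read()s a group leader opened with PERF_FORMAT_GROUP. The sketch below is userspace code, not part of this file; it assumes read_format = PERF_FORMAT_GROUP | PERF_FORMAT_TOTAL_TIME_ENABLED | PERF_FORMAT_TOTAL_TIME_RUNNING | PERF_FORMAT_ID, and the struct and function names are illustrative.

	#include <stdint.h>
	#include <stdio.h>
	#include <unistd.h>

	struct group_read {
		uint64_t nr;		/* values[0]: 1 + nr_siblings */
		uint64_t time_enabled;	/* leader's enabled time, shared by the group */
		uint64_t time_running;	/* leader's running time, shared by the group */
		struct {
			uint64_t value;	/* perf_event_count() of this event */
			uint64_t id;	/* primary_event_id() of this event */
		} cnt[];
	};

	static int dump_group(int group_fd)
	{
		uint64_t buf[4096 / sizeof(uint64_t)];
		struct group_read *gr = (struct group_read *)buf;

		if (read(group_fd, buf, sizeof(buf)) < 0)
			return -1;

		for (uint64_t i = 0; i < gr->nr; i++)
			printf("event %llu: value %llu\n",
			       (unsigned long long)gr->cnt[i].id,
			       (unsigned long long)gr->cnt[i].value);
		return 0;
	}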
Peter Zijlstra (Intel) | b15f495 | 2015-09-03 20:07:47 -0700 | [diff] [blame] | 5426 | static int perf_read_one(struct perf_event *event, |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5427 | u64 read_format, char __user *buf) |
| 5428 | { |
Peter Zijlstra | 59ed446 | 2009-11-20 22:19:55 +0100 | [diff] [blame] | 5429 | u64 enabled, running; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5430 | u64 values[4]; |
| 5431 | int n = 0; |
| 5432 | |
Peter Zijlstra | ca0dd44 | 2017-09-05 13:23:44 +0200 | [diff] [blame] | 5433 | values[n++] = __perf_event_read_value(event, &enabled, &running); |
Peter Zijlstra | 59ed446 | 2009-11-20 22:19:55 +0100 | [diff] [blame] | 5434 | if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) |
| 5435 | values[n++] = enabled; |
| 5436 | if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) |
| 5437 | values[n++] = running; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5438 | if (read_format & PERF_FORMAT_ID) |
| 5439 | values[n++] = primary_event_id(event); |
| 5440 | |
| 5441 | if (copy_to_user(buf, values, n * sizeof(u64))) |
| 5442 | return -EFAULT; |
| 5443 | |
| 5444 | return n * sizeof(u64); |
| 5445 | } |
| 5446 | |
Jiri Olsa | dc63398 | 2014-09-12 13:18:26 +0200 | [diff] [blame] | 5447 | static bool is_event_hup(struct perf_event *event) |
| 5448 | { |
| 5449 | bool no_children; |
| 5450 | |
Peter Zijlstra | a69b0ca | 2016-02-24 18:45:44 +0100 | [diff] [blame] | 5451 | if (event->state > PERF_EVENT_STATE_EXIT) |
Jiri Olsa | dc63398 | 2014-09-12 13:18:26 +0200 | [diff] [blame] | 5452 | return false; |
| 5453 | |
| 5454 | mutex_lock(&event->child_mutex); |
| 5455 | no_children = list_empty(&event->child_list); |
| 5456 | mutex_unlock(&event->child_mutex); |
| 5457 | return no_children; |
| 5458 | } |
| 5459 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5460 | /* |
| 5461 | * Read the performance event - simple non blocking version for now |
| 5462 | */ |
| 5463 | static ssize_t |
Peter Zijlstra (Intel) | b15f495 | 2015-09-03 20:07:47 -0700 | [diff] [blame] | 5464 | __perf_read(struct perf_event *event, char __user *buf, size_t count) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5465 | { |
| 5466 | u64 read_format = event->attr.read_format; |
| 5467 | int ret; |
| 5468 | |
| 5469 | /* |
Tobias Tefke | 788faab | 2018-07-09 12:57:15 +0200 | [diff] [blame] | 5470 | * Return end-of-file for a read on an event that is in |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5471 | * error state (i.e. because it was pinned but it couldn't be |
| 5472 | * scheduled on to the CPU at some point). |
| 5473 | */ |
| 5474 | if (event->state == PERF_EVENT_STATE_ERROR) |
| 5475 | return 0; |
| 5476 | |
Arnaldo Carvalho de Melo | c320c7b | 2010-10-20 12:50:11 -0200 | [diff] [blame] | 5477 | if (count < event->read_size) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5478 | return -ENOSPC; |
| 5479 | |
| 5480 | WARN_ON_ONCE(event->ctx->parent_ctx); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5481 | if (read_format & PERF_FORMAT_GROUP) |
Peter Zijlstra (Intel) | b15f495 | 2015-09-03 20:07:47 -0700 | [diff] [blame] | 5482 | ret = perf_read_group(event, read_format, buf); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5483 | else |
Peter Zijlstra (Intel) | b15f495 | 2015-09-03 20:07:47 -0700 | [diff] [blame] | 5484 | ret = perf_read_one(event, read_format, buf); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5485 | |
| 5486 | return ret; |
| 5487 | } |
| 5488 | |
| 5489 | static ssize_t |
| 5490 | perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) |
| 5491 | { |
| 5492 | struct perf_event *event = file->private_data; |
Peter Zijlstra | f63a8da | 2015-01-23 12:24:14 +0100 | [diff] [blame] | 5493 | struct perf_event_context *ctx; |
| 5494 | int ret; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5495 | |
Joel Fernandes (Google) | da97e18 | 2019-10-14 13:03:08 -0400 | [diff] [blame] | 5496 | ret = security_perf_event_read(event); |
| 5497 | if (ret) |
| 5498 | return ret; |
| 5499 | |
Peter Zijlstra | f63a8da | 2015-01-23 12:24:14 +0100 | [diff] [blame] | 5500 | ctx = perf_event_ctx_lock(event); |
Peter Zijlstra (Intel) | b15f495 | 2015-09-03 20:07:47 -0700 | [diff] [blame] | 5501 | ret = __perf_read(event, buf, count); |
Peter Zijlstra | f63a8da | 2015-01-23 12:24:14 +0100 | [diff] [blame] | 5502 | perf_event_ctx_unlock(event, ctx); |
| 5503 | |
| 5504 | return ret; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5505 | } |
| 5506 | |
Al Viro | 9dd9574 | 2017-07-03 00:42:43 -0400 | [diff] [blame] | 5507 | static __poll_t perf_poll(struct file *file, poll_table *wait) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5508 | { |
| 5509 | struct perf_event *event = file->private_data; |
Steven Rostedt (VMware) | 56de4e8 | 2019-12-13 13:21:30 -0500 | [diff] [blame] | 5510 | struct perf_buffer *rb; |
Linus Torvalds | a9a0884 | 2018-02-11 14:34:03 -0800 | [diff] [blame] | 5511 | __poll_t events = EPOLLHUP; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5512 | |
Sebastian Andrzej Siewior | e708d7a | 2014-08-04 15:31:08 +0200 | [diff] [blame] | 5513 | poll_wait(file, &event->waitq, wait); |
Jiri Olsa | 179033b | 2014-08-07 11:48:26 -0400 | [diff] [blame] | 5514 | |
Jiri Olsa | dc63398 | 2014-09-12 13:18:26 +0200 | [diff] [blame] | 5515 | if (is_event_hup(event)) |
Jiri Olsa | 179033b | 2014-08-07 11:48:26 -0400 | [diff] [blame] | 5516 | return events; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5517 | |
Peter Zijlstra | 10c6db1 | 2011-11-26 02:47:31 +0100 | [diff] [blame] | 5518 | /* |
Peter Zijlstra | 9bb5d40 | 2013-06-04 10:44:21 +0200 | [diff] [blame] | 5519 | * Pin the event->rb by taking event->mmap_mutex; otherwise |
| 5520 | * perf_event_set_output() can swizzle our rb and make us miss wakeups. |
Peter Zijlstra | 10c6db1 | 2011-11-26 02:47:31 +0100 | [diff] [blame] | 5521 | */ |
| 5522 | mutex_lock(&event->mmap_mutex); |
Peter Zijlstra | 9bb5d40 | 2013-06-04 10:44:21 +0200 | [diff] [blame] | 5523 | rb = event->rb; |
| 5524 | if (rb) |
Frederic Weisbecker | 7636913 | 2011-05-19 19:55:04 +0200 | [diff] [blame] | 5525 | events = atomic_xchg(&rb->poll, 0); |
Peter Zijlstra | 10c6db1 | 2011-11-26 02:47:31 +0100 | [diff] [blame] | 5526 | mutex_unlock(&event->mmap_mutex); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5527 | return events; |
| 5528 | } |
| 5529 | |
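perf_poll() above is what backs poll()/epoll on a perf fd: EPOLLIN bits accumulate in rb->poll as the ring buffer fills, and EPOLLHUP is reported once the event is dead with no children left. A minimal userspace consumer could look like the sketch below; the helper name is illustrative and error handling is reduced to return codes.

	#include <poll.h>

	/* Block until the perf fd has data to read or the event hung up.
	 * Returns 1 on data, 0 on hangup, -1 on error. */
	static int wait_for_perf_data(int perf_fd)
	{
		struct pollfd pfd = { .fd = perf_fd, .events = POLLIN };

		if (poll(&pfd, 1, -1) < 0)
			return -1;
		if (pfd.revents & POLLHUP)
			return 0;
		return (pfd.revents & POLLIN) ? 1 : -1;
	}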
Peter Zijlstra | f63a8da | 2015-01-23 12:24:14 +0100 | [diff] [blame] | 5530 | static void _perf_event_reset(struct perf_event *event) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5531 | { |
Sukadev Bhattiprolu | 7d88962 | 2015-09-03 20:07:50 -0700 | [diff] [blame] | 5532 | (void)perf_event_read(event, false); |
Peter Zijlstra | e785059 | 2010-05-21 14:43:08 +0200 | [diff] [blame] | 5533 | local64_set(&event->count, 0); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5534 | perf_event_update_userpage(event); |
| 5535 | } |
| 5536 | |
Like Xu | 52ba4b0 | 2019-10-27 18:52:39 +0800 | [diff] [blame] | 5537 | /* Assume it's not an event with inherit set. */ |
| 5538 | u64 perf_event_pause(struct perf_event *event, bool reset) |
| 5539 | { |
| 5540 | struct perf_event_context *ctx; |
| 5541 | u64 count; |
| 5542 | |
| 5543 | ctx = perf_event_ctx_lock(event); |
| 5544 | WARN_ON_ONCE(event->attr.inherit); |
| 5545 | _perf_event_disable(event); |
| 5546 | count = local64_read(&event->count); |
| 5547 | if (reset) |
| 5548 | local64_set(&event->count, 0); |
| 5549 | perf_event_ctx_unlock(event, ctx); |
| 5550 | |
| 5551 | return count; |
| 5552 | } |
| 5553 | EXPORT_SYMBOL_GPL(perf_event_pause); |
| 5554 | |
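perf_event_pause() gives in-kernel users (a guest vPMU implementation, for instance) an atomic disable-and-read, optionally clearing the count. The two wrappers below are a hypothetical caller with illustrative names, showing the difference the reset flag makes.

	/* Stop the counter and return what it accumulated; keep the value so a
	 * later perf_event_enable() continues from where it left off. */
	static u64 vpmu_freeze_counter(struct perf_event *event)
	{
		return perf_event_pause(event, false);
	}

	/* Stop the counter, return the value, and let the next measurement
	 * window start from zero. */
	static u64 vpmu_drain_counter(struct perf_event *event)
	{
		return perf_event_pause(event, true);
	}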
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5555 | /* |
| 5556 | * Holding the top-level event's child_mutex means that any |
| 5557 | * descendant process that has inherited this event will block |
Peter Zijlstra | 8ba289b | 2016-01-26 13:06:56 +0100 | [diff] [blame] | 5558 | * in perf_event_exit_event() if it goes to exit, thus satisfying the |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5559 | * task existence requirements of perf_event_enable/disable. |
| 5560 | */ |
| 5561 | static void perf_event_for_each_child(struct perf_event *event, |
| 5562 | void (*func)(struct perf_event *)) |
| 5563 | { |
| 5564 | struct perf_event *child; |
| 5565 | |
| 5566 | WARN_ON_ONCE(event->ctx->parent_ctx); |
Peter Zijlstra | f63a8da | 2015-01-23 12:24:14 +0100 | [diff] [blame] | 5567 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5568 | mutex_lock(&event->child_mutex); |
| 5569 | func(event); |
| 5570 | list_for_each_entry(child, &event->child_list, child_list) |
| 5571 | func(child); |
| 5572 | mutex_unlock(&event->child_mutex); |
| 5573 | } |
| 5574 | |
| 5575 | static void perf_event_for_each(struct perf_event *event, |
| 5576 | void (*func)(struct perf_event *)) |
| 5577 | { |
| 5578 | struct perf_event_context *ctx = event->ctx; |
| 5579 | struct perf_event *sibling; |
| 5580 | |
Peter Zijlstra | f63a8da | 2015-01-23 12:24:14 +0100 | [diff] [blame] | 5581 | lockdep_assert_held(&ctx->mutex); |
| 5582 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5583 | event = event->group_leader; |
| 5584 | |
| 5585 | perf_event_for_each_child(event, func); |
Peter Zijlstra | edb3959 | 2018-03-15 17:36:56 +0100 | [diff] [blame] | 5586 | for_each_sibling_event(sibling, event) |
Michael Ellerman | 724b6da | 2012-04-11 11:54:13 +1000 | [diff] [blame] | 5587 | perf_event_for_each_child(sibling, func); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5588 | } |
| 5589 | |
Peter Zijlstra | fae3fde | 2016-01-11 15:00:50 +0100 | [diff] [blame] | 5590 | static void __perf_event_period(struct perf_event *event, |
| 5591 | struct perf_cpu_context *cpuctx, |
| 5592 | struct perf_event_context *ctx, |
| 5593 | void *info) |
Peter Zijlstra | 0017960 | 2015-11-30 16:26:35 +0100 | [diff] [blame] | 5594 | { |
Peter Zijlstra | fae3fde | 2016-01-11 15:00:50 +0100 | [diff] [blame] | 5595 | u64 value = *((u64 *)info); |
Peter Zijlstra | c7999c6 | 2015-08-04 19:22:49 +0200 | [diff] [blame] | 5596 | bool active; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5597 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5598 | if (event->attr.freq) { |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5599 | event->attr.sample_freq = value; |
| 5600 | } else { |
| 5601 | event->attr.sample_period = value; |
| 5602 | event->hw.sample_period = value; |
| 5603 | } |
Peter Zijlstra | bad7192 | 2013-11-27 13:54:38 +0000 | [diff] [blame] | 5604 | |
| 5605 | active = (event->state == PERF_EVENT_STATE_ACTIVE); |
| 5606 | if (active) { |
| 5607 | perf_pmu_disable(ctx->pmu); |
Peter Zijlstra | 1e02cd4 | 2016-03-10 15:39:24 +0100 | [diff] [blame] | 5608 | /* |
| 5609 | * We could be throttled; unthrottle now to avoid the tick |
| 5610 | * trying to unthrottle while we already re-started the event. |
| 5611 | */ |
| 5612 | if (event->hw.interrupts == MAX_INTERRUPTS) { |
| 5613 | event->hw.interrupts = 0; |
| 5614 | perf_log_throttle(event, 1); |
| 5615 | } |
Peter Zijlstra | bad7192 | 2013-11-27 13:54:38 +0000 | [diff] [blame] | 5616 | event->pmu->stop(event, PERF_EF_UPDATE); |
| 5617 | } |
| 5618 | |
| 5619 | local64_set(&event->hw.period_left, 0); |
| 5620 | |
| 5621 | if (active) { |
| 5622 | event->pmu->start(event, PERF_EF_RELOAD); |
| 5623 | perf_pmu_enable(ctx->pmu); |
| 5624 | } |
Peter Zijlstra | c7999c6 | 2015-08-04 19:22:49 +0200 | [diff] [blame] | 5625 | } |
| 5626 | |
Jiri Olsa | 81ec3f3 | 2019-02-04 13:35:32 +0100 | [diff] [blame] | 5627 | static int perf_event_check_period(struct perf_event *event, u64 value) |
| 5628 | { |
| 5629 | return event->pmu->check_period(event, value); |
| 5630 | } |
| 5631 | |
Like Xu | 3ca270f | 2019-10-27 18:52:38 +0800 | [diff] [blame] | 5632 | static int _perf_event_period(struct perf_event *event, u64 value) |
Peter Zijlstra | c7999c6 | 2015-08-04 19:22:49 +0200 | [diff] [blame] | 5633 | { |
Peter Zijlstra | c7999c6 | 2015-08-04 19:22:49 +0200 | [diff] [blame] | 5634 | if (!is_sampling_event(event)) |
| 5635 | return -EINVAL; |
| 5636 | |
Peter Zijlstra | c7999c6 | 2015-08-04 19:22:49 +0200 | [diff] [blame] | 5637 | if (!value) |
| 5638 | return -EINVAL; |
| 5639 | |
| 5640 | if (event->attr.freq && value > sysctl_perf_event_sample_rate) |
| 5641 | return -EINVAL; |
| 5642 | |
Jiri Olsa | 81ec3f3 | 2019-02-04 13:35:32 +0100 | [diff] [blame] | 5643 | if (perf_event_check_period(event, value)) |
| 5644 | return -EINVAL; |
| 5645 | |
Ravi Bangoria | 913a90b | 2019-06-04 09:59:53 +0530 | [diff] [blame] | 5646 | if (!event->attr.freq && (value & (1ULL << 63))) |
| 5647 | return -EINVAL; |
| 5648 | |
Peter Zijlstra | fae3fde | 2016-01-11 15:00:50 +0100 | [diff] [blame] | 5649 | event_function_call(event, __perf_event_period, &value); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5650 | |
Peter Zijlstra | c7999c6 | 2015-08-04 19:22:49 +0200 | [diff] [blame] | 5651 | return 0; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5652 | } |
| 5653 | |
Like Xu | 3ca270f | 2019-10-27 18:52:38 +0800 | [diff] [blame] | 5654 | int perf_event_period(struct perf_event *event, u64 value) |
| 5655 | { |
| 5656 | struct perf_event_context *ctx; |
| 5657 | int ret; |
| 5658 | |
| 5659 | ctx = perf_event_ctx_lock(event); |
| 5660 | ret = _perf_event_period(event, value); |
| 5661 | perf_event_ctx_unlock(event, ctx); |
| 5662 | |
| 5663 | return ret; |
| 5664 | } |
| 5665 | EXPORT_SYMBOL_GPL(perf_event_period); |
| 5666 | |
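The checks in _perf_event_period() above (a non-zero value, frequencies bounded by sysctl_perf_event_sample_rate, bit 63 rejected for non-freq events) are what a PERF_EVENT_IOC_PERIOD ioctl, dispatched further down in _perf_ioctl(), runs into. A userspace sketch follows; the helper name is illustrative.

	#include <linux/perf_event.h>
	#include <stdint.h>
	#include <sys/ioctl.h>

	/* Ask the kernel to re-arm the event with a new sample period.
	 * Returns 0 on success, -1 with errno set (e.g. EINVAL) on failure. */
	static int set_sample_period(int perf_fd, uint64_t period)
	{
		return ioctl(perf_fd, PERF_EVENT_IOC_PERIOD, &period);
	}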
Peter Zijlstra | ac9721f | 2010-05-27 12:54:41 +0200 | [diff] [blame] | 5667 | static const struct file_operations perf_fops; |
| 5668 | |
Al Viro | 2903ff0 | 2012-08-28 12:52:22 -0400 | [diff] [blame] | 5669 | static inline int perf_fget_light(int fd, struct fd *p) |
Peter Zijlstra | ac9721f | 2010-05-27 12:54:41 +0200 | [diff] [blame] | 5670 | { |
Al Viro | 2903ff0 | 2012-08-28 12:52:22 -0400 | [diff] [blame] | 5671 | struct fd f = fdget(fd); |
| 5672 | if (!f.file) |
| 5673 | return -EBADF; |
Peter Zijlstra | ac9721f | 2010-05-27 12:54:41 +0200 | [diff] [blame] | 5674 | |
Al Viro | 2903ff0 | 2012-08-28 12:52:22 -0400 | [diff] [blame] | 5675 | if (f.file->f_op != &perf_fops) { |
| 5676 | fdput(f); |
| 5677 | return -EBADF; |
Peter Zijlstra | ac9721f | 2010-05-27 12:54:41 +0200 | [diff] [blame] | 5678 | } |
Al Viro | 2903ff0 | 2012-08-28 12:52:22 -0400 | [diff] [blame] | 5679 | *p = f; |
| 5680 | return 0; |
Peter Zijlstra | ac9721f | 2010-05-27 12:54:41 +0200 | [diff] [blame] | 5681 | } |
| 5682 | |
| 5683 | static int perf_event_set_output(struct perf_event *event, |
| 5684 | struct perf_event *output_event); |
Li Zefan | 6fb2915 | 2009-10-15 11:21:42 +0800 | [diff] [blame] | 5685 | static int perf_event_set_filter(struct perf_event *event, void __user *arg); |
Milind Chabbi | 32ff77e | 2018-03-12 14:45:47 +0100 | [diff] [blame] | 5686 | static int perf_copy_attr(struct perf_event_attr __user *uattr, |
| 5687 | struct perf_event_attr *attr); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5688 | |
Peter Zijlstra | f63a8da | 2015-01-23 12:24:14 +0100 | [diff] [blame] | 5689 | static long _perf_ioctl(struct perf_event *event, unsigned int cmd, unsigned long arg) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5690 | { |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5691 | void (*func)(struct perf_event *); |
| 5692 | u32 flags = arg; |
| 5693 | |
| 5694 | switch (cmd) { |
| 5695 | case PERF_EVENT_IOC_ENABLE: |
Peter Zijlstra | f63a8da | 2015-01-23 12:24:14 +0100 | [diff] [blame] | 5696 | func = _perf_event_enable; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5697 | break; |
| 5698 | case PERF_EVENT_IOC_DISABLE: |
Peter Zijlstra | f63a8da | 2015-01-23 12:24:14 +0100 | [diff] [blame] | 5699 | func = _perf_event_disable; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5700 | break; |
| 5701 | case PERF_EVENT_IOC_RESET: |
Peter Zijlstra | f63a8da | 2015-01-23 12:24:14 +0100 | [diff] [blame] | 5702 | func = _perf_event_reset; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5703 | break; |
| 5704 | |
| 5705 | case PERF_EVENT_IOC_REFRESH: |
Peter Zijlstra | f63a8da | 2015-01-23 12:24:14 +0100 | [diff] [blame] | 5706 | return _perf_event_refresh(event, arg); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5707 | |
| 5708 | case PERF_EVENT_IOC_PERIOD: |
Like Xu | 3ca270f | 2019-10-27 18:52:38 +0800 | [diff] [blame] | 5709 | { |
| 5710 | u64 value; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5711 | |
Like Xu | 3ca270f | 2019-10-27 18:52:38 +0800 | [diff] [blame] | 5712 | if (copy_from_user(&value, (u64 __user *)arg, sizeof(value))) |
| 5713 | return -EFAULT; |
| 5714 | |
| 5715 | return _perf_event_period(event, value); |
| 5716 | } |
Jiri Olsa | cf4957f | 2012-10-24 13:37:58 +0200 | [diff] [blame] | 5717 | case PERF_EVENT_IOC_ID: |
| 5718 | { |
| 5719 | u64 id = primary_event_id(event); |
| 5720 | |
| 5721 | if (copy_to_user((void __user *)arg, &id, sizeof(id))) |
| 5722 | return -EFAULT; |
| 5723 | return 0; |
| 5724 | } |
| 5725 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5726 | case PERF_EVENT_IOC_SET_OUTPUT: |
Peter Zijlstra | ac9721f | 2010-05-27 12:54:41 +0200 | [diff] [blame] | 5727 | { |
Peter Zijlstra | ac9721f | 2010-05-27 12:54:41 +0200 | [diff] [blame] | 5728 | int ret; |
Peter Zijlstra | ac9721f | 2010-05-27 12:54:41 +0200 | [diff] [blame] | 5729 | if (arg != -1) { |
Al Viro | 2903ff0 | 2012-08-28 12:52:22 -0400 | [diff] [blame] | 5730 | struct perf_event *output_event; |
| 5731 | struct fd output; |
| 5732 | ret = perf_fget_light(arg, &output); |
| 5733 | if (ret) |
| 5734 | return ret; |
| 5735 | output_event = output.file->private_data; |
| 5736 | ret = perf_event_set_output(event, output_event); |
| 5737 | fdput(output); |
| 5738 | } else { |
| 5739 | ret = perf_event_set_output(event, NULL); |
Peter Zijlstra | ac9721f | 2010-05-27 12:54:41 +0200 | [diff] [blame] | 5740 | } |
Peter Zijlstra | ac9721f | 2010-05-27 12:54:41 +0200 | [diff] [blame] | 5741 | return ret; |
| 5742 | } |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5743 | |
Li Zefan | 6fb2915 | 2009-10-15 11:21:42 +0800 | [diff] [blame] | 5744 | case PERF_EVENT_IOC_SET_FILTER: |
| 5745 | return perf_event_set_filter(event, (void __user *)arg); |
| 5746 | |
Alexei Starovoitov | 2541517 | 2015-03-25 12:49:20 -0700 | [diff] [blame] | 5747 | case PERF_EVENT_IOC_SET_BPF: |
Andrii Nakryiko | 652c1b1 | 2021-08-15 00:05:56 -0700 | [diff] [blame] | 5748 | { |
| 5749 | struct bpf_prog *prog; |
| 5750 | int err; |
| 5751 | |
| 5752 | prog = bpf_prog_get(arg); |
| 5753 | if (IS_ERR(prog)) |
| 5754 | return PTR_ERR(prog); |
| 5755 | |
Andrii Nakryiko | 82e6b1e | 2021-08-15 00:05:58 -0700 | [diff] [blame] | 5756 | err = perf_event_set_bpf_prog(event, prog, 0); |
Andrii Nakryiko | 652c1b1 | 2021-08-15 00:05:56 -0700 | [diff] [blame] | 5757 | if (err) { |
| 5758 | bpf_prog_put(prog); |
| 5759 | return err; |
| 5760 | } |
| 5761 | |
| 5762 | return 0; |
| 5763 | } |
Alexei Starovoitov | 2541517 | 2015-03-25 12:49:20 -0700 | [diff] [blame] | 5764 | |
Wang Nan | 86e7972 | 2016-03-28 06:41:29 +0000 | [diff] [blame] | 5765 | case PERF_EVENT_IOC_PAUSE_OUTPUT: { |
Steven Rostedt (VMware) | 56de4e8 | 2019-12-13 13:21:30 -0500 | [diff] [blame] | 5766 | struct perf_buffer *rb; |
Wang Nan | 86e7972 | 2016-03-28 06:41:29 +0000 | [diff] [blame] | 5767 | |
| 5768 | rcu_read_lock(); |
| 5769 | rb = rcu_dereference(event->rb); |
| 5770 | if (!rb || !rb->nr_pages) { |
| 5771 | rcu_read_unlock(); |
| 5772 | return -EINVAL; |
| 5773 | } |
| 5774 | rb_toggle_paused(rb, !!arg); |
| 5775 | rcu_read_unlock(); |
| 5776 | return 0; |
| 5777 | } |
Yonghong Song | f371b30 | 2017-12-11 11:39:02 -0800 | [diff] [blame] | 5778 | |
| 5779 | case PERF_EVENT_IOC_QUERY_BPF: |
Yonghong Song | f4e2298 | 2017-12-13 10:35:37 -0800 | [diff] [blame] | 5780 | return perf_event_query_prog_array(event, (void __user *)arg); |
Milind Chabbi | 32ff77e | 2018-03-12 14:45:47 +0100 | [diff] [blame] | 5781 | |
| 5782 | case PERF_EVENT_IOC_MODIFY_ATTRIBUTES: { |
| 5783 | struct perf_event_attr new_attr; |
| 5784 | int err = perf_copy_attr((struct perf_event_attr __user *)arg, |
| 5785 | &new_attr); |
| 5786 | |
| 5787 | if (err) |
| 5788 | return err; |
| 5789 | |
| 5790 | return perf_event_modify_attr(event, &new_attr); |
| 5791 | } |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5792 | default: |
| 5793 | return -ENOTTY; |
| 5794 | } |
| 5795 | |
| 5796 | if (flags & PERF_IOC_FLAG_GROUP) |
| 5797 | perf_event_for_each(event, func); |
| 5798 | else |
| 5799 | perf_event_for_each_child(event, func); |
| 5800 | |
| 5801 | return 0; |
| 5802 | } |
| 5803 | |
Peter Zijlstra | f63a8da | 2015-01-23 12:24:14 +0100 | [diff] [blame] | 5804 | static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg) |
| 5805 | { |
| 5806 | struct perf_event *event = file->private_data; |
| 5807 | struct perf_event_context *ctx; |
| 5808 | long ret; |
| 5809 | |
Joel Fernandes (Google) | da97e18 | 2019-10-14 13:03:08 -0400 | [diff] [blame] | 5810 | /* Treat an ioctl like a write, as it is likely a mutating operation. */ |
| 5811 | ret = security_perf_event_write(event); |
| 5812 | if (ret) |
| 5813 | return ret; |
| 5814 | |
Peter Zijlstra | f63a8da | 2015-01-23 12:24:14 +0100 | [diff] [blame] | 5815 | ctx = perf_event_ctx_lock(event); |
| 5816 | ret = _perf_ioctl(event, cmd, arg); |
| 5817 | perf_event_ctx_unlock(event, ctx); |
| 5818 | |
| 5819 | return ret; |
| 5820 | } |
| 5821 | |
Pawel Moll | b3f2078 | 2014-06-13 16:03:32 +0100 | [diff] [blame] | 5822 | #ifdef CONFIG_COMPAT |
| 5823 | static long perf_compat_ioctl(struct file *file, unsigned int cmd, |
| 5824 | unsigned long arg) |
| 5825 | { |
| 5826 | switch (_IOC_NR(cmd)) { |
| 5827 | case _IOC_NR(PERF_EVENT_IOC_SET_FILTER): |
| 5828 | case _IOC_NR(PERF_EVENT_IOC_ID): |
Eugene Syromiatnikov | 82489c5 | 2018-05-21 14:34:20 +0200 | [diff] [blame] | 5829 | case _IOC_NR(PERF_EVENT_IOC_QUERY_BPF): |
| 5830 | case _IOC_NR(PERF_EVENT_IOC_MODIFY_ATTRIBUTES): |
Pawel Moll | b3f2078 | 2014-06-13 16:03:32 +0100 | [diff] [blame] | 5831 | /* Fix up pointer size (usually 4 -> 8 in the 32-on-64-bit case) */ |
| 5832 | if (_IOC_SIZE(cmd) == sizeof(compat_uptr_t)) { |
| 5833 | cmd &= ~IOCSIZE_MASK; |
| 5834 | cmd |= sizeof(void *) << IOCSIZE_SHIFT; |
| 5835 | } |
| 5836 | break; |
| 5837 | } |
| 5838 | return perf_ioctl(file, cmd, arg); |
| 5839 | } |
| 5840 | #else |
| 5841 | # define perf_compat_ioctl NULL |
| 5842 | #endif |
| 5843 | |
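User space drives the ioctl commands handled above through a perf event file descriptor returned by perf_event_open(2). As an illustrative sketch (not taken from this file; error handling omitted), reading an event's ID and redirecting a second event's output into the first event's ring buffer could look like this:

#include <linux/perf_event.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>

static int sys_perf_event_open(struct perf_event_attr *attr, pid_t pid,
                               int cpu, int group_fd, unsigned long flags)
{
        return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
        struct perf_event_attr attr = {
                .type     = PERF_TYPE_HARDWARE,
                .size     = sizeof(attr),
                .config   = PERF_COUNT_HW_CPU_CYCLES,
                .disabled = 1,
        };
        int leader, other;
        uint64_t id;

        leader = sys_perf_event_open(&attr, 0, -1, -1, 0);

        attr.config = PERF_COUNT_HW_INSTRUCTIONS;
        other = sys_perf_event_open(&attr, 0, -1, -1, 0);

        /* PERF_EVENT_IOC_ID: the kernel copies primary_event_id() back to us. */
        ioctl(leader, PERF_EVENT_IOC_ID, &id);

        /* PERF_EVENT_IOC_SET_OUTPUT: route 'other' into 'leader's buffer; -1 clears it. */
        ioctl(other, PERF_EVENT_IOC_SET_OUTPUT, leader);

        printf("leader event id: %llu\n", (unsigned long long)id);
        return 0;
}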
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5844 | int perf_event_task_enable(void) |
| 5845 | { |
Peter Zijlstra | f63a8da | 2015-01-23 12:24:14 +0100 | [diff] [blame] | 5846 | struct perf_event_context *ctx; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5847 | struct perf_event *event; |
| 5848 | |
| 5849 | mutex_lock(¤t->perf_event_mutex); |
Peter Zijlstra | f63a8da | 2015-01-23 12:24:14 +0100 | [diff] [blame] | 5850 | list_for_each_entry(event, ¤t->perf_event_list, owner_entry) { |
| 5851 | ctx = perf_event_ctx_lock(event); |
| 5852 | perf_event_for_each_child(event, _perf_event_enable); |
| 5853 | perf_event_ctx_unlock(event, ctx); |
| 5854 | } |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5855 | mutex_unlock(¤t->perf_event_mutex); |
| 5856 | |
| 5857 | return 0; |
| 5858 | } |
| 5859 | |
| 5860 | int perf_event_task_disable(void) |
| 5861 | { |
Peter Zijlstra | f63a8da | 2015-01-23 12:24:14 +0100 | [diff] [blame] | 5862 | struct perf_event_context *ctx; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5863 | struct perf_event *event; |
| 5864 | |
| 5865 | mutex_lock(¤t->perf_event_mutex); |
Peter Zijlstra | f63a8da | 2015-01-23 12:24:14 +0100 | [diff] [blame] | 5866 | list_for_each_entry(event, ¤t->perf_event_list, owner_entry) { |
| 5867 | ctx = perf_event_ctx_lock(event); |
| 5868 | perf_event_for_each_child(event, _perf_event_disable); |
| 5869 | perf_event_ctx_unlock(event, ctx); |
| 5870 | } |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5871 | mutex_unlock(¤t->perf_event_mutex); |
| 5872 | |
| 5873 | return 0; |
| 5874 | } |
| 5875 | |
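These two helpers are what the PR_TASK_PERF_EVENTS_ENABLE and PR_TASK_PERF_EVENTS_DISABLE prctl(2) commands end up calling, letting a task bulk-toggle every counter attached to it. A minimal user-space sketch (illustrative only):

#include <sys/prctl.h>

/* Run fn() with all counters attached to the calling task disabled. */
static void run_uncounted(void (*fn)(void))
{
        prctl(PR_TASK_PERF_EVENTS_DISABLE);   /* -> perf_event_task_disable() */
        fn();
        prctl(PR_TASK_PERF_EVENTS_ENABLE);    /* -> perf_event_task_enable() */
}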
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5876 | static int perf_event_index(struct perf_event *event) |
| 5877 | { |
Peter Zijlstra | a4eaf7f | 2010-06-16 14:37:10 +0200 | [diff] [blame] | 5878 | if (event->hw.state & PERF_HES_STOPPED) |
| 5879 | return 0; |
| 5880 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5881 | if (event->state != PERF_EVENT_STATE_ACTIVE) |
| 5882 | return 0; |
| 5883 | |
Peter Zijlstra | 35edc2a | 2011-11-20 20:36:02 +0100 | [diff] [blame] | 5884 | return event->pmu->event_idx(event); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5885 | } |
| 5886 | |
Peter Zijlstra | fa731587 | 2013-09-19 10:16:42 +0200 | [diff] [blame] | 5887 | static void perf_event_init_userpage(struct perf_event *event) |
| 5888 | { |
| 5889 | struct perf_event_mmap_page *userpg; |
Steven Rostedt (VMware) | 56de4e8 | 2019-12-13 13:21:30 -0500 | [diff] [blame] | 5890 | struct perf_buffer *rb; |
Peter Zijlstra | fa731587 | 2013-09-19 10:16:42 +0200 | [diff] [blame] | 5891 | |
| 5892 | rcu_read_lock(); |
| 5893 | rb = rcu_dereference(event->rb); |
| 5894 | if (!rb) |
| 5895 | goto unlock; |
| 5896 | |
| 5897 | userpg = rb->user_page; |
| 5898 | |
| 5899 | /* Allow new userspace to detect that bit 0 is deprecated */ |
| 5900 | userpg->cap_bit0_is_deprecated = 1; |
| 5901 | userpg->size = offsetof(struct perf_event_mmap_page, __reserved); |
Alexander Shishkin | e8c6dea | 2015-01-14 14:18:10 +0200 | [diff] [blame] | 5902 | userpg->data_offset = PAGE_SIZE; |
| 5903 | userpg->data_size = perf_data_size(rb); |
Peter Zijlstra | fa731587 | 2013-09-19 10:16:42 +0200 | [diff] [blame] | 5904 | |
| 5905 | unlock: |
| 5906 | rcu_read_unlock(); |
| 5907 | } |
| 5908 | |
Andy Lutomirski | c1317ec | 2014-10-24 15:58:11 -0700 | [diff] [blame] | 5909 | void __weak arch_perf_update_userpage( |
| 5910 | struct perf_event *event, struct perf_event_mmap_page *userpg, u64 now) |
Peter Zijlstra | e3f3541 | 2011-11-21 11:43:53 +0100 | [diff] [blame] | 5911 | { |
| 5912 | } |
| 5913 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5914 | /* |
| 5915 | * Callers need to ensure there can be no nesting of this function, otherwise |
| 5916 | * the seqlock logic goes bad. We cannot serialize this because the arch |
| 5917 | * code calls this from NMI context. |
| 5918 | */ |
| 5919 | void perf_event_update_userpage(struct perf_event *event) |
| 5920 | { |
| 5921 | struct perf_event_mmap_page *userpg; |
Steven Rostedt (VMware) | 56de4e8 | 2019-12-13 13:21:30 -0500 | [diff] [blame] | 5922 | struct perf_buffer *rb; |
Peter Zijlstra | e3f3541 | 2011-11-21 11:43:53 +0100 | [diff] [blame] | 5923 | u64 enabled, running, now; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5924 | |
| 5925 | rcu_read_lock(); |
Peter Zijlstra | 5ec4c59 | 2013-08-02 21:16:30 +0200 | [diff] [blame] | 5926 | rb = rcu_dereference(event->rb); |
| 5927 | if (!rb) |
| 5928 | goto unlock; |
| 5929 | |
Eric B Munson | 0d64120 | 2011-06-24 12:26:26 -0400 | [diff] [blame] | 5930 | /* |
| 5931 | * compute total_time_enabled, total_time_running |
| 5932 | * based on snapshot values taken when the event |
| 5933 | * was last scheduled in. |
| 5934 | * |
| 5935 | * we cannot simply call update_context_time() |
| 5936 | * because of locking issues, as we can be called in |
| 5937 | * NMI context |
| 5938 | */ |
Peter Zijlstra | e3f3541 | 2011-11-21 11:43:53 +0100 | [diff] [blame] | 5939 | calc_timer_values(event, &now, &enabled, &running); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5940 | |
Frederic Weisbecker | 7636913 | 2011-05-19 19:55:04 +0200 | [diff] [blame] | 5941 | userpg = rb->user_page; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5942 | /* |
Michael O'Farrell | 9d2dcc8f | 2018-07-30 13:14:34 -0700 | [diff] [blame] | 5943 | * Disable preemption to guarantee consistent time stamps are stored to |
| 5944 | * the user page. |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5945 | */ |
| 5946 | preempt_disable(); |
| 5947 | ++userpg->lock; |
| 5948 | barrier(); |
| 5949 | userpg->index = perf_event_index(event); |
Peter Zijlstra | b5e5879 | 2010-05-21 14:43:12 +0200 | [diff] [blame] | 5950 | userpg->offset = perf_event_count(event); |
Peter Zijlstra | 365a403 | 2011-11-21 20:58:59 +0100 | [diff] [blame] | 5951 | if (userpg->index) |
Peter Zijlstra | e785059 | 2010-05-21 14:43:08 +0200 | [diff] [blame] | 5952 | userpg->offset -= local64_read(&event->hw.prev_count); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5953 | |
Eric B Munson | 0d64120 | 2011-06-24 12:26:26 -0400 | [diff] [blame] | 5954 | userpg->time_enabled = enabled + |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5955 | atomic64_read(&event->child_total_time_enabled); |
| 5956 | |
Eric B Munson | 0d64120 | 2011-06-24 12:26:26 -0400 | [diff] [blame] | 5957 | userpg->time_running = running + |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5958 | atomic64_read(&event->child_total_time_running); |
| 5959 | |
Andy Lutomirski | c1317ec | 2014-10-24 15:58:11 -0700 | [diff] [blame] | 5960 | arch_perf_update_userpage(event, userpg, now); |
Peter Zijlstra | e3f3541 | 2011-11-21 11:43:53 +0100 | [diff] [blame] | 5961 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5962 | barrier(); |
| 5963 | ++userpg->lock; |
| 5964 | preempt_enable(); |
| 5965 | unlock: |
| 5966 | rcu_read_unlock(); |
| 5967 | } |
Suzuki K Poulose | 82975c4 | 2018-01-02 11:25:26 +0000 | [diff] [blame] | 5968 | EXPORT_SYMBOL_GPL(perf_event_update_userpage); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 5969 | |
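The ++userpg->lock / barrier() pairs above form a simple seqcount: the count is bumped before and after the update, so a user-space reader of the mmap()ed perf_event_mmap_page must retry if the value changed (or was odd) across its reads. A sketch of that read loop, assuming only the fields written above and leaving out the rdpmc fast path:

#include <linux/perf_event.h>
#include <stdint.h>

#define rmb()   __atomic_thread_fence(__ATOMIC_ACQUIRE)

struct counts {
        uint64_t offset;        /* counter value snapshot (userpg->offset) */
        uint64_t time_enabled;
        uint64_t time_running;
};

/* Illustrative reader for the seqcount protocol implemented above. */
static struct counts read_event_userpage(volatile struct perf_event_mmap_page *pc)
{
        struct counts c;
        uint32_t seq;

        do {
                seq = pc->lock;
                rmb();

                c.offset       = pc->offset;
                c.time_enabled = pc->time_enabled;
                c.time_running = pc->time_running;

                rmb();
        } while (pc->lock != seq || (seq & 1));  /* a writer was (or went) active */

        return c;
}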
Souptick Joarder | 9e3ed2d | 2018-05-21 23:55:20 +0530 | [diff] [blame] | 5970 | static vm_fault_t perf_mmap_fault(struct vm_fault *vmf) |
Peter Zijlstra | 906010b | 2009-09-21 16:08:49 +0200 | [diff] [blame] | 5971 | { |
Dave Jiang | 11bac80 | 2017-02-24 14:56:41 -0800 | [diff] [blame] | 5972 | struct perf_event *event = vmf->vma->vm_file->private_data; |
Steven Rostedt (VMware) | 56de4e8 | 2019-12-13 13:21:30 -0500 | [diff] [blame] | 5973 | struct perf_buffer *rb; |
Souptick Joarder | 9e3ed2d | 2018-05-21 23:55:20 +0530 | [diff] [blame] | 5974 | vm_fault_t ret = VM_FAULT_SIGBUS; |
Peter Zijlstra | 906010b | 2009-09-21 16:08:49 +0200 | [diff] [blame] | 5975 | |
| 5976 | if (vmf->flags & FAULT_FLAG_MKWRITE) { |
| 5977 | if (vmf->pgoff == 0) |
| 5978 | ret = 0; |
| 5979 | return ret; |
| 5980 | } |
| 5981 | |
| 5982 | rcu_read_lock(); |
Frederic Weisbecker | 7636913 | 2011-05-19 19:55:04 +0200 | [diff] [blame] | 5983 | rb = rcu_dereference(event->rb); |
| 5984 | if (!rb) |
Peter Zijlstra | 906010b | 2009-09-21 16:08:49 +0200 | [diff] [blame] | 5985 | goto unlock; |
| 5986 | |
| 5987 | if (vmf->pgoff && (vmf->flags & FAULT_FLAG_WRITE)) |
| 5988 | goto unlock; |
| 5989 | |
Frederic Weisbecker | 7636913 | 2011-05-19 19:55:04 +0200 | [diff] [blame] | 5990 | vmf->page = perf_mmap_to_page(rb, vmf->pgoff); |
Peter Zijlstra | 906010b | 2009-09-21 16:08:49 +0200 | [diff] [blame] | 5991 | if (!vmf->page) |
| 5992 | goto unlock; |
| 5993 | |
| 5994 | get_page(vmf->page); |
Dave Jiang | 11bac80 | 2017-02-24 14:56:41 -0800 | [diff] [blame] | 5995 | vmf->page->mapping = vmf->vma->vm_file->f_mapping; |
Peter Zijlstra | 906010b | 2009-09-21 16:08:49 +0200 | [diff] [blame] | 5996 | vmf->page->index = vmf->pgoff; |
| 5997 | |
| 5998 | ret = 0; |
| 5999 | unlock: |
| 6000 | rcu_read_unlock(); |
| 6001 | |
| 6002 | return ret; |
| 6003 | } |
| 6004 | |
Peter Zijlstra | 10c6db1 | 2011-11-26 02:47:31 +0100 | [diff] [blame] | 6005 | static void ring_buffer_attach(struct perf_event *event, |
Steven Rostedt (VMware) | 56de4e8 | 2019-12-13 13:21:30 -0500 | [diff] [blame] | 6006 | struct perf_buffer *rb) |
Peter Zijlstra | 10c6db1 | 2011-11-26 02:47:31 +0100 | [diff] [blame] | 6007 | { |
Steven Rostedt (VMware) | 56de4e8 | 2019-12-13 13:21:30 -0500 | [diff] [blame] | 6008 | struct perf_buffer *old_rb = NULL; |
Peter Zijlstra | 10c6db1 | 2011-11-26 02:47:31 +0100 | [diff] [blame] | 6009 | unsigned long flags; |
| 6010 | |
James Clark | 961c391 | 2021-12-06 11:38:40 +0000 | [diff] [blame] | 6011 | WARN_ON_ONCE(event->parent); |
| 6012 | |
Peter Zijlstra | b69cf53 | 2014-03-14 10:50:33 +0100 | [diff] [blame] | 6013 | if (event->rb) { |
| 6014 | /* |
| 6015 | * Should be impossible, we set this when removing |
| 6016 | * event->rb_entry and wait/clear when adding event->rb_entry. |
| 6017 | */ |
| 6018 | WARN_ON_ONCE(event->rcu_pending); |
Peter Zijlstra | 10c6db1 | 2011-11-26 02:47:31 +0100 | [diff] [blame] | 6019 | |
Peter Zijlstra | b69cf53 | 2014-03-14 10:50:33 +0100 | [diff] [blame] | 6020 | old_rb = event->rb; |
Peter Zijlstra | b69cf53 | 2014-03-14 10:50:33 +0100 | [diff] [blame] | 6021 | spin_lock_irqsave(&old_rb->event_lock, flags); |
| 6022 | list_del_rcu(&event->rb_entry); |
| 6023 | spin_unlock_irqrestore(&old_rb->event_lock, flags); |
Peter Zijlstra | 10c6db1 | 2011-11-26 02:47:31 +0100 | [diff] [blame] | 6024 | |
Oleg Nesterov | 2f993cf | 2015-05-30 22:04:25 +0200 | [diff] [blame] | 6025 | event->rcu_batches = get_state_synchronize_rcu(); |
| 6026 | event->rcu_pending = 1; |
Peter Zijlstra | b69cf53 | 2014-03-14 10:50:33 +0100 | [diff] [blame] | 6027 | } |
Peter Zijlstra | 10c6db1 | 2011-11-26 02:47:31 +0100 | [diff] [blame] | 6028 | |
Peter Zijlstra | b69cf53 | 2014-03-14 10:50:33 +0100 | [diff] [blame] | 6029 | if (rb) { |
Oleg Nesterov | 2f993cf | 2015-05-30 22:04:25 +0200 | [diff] [blame] | 6030 | if (event->rcu_pending) { |
| 6031 | cond_synchronize_rcu(event->rcu_batches); |
| 6032 | event->rcu_pending = 0; |
| 6033 | } |
| 6034 | |
Peter Zijlstra | b69cf53 | 2014-03-14 10:50:33 +0100 | [diff] [blame] | 6035 | spin_lock_irqsave(&rb->event_lock, flags); |
| 6036 | list_add_rcu(&event->rb_entry, &rb->event_list); |
| 6037 | spin_unlock_irqrestore(&rb->event_lock, flags); |
| 6038 | } |
| 6039 | |
Alexander Shishkin | 767ae08 | 2016-09-06 16:23:49 +0300 | [diff] [blame] | 6040 | /* |
| 6041 | * Avoid racing with perf_mmap_close(AUX): stop the event |
| 6042 | * before swizzling the event::rb pointer; if it's getting |
| 6043 | * unmapped, its aux_mmap_count will be 0 and it won't |
| 6044 | * restart. See the comment in __perf_pmu_output_stop(). |
| 6045 | * |
| 6046 | * Data will inevitably be lost when set_output is done in |
| 6047 | * mid-air, but then again, whoever does it like this is |
| 6048 | * not in for the data anyway. |
| 6049 | */ |
| 6050 | if (has_aux(event)) |
| 6051 | perf_event_stop(event, 0); |
| 6052 | |
Peter Zijlstra | b69cf53 | 2014-03-14 10:50:33 +0100 | [diff] [blame] | 6053 | rcu_assign_pointer(event->rb, rb); |
| 6054 | |
| 6055 | if (old_rb) { |
| 6056 | ring_buffer_put(old_rb); |
| 6057 | /* |
| 6058 | * Since we detached before setting the new rb (so that we |
| 6059 | * could attach the new rb), we could have missed a wakeup. |
| 6060 | * Provide it now. |
| 6061 | */ |
| 6062 | wake_up_all(&event->waitq); |
| 6063 | } |
Peter Zijlstra | 10c6db1 | 2011-11-26 02:47:31 +0100 | [diff] [blame] | 6064 | } |
| 6065 | |
| 6066 | static void ring_buffer_wakeup(struct perf_event *event) |
| 6067 | { |
Steven Rostedt (VMware) | 56de4e8 | 2019-12-13 13:21:30 -0500 | [diff] [blame] | 6068 | struct perf_buffer *rb; |
Peter Zijlstra | 10c6db1 | 2011-11-26 02:47:31 +0100 | [diff] [blame] | 6069 | |
James Clark | 961c391 | 2021-12-06 11:38:40 +0000 | [diff] [blame] | 6070 | if (event->parent) |
| 6071 | event = event->parent; |
| 6072 | |
Peter Zijlstra | 10c6db1 | 2011-11-26 02:47:31 +0100 | [diff] [blame] | 6073 | rcu_read_lock(); |
| 6074 | rb = rcu_dereference(event->rb); |
Peter Zijlstra | 9bb5d40 | 2013-06-04 10:44:21 +0200 | [diff] [blame] | 6075 | if (rb) { |
| 6076 | list_for_each_entry_rcu(event, &rb->event_list, rb_entry) |
| 6077 | wake_up_all(&event->waitq); |
| 6078 | } |
Peter Zijlstra | 10c6db1 | 2011-11-26 02:47:31 +0100 | [diff] [blame] | 6079 | rcu_read_unlock(); |
| 6080 | } |
| 6081 | |
Steven Rostedt (VMware) | 56de4e8 | 2019-12-13 13:21:30 -0500 | [diff] [blame] | 6082 | struct perf_buffer *ring_buffer_get(struct perf_event *event) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6083 | { |
Steven Rostedt (VMware) | 56de4e8 | 2019-12-13 13:21:30 -0500 | [diff] [blame] | 6084 | struct perf_buffer *rb; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6085 | |
James Clark | 961c391 | 2021-12-06 11:38:40 +0000 | [diff] [blame] | 6086 | if (event->parent) |
| 6087 | event = event->parent; |
| 6088 | |
Peter Zijlstra | ac9721f | 2010-05-27 12:54:41 +0200 | [diff] [blame] | 6089 | rcu_read_lock(); |
Frederic Weisbecker | 7636913 | 2011-05-19 19:55:04 +0200 | [diff] [blame] | 6090 | rb = rcu_dereference(event->rb); |
| 6091 | if (rb) { |
Elena Reshetova | fecb8ed | 2019-01-28 14:27:27 +0200 | [diff] [blame] | 6092 | if (!refcount_inc_not_zero(&rb->refcount)) |
Frederic Weisbecker | 7636913 | 2011-05-19 19:55:04 +0200 | [diff] [blame] | 6093 | rb = NULL; |
Peter Zijlstra | ac9721f | 2010-05-27 12:54:41 +0200 | [diff] [blame] | 6094 | } |
| 6095 | rcu_read_unlock(); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6096 | |
Frederic Weisbecker | 7636913 | 2011-05-19 19:55:04 +0200 | [diff] [blame] | 6097 | return rb; |
Peter Zijlstra | ac9721f | 2010-05-27 12:54:41 +0200 | [diff] [blame] | 6098 | } |
| 6099 | |
Steven Rostedt (VMware) | 56de4e8 | 2019-12-13 13:21:30 -0500 | [diff] [blame] | 6100 | void ring_buffer_put(struct perf_buffer *rb) |
Peter Zijlstra | ac9721f | 2010-05-27 12:54:41 +0200 | [diff] [blame] | 6101 | { |
Elena Reshetova | fecb8ed | 2019-01-28 14:27:27 +0200 | [diff] [blame] | 6102 | if (!refcount_dec_and_test(&rb->refcount)) |
Peter Zijlstra | ac9721f | 2010-05-27 12:54:41 +0200 | [diff] [blame] | 6103 | return; |
| 6104 | |
Peter Zijlstra | 9bb5d40 | 2013-06-04 10:44:21 +0200 | [diff] [blame] | 6105 | WARN_ON_ONCE(!list_empty(&rb->event_list)); |
Peter Zijlstra | 10c6db1 | 2011-11-26 02:47:31 +0100 | [diff] [blame] | 6106 | |
Frederic Weisbecker | 7636913 | 2011-05-19 19:55:04 +0200 | [diff] [blame] | 6107 | call_rcu(&rb->rcu_head, rb_free_rcu); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6108 | } |
| 6109 | |
| 6110 | static void perf_mmap_open(struct vm_area_struct *vma) |
| 6111 | { |
| 6112 | struct perf_event *event = vma->vm_file->private_data; |
| 6113 | |
| 6114 | atomic_inc(&event->mmap_count); |
Peter Zijlstra | 9bb5d40 | 2013-06-04 10:44:21 +0200 | [diff] [blame] | 6115 | atomic_inc(&event->rb->mmap_count); |
Andy Lutomirski | 1e0fb9e | 2014-10-24 15:58:10 -0700 | [diff] [blame] | 6116 | |
Peter Zijlstra | 45bfb2e | 2015-01-14 14:18:11 +0200 | [diff] [blame] | 6117 | if (vma->vm_pgoff) |
| 6118 | atomic_inc(&event->rb->aux_mmap_count); |
| 6119 | |
Andy Lutomirski | 1e0fb9e | 2014-10-24 15:58:10 -0700 | [diff] [blame] | 6120 | if (event->pmu->event_mapped) |
Peter Zijlstra | bfe33492 | 2017-08-02 19:39:30 +0200 | [diff] [blame] | 6121 | event->pmu->event_mapped(event, vma->vm_mm); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6122 | } |
| 6123 | |
Alexander Shishkin | 95ff4ca | 2015-12-02 18:41:11 +0200 | [diff] [blame] | 6124 | static void perf_pmu_output_stop(struct perf_event *event); |
| 6125 | |
Peter Zijlstra | 9bb5d40 | 2013-06-04 10:44:21 +0200 | [diff] [blame] | 6126 | /* |
| 6127 | * A buffer can be mmap()ed multiple times; either directly through the same |
| 6128 | * event, or through other events by use of perf_event_set_output(). |
| 6129 | * |
| 6130 | * In order to undo the VM accounting done by perf_mmap() we need to destroy |
| 6131 | * the buffer here, where we still have a VM context. This means we need |
| 6132 | * to detach all events redirecting to us. |
| 6133 | */ |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6134 | static void perf_mmap_close(struct vm_area_struct *vma) |
| 6135 | { |
| 6136 | struct perf_event *event = vma->vm_file->private_data; |
Steven Rostedt (VMware) | 56de4e8 | 2019-12-13 13:21:30 -0500 | [diff] [blame] | 6137 | struct perf_buffer *rb = ring_buffer_get(event); |
Peter Zijlstra | 9bb5d40 | 2013-06-04 10:44:21 +0200 | [diff] [blame] | 6138 | struct user_struct *mmap_user = rb->mmap_user; |
| 6139 | int mmap_locked = rb->mmap_locked; |
| 6140 | unsigned long size = perf_data_size(rb); |
Jiri Olsa | f91072e | 2020-09-16 13:53:11 +0200 | [diff] [blame] | 6141 | bool detach_rest = false; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6142 | |
Andy Lutomirski | 1e0fb9e | 2014-10-24 15:58:10 -0700 | [diff] [blame] | 6143 | if (event->pmu->event_unmapped) |
Peter Zijlstra | bfe33492 | 2017-08-02 19:39:30 +0200 | [diff] [blame] | 6144 | event->pmu->event_unmapped(event, vma->vm_mm); |
Andy Lutomirski | 1e0fb9e | 2014-10-24 15:58:10 -0700 | [diff] [blame] | 6145 | |
Peter Zijlstra | 45bfb2e | 2015-01-14 14:18:11 +0200 | [diff] [blame] | 6146 | /* |
| 6147 | * rb->aux_mmap_count will always drop before rb->mmap_count and |
| 6148 | * event->mmap_count, so it is ok to use event->mmap_mutex to |
| 6149 | * serialize with perf_mmap here. |
| 6150 | */ |
| 6151 | if (rb_has_aux(rb) && vma->vm_pgoff == rb->aux_pgoff && |
| 6152 | atomic_dec_and_mutex_lock(&rb->aux_mmap_count, &event->mmap_mutex)) { |
Alexander Shishkin | 95ff4ca | 2015-12-02 18:41:11 +0200 | [diff] [blame] | 6153 | /* |
| 6154 | * Stop all AUX events that are writing to this buffer, |
| 6155 | * so that we can free its AUX pages and corresponding PMU |
| 6156 | * data. Note that after rb::aux_mmap_count dropped to zero, |
| 6157 | * they won't start any more (see perf_aux_output_begin()). |
| 6158 | */ |
| 6159 | perf_pmu_output_stop(event); |
| 6160 | |
| 6161 | /* now it's safe to free the pages */ |
Alexander Shishkin | 36b3db0 | 2019-11-15 18:08:18 +0200 | [diff] [blame] | 6162 | atomic_long_sub(rb->aux_nr_pages - rb->aux_mmap_locked, &mmap_user->locked_vm); |
| 6163 | atomic64_sub(rb->aux_mmap_locked, &vma->vm_mm->pinned_vm); |
Peter Zijlstra | 45bfb2e | 2015-01-14 14:18:11 +0200 | [diff] [blame] | 6164 | |
Alexander Shishkin | 95ff4ca | 2015-12-02 18:41:11 +0200 | [diff] [blame] | 6165 | /* this has to be the last one */ |
Peter Zijlstra | 45bfb2e | 2015-01-14 14:18:11 +0200 | [diff] [blame] | 6166 | rb_free_aux(rb); |
Elena Reshetova | ca3bb3d | 2019-01-28 14:27:28 +0200 | [diff] [blame] | 6167 | WARN_ON_ONCE(refcount_read(&rb->aux_refcount)); |
Alexander Shishkin | 95ff4ca | 2015-12-02 18:41:11 +0200 | [diff] [blame] | 6168 | |
Peter Zijlstra | 45bfb2e | 2015-01-14 14:18:11 +0200 | [diff] [blame] | 6169 | mutex_unlock(&event->mmap_mutex); |
| 6170 | } |
| 6171 | |
Jiri Olsa | f91072e | 2020-09-16 13:53:11 +0200 | [diff] [blame] | 6172 | if (atomic_dec_and_test(&rb->mmap_count)) |
| 6173 | detach_rest = true; |
Peter Zijlstra | ac9721f | 2010-05-27 12:54:41 +0200 | [diff] [blame] | 6174 | |
Peter Zijlstra | 9bb5d40 | 2013-06-04 10:44:21 +0200 | [diff] [blame] | 6175 | if (!atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex)) |
Peter Zijlstra | b69cf53 | 2014-03-14 10:50:33 +0100 | [diff] [blame] | 6176 | goto out_put; |
Peter Zijlstra | 9bb5d40 | 2013-06-04 10:44:21 +0200 | [diff] [blame] | 6177 | |
Peter Zijlstra | b69cf53 | 2014-03-14 10:50:33 +0100 | [diff] [blame] | 6178 | ring_buffer_attach(event, NULL); |
Peter Zijlstra | 9bb5d40 | 2013-06-04 10:44:21 +0200 | [diff] [blame] | 6179 | mutex_unlock(&event->mmap_mutex); |
| 6180 | |
| 6181 | /* If there's still other mmap()s of this buffer, we're done. */ |
Jiri Olsa | f91072e | 2020-09-16 13:53:11 +0200 | [diff] [blame] | 6182 | if (!detach_rest) |
Peter Zijlstra | b69cf53 | 2014-03-14 10:50:33 +0100 | [diff] [blame] | 6183 | goto out_put; |
Peter Zijlstra | 9bb5d40 | 2013-06-04 10:44:21 +0200 | [diff] [blame] | 6184 | |
| 6185 | /* |
| 6186 | * No other mmap()s, detach from all other events that might redirect |
| 6187 | * into the now unreachable buffer. Somewhat complicated by the |
| 6188 | * fact that rb::event_lock otherwise nests inside mmap_mutex. |
| 6189 | */ |
| 6190 | again: |
| 6191 | rcu_read_lock(); |
| 6192 | list_for_each_entry_rcu(event, &rb->event_list, rb_entry) { |
| 6193 | if (!atomic_long_inc_not_zero(&event->refcount)) { |
| 6194 | /* |
| 6195 | * This event is en-route to free_event() which will |
| 6196 | * detach it and remove it from the list. |
| 6197 | */ |
| 6198 | continue; |
| 6199 | } |
| 6200 | rcu_read_unlock(); |
| 6201 | |
| 6202 | mutex_lock(&event->mmap_mutex); |
| 6203 | /* |
| 6204 | * Check we didn't race with perf_event_set_output() which can |
| 6205 | * swizzle the rb from under us while we were waiting to |
| 6206 | * acquire mmap_mutex. |
| 6207 | * |
| 6208 | * If we find a different rb, ignore this event; the next |
| 6209 | * iteration will no longer find it on the list. We still |
| 6210 | * have to restart the iteration to make sure we're not now |
| 6211 | * iterating the wrong list. |
| 6212 | */ |
Peter Zijlstra | b69cf53 | 2014-03-14 10:50:33 +0100 | [diff] [blame] | 6213 | if (event->rb == rb) |
| 6214 | ring_buffer_attach(event, NULL); |
| 6215 | |
Peter Zijlstra | 9bb5d40 | 2013-06-04 10:44:21 +0200 | [diff] [blame] | 6216 | mutex_unlock(&event->mmap_mutex); |
| 6217 | put_event(event); |
| 6218 | |
| 6219 | /* |
| 6220 | * Restart the iteration; either we're on the wrong list or |
| 6221 | * destroyed its integrity by doing a deletion. |
| 6222 | */ |
| 6223 | goto again; |
| 6224 | } |
| 6225 | rcu_read_unlock(); |
| 6226 | |
| 6227 | /* |
| 6228 | * It could be there's still a few 0-ref events on the list; they'll |
| 6229 | * get cleaned up by free_event() -- they'll also still have their |
| 6230 | * ref on the rb and will free it whenever they are done with it. |
| 6231 | * |
| 6232 | * Aside from that, this buffer is 'fully' detached and unmapped, |
| 6233 | * undo the VM accounting. |
| 6234 | */ |
| 6235 | |
Song Liu | d44248a | 2019-09-04 14:46:18 -0700 | [diff] [blame] | 6236 | atomic_long_sub((size >> PAGE_SHIFT) + 1 - mmap_locked, |
| 6237 | &mmap_user->locked_vm); |
Davidlohr Bueso | 70f8a3c | 2019-02-06 09:59:15 -0800 | [diff] [blame] | 6238 | atomic64_sub(mmap_locked, &vma->vm_mm->pinned_vm); |
Peter Zijlstra | 9bb5d40 | 2013-06-04 10:44:21 +0200 | [diff] [blame] | 6239 | free_uid(mmap_user); |
| 6240 | |
Peter Zijlstra | b69cf53 | 2014-03-14 10:50:33 +0100 | [diff] [blame] | 6241 | out_put: |
Peter Zijlstra | 9bb5d40 | 2013-06-04 10:44:21 +0200 | [diff] [blame] | 6242 | ring_buffer_put(rb); /* could be last */ |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6243 | } |
| 6244 | |
Alexey Dobriyan | f0f37e2f | 2009-09-27 22:29:37 +0400 | [diff] [blame] | 6245 | static const struct vm_operations_struct perf_mmap_vmops = { |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6246 | .open = perf_mmap_open, |
Ingo Molnar | fca0c11 | 2018-12-03 10:52:21 +0100 | [diff] [blame] | 6247 | .close = perf_mmap_close, /* non mergeable */ |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6248 | .fault = perf_mmap_fault, |
| 6249 | .page_mkwrite = perf_mmap_fault, |
| 6250 | }; |
| 6251 | |
| 6252 | static int perf_mmap(struct file *file, struct vm_area_struct *vma) |
| 6253 | { |
| 6254 | struct perf_event *event = file->private_data; |
| 6255 | unsigned long user_locked, user_lock_limit; |
| 6256 | struct user_struct *user = current_user(); |
Steven Rostedt (VMware) | 56de4e8 | 2019-12-13 13:21:30 -0500 | [diff] [blame] | 6257 | struct perf_buffer *rb = NULL; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6258 | unsigned long locked, lock_limit; |
| 6259 | unsigned long vma_size; |
| 6260 | unsigned long nr_pages; |
Peter Zijlstra | 45bfb2e | 2015-01-14 14:18:11 +0200 | [diff] [blame] | 6261 | long user_extra = 0, extra = 0; |
Peter Zijlstra | d57e34f | 2010-05-28 19:41:35 +0200 | [diff] [blame] | 6262 | int ret = 0, flags = 0; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6263 | |
Peter Zijlstra | c792061 | 2010-05-18 10:33:24 +0200 | [diff] [blame] | 6264 | /* |
| 6265 | * Don't allow mmap() of inherited per-task counters. This would |
| 6266 | * create a performance issue due to all children writing to the |
Frederic Weisbecker | 7636913 | 2011-05-19 19:55:04 +0200 | [diff] [blame] | 6267 | * same rb. |
Peter Zijlstra | c792061 | 2010-05-18 10:33:24 +0200 | [diff] [blame] | 6268 | */ |
| 6269 | if (event->cpu == -1 && event->attr.inherit) |
| 6270 | return -EINVAL; |
| 6271 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6272 | if (!(vma->vm_flags & VM_SHARED)) |
| 6273 | return -EINVAL; |
| 6274 | |
Joel Fernandes (Google) | da97e18 | 2019-10-14 13:03:08 -0400 | [diff] [blame] | 6275 | ret = security_perf_event_read(event); |
| 6276 | if (ret) |
| 6277 | return ret; |
| 6278 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6279 | vma_size = vma->vm_end - vma->vm_start; |
Peter Zijlstra | 45bfb2e | 2015-01-14 14:18:11 +0200 | [diff] [blame] | 6280 | |
| 6281 | if (vma->vm_pgoff == 0) { |
| 6282 | nr_pages = (vma_size / PAGE_SIZE) - 1; |
| 6283 | } else { |
| 6284 | /* |
| 6285 | * AUX area mapping: if rb->aux_nr_pages != 0, it's already |
| 6286 | * mapped, all subsequent mappings should have the same size |
| 6287 | * and offset. Must be above the normal perf buffer. |
| 6288 | */ |
| 6289 | u64 aux_offset, aux_size; |
| 6290 | |
| 6291 | if (!event->rb) |
| 6292 | return -EINVAL; |
| 6293 | |
| 6294 | nr_pages = vma_size / PAGE_SIZE; |
| 6295 | |
| 6296 | mutex_lock(&event->mmap_mutex); |
| 6297 | ret = -EINVAL; |
| 6298 | |
| 6299 | rb = event->rb; |
| 6300 | if (!rb) |
| 6301 | goto aux_unlock; |
| 6302 | |
Mark Rutland | 6aa7de0 | 2017-10-23 14:07:29 -0700 | [diff] [blame] | 6303 | aux_offset = READ_ONCE(rb->user_page->aux_offset); |
| 6304 | aux_size = READ_ONCE(rb->user_page->aux_size); |
Peter Zijlstra | 45bfb2e | 2015-01-14 14:18:11 +0200 | [diff] [blame] | 6305 | |
| 6306 | if (aux_offset < perf_data_size(rb) + PAGE_SIZE) |
| 6307 | goto aux_unlock; |
| 6308 | |
| 6309 | if (aux_offset != vma->vm_pgoff << PAGE_SHIFT) |
| 6310 | goto aux_unlock; |
| 6311 | |
| 6312 | /* already mapped with a different offset */ |
| 6313 | if (rb_has_aux(rb) && rb->aux_pgoff != vma->vm_pgoff) |
| 6314 | goto aux_unlock; |
| 6315 | |
| 6316 | if (aux_size != vma_size || aux_size != nr_pages * PAGE_SIZE) |
| 6317 | goto aux_unlock; |
| 6318 | |
| 6319 | /* already mapped with a different size */ |
| 6320 | if (rb_has_aux(rb) && rb->aux_nr_pages != nr_pages) |
| 6321 | goto aux_unlock; |
| 6322 | |
| 6323 | if (!is_power_of_2(nr_pages)) |
| 6324 | goto aux_unlock; |
| 6325 | |
| 6326 | if (!atomic_inc_not_zero(&rb->mmap_count)) |
| 6327 | goto aux_unlock; |
| 6328 | |
| 6329 | if (rb_has_aux(rb)) { |
| 6330 | atomic_inc(&rb->aux_mmap_count); |
| 6331 | ret = 0; |
| 6332 | goto unlock; |
| 6333 | } |
| 6334 | |
| 6335 | atomic_set(&rb->aux_mmap_count, 1); |
| 6336 | user_extra = nr_pages; |
| 6337 | |
| 6338 | goto accounting; |
| 6339 | } |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6340 | |
| 6341 | /* |
Frederic Weisbecker | 7636913 | 2011-05-19 19:55:04 +0200 | [diff] [blame] | 6342 | * If we have rb pages, ensure they're a power-of-two number, so we |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6343 | * can do bitmasks instead of modulo. |
| 6344 | */ |
Kan Liang | 2ed1131 | 2015-03-02 02:14:26 -0500 | [diff] [blame] | 6345 | if (nr_pages != 0 && !is_power_of_2(nr_pages)) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6346 | return -EINVAL; |
| 6347 | |
| 6348 | if (vma_size != PAGE_SIZE * (1 + nr_pages)) |
| 6349 | return -EINVAL; |
| 6350 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6351 | WARN_ON_ONCE(event->ctx->parent_ctx); |
Peter Zijlstra | 9bb5d40 | 2013-06-04 10:44:21 +0200 | [diff] [blame] | 6352 | again: |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6353 | mutex_lock(&event->mmap_mutex); |
Frederic Weisbecker | 7636913 | 2011-05-19 19:55:04 +0200 | [diff] [blame] | 6354 | if (event->rb) { |
Peter Zijlstra | 9bb5d40 | 2013-06-04 10:44:21 +0200 | [diff] [blame] | 6355 | if (event->rb->nr_pages != nr_pages) { |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6356 | ret = -EINVAL; |
Peter Zijlstra | 9bb5d40 | 2013-06-04 10:44:21 +0200 | [diff] [blame] | 6357 | goto unlock; |
| 6358 | } |
| 6359 | |
| 6360 | if (!atomic_inc_not_zero(&event->rb->mmap_count)) { |
| 6361 | /* |
| 6362 | * Raced against perf_mmap_close() through |
| 6363 | * perf_event_set_output(). Try again, hope for better |
| 6364 | * luck. |
| 6365 | */ |
| 6366 | mutex_unlock(&event->mmap_mutex); |
| 6367 | goto again; |
| 6368 | } |
| 6369 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6370 | goto unlock; |
| 6371 | } |
| 6372 | |
| 6373 | user_extra = nr_pages + 1; |
Peter Zijlstra | 45bfb2e | 2015-01-14 14:18:11 +0200 | [diff] [blame] | 6374 | |
| 6375 | accounting: |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6376 | user_lock_limit = sysctl_perf_event_mlock >> (PAGE_SHIFT - 10); |
| 6377 | |
| 6378 | /* |
| 6379 | * Increase the limit linearly with more CPUs: |
| 6380 | */ |
| 6381 | user_lock_limit *= num_online_cpus(); |
| 6382 | |
Song Liu | 0034615 | 2020-01-23 10:11:46 -0800 | [diff] [blame] | 6383 | user_locked = atomic_long_read(&user->locked_vm); |
| 6384 | |
| 6385 | /* |
| 6386 | * sysctl_perf_event_mlock may have changed, so that |
| 6387 | * user->locked_vm > user_lock_limit |
| 6388 | */ |
| 6389 | if (user_locked > user_lock_limit) |
| 6390 | user_locked = user_lock_limit; |
| 6391 | user_locked += user_extra; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6392 | |
Alexander Shishkin | c4b7547 | 2019-11-20 19:06:40 +0200 | [diff] [blame] | 6393 | if (user_locked > user_lock_limit) { |
Song Liu | d44248a | 2019-09-04 14:46:18 -0700 | [diff] [blame] | 6394 | /* |
| 6395 | * charge locked_vm until it hits user_lock_limit; |
| 6396 | * charge the rest from pinned_vm |
| 6397 | */ |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6398 | extra = user_locked - user_lock_limit; |
Song Liu | d44248a | 2019-09-04 14:46:18 -0700 | [diff] [blame] | 6399 | user_extra -= extra; |
| 6400 | } |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6401 | |
Jiri Slaby | 78d7d40 | 2010-03-05 13:42:54 -0800 | [diff] [blame] | 6402 | lock_limit = rlimit(RLIMIT_MEMLOCK); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6403 | lock_limit >>= PAGE_SHIFT; |
Davidlohr Bueso | 70f8a3c | 2019-02-06 09:59:15 -0800 | [diff] [blame] | 6404 | locked = atomic64_read(&vma->vm_mm->pinned_vm) + extra; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6405 | |
Joel Fernandes (Google) | da97e18 | 2019-10-14 13:03:08 -0400 | [diff] [blame] | 6406 | if ((locked > lock_limit) && perf_is_paranoid() && |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6407 | !capable(CAP_IPC_LOCK)) { |
| 6408 | ret = -EPERM; |
| 6409 | goto unlock; |
| 6410 | } |
| 6411 | |
Peter Zijlstra | 45bfb2e | 2015-01-14 14:18:11 +0200 | [diff] [blame] | 6412 | WARN_ON(!rb && event->rb); |
Peter Zijlstra | 906010b | 2009-09-21 16:08:49 +0200 | [diff] [blame] | 6413 | |
Peter Zijlstra | d57e34f | 2010-05-28 19:41:35 +0200 | [diff] [blame] | 6414 | if (vma->vm_flags & VM_WRITE) |
Frederic Weisbecker | 7636913 | 2011-05-19 19:55:04 +0200 | [diff] [blame] | 6415 | flags |= RING_BUFFER_WRITABLE; |
Peter Zijlstra | d57e34f | 2010-05-28 19:41:35 +0200 | [diff] [blame] | 6416 | |
Frederic Weisbecker | 7636913 | 2011-05-19 19:55:04 +0200 | [diff] [blame] | 6417 | if (!rb) { |
Peter Zijlstra | 45bfb2e | 2015-01-14 14:18:11 +0200 | [diff] [blame] | 6418 | rb = rb_alloc(nr_pages, |
| 6419 | event->attr.watermark ? event->attr.wakeup_watermark : 0, |
| 6420 | event->cpu, flags); |
| 6421 | |
| 6422 | if (!rb) { |
| 6423 | ret = -ENOMEM; |
| 6424 | goto unlock; |
| 6425 | } |
| 6426 | |
| 6427 | atomic_set(&rb->mmap_count, 1); |
| 6428 | rb->mmap_user = get_current_user(); |
| 6429 | rb->mmap_locked = extra; |
| 6430 | |
| 6431 | ring_buffer_attach(event, rb); |
| 6432 | |
Song Liu | f792565 | 2021-09-29 12:43:13 -0700 | [diff] [blame] | 6433 | perf_event_update_time(event); |
Peter Zijlstra | 45bfb2e | 2015-01-14 14:18:11 +0200 | [diff] [blame] | 6434 | perf_event_init_userpage(event); |
| 6435 | perf_event_update_userpage(event); |
| 6436 | } else { |
Alexander Shishkin | 1a59413 | 2015-01-14 14:18:18 +0200 | [diff] [blame] | 6437 | ret = rb_alloc_aux(rb, event, vma->vm_pgoff, nr_pages, |
| 6438 | event->attr.aux_watermark, flags); |
Peter Zijlstra | 45bfb2e | 2015-01-14 14:18:11 +0200 | [diff] [blame] | 6439 | if (!ret) |
| 6440 | rb->aux_mmap_locked = extra; |
Peter Zijlstra | ac9721f | 2010-05-27 12:54:41 +0200 | [diff] [blame] | 6441 | } |
Peter Zijlstra | 26cb63a | 2013-05-28 10:55:48 +0200 | [diff] [blame] | 6442 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6443 | unlock: |
Peter Zijlstra | 45bfb2e | 2015-01-14 14:18:11 +0200 | [diff] [blame] | 6444 | if (!ret) { |
| 6445 | atomic_long_add(user_extra, &user->locked_vm); |
Davidlohr Bueso | 70f8a3c | 2019-02-06 09:59:15 -0800 | [diff] [blame] | 6446 | atomic64_add(extra, &vma->vm_mm->pinned_vm); |
Peter Zijlstra | 45bfb2e | 2015-01-14 14:18:11 +0200 | [diff] [blame] | 6447 | |
Peter Zijlstra | ac9721f | 2010-05-27 12:54:41 +0200 | [diff] [blame] | 6448 | atomic_inc(&event->mmap_count); |
Peter Zijlstra | 45bfb2e | 2015-01-14 14:18:11 +0200 | [diff] [blame] | 6449 | } else if (rb) { |
| 6450 | atomic_dec(&rb->mmap_count); |
| 6451 | } |
| 6452 | aux_unlock: |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6453 | mutex_unlock(&event->mmap_mutex); |
| 6454 | |
Peter Zijlstra | 9bb5d40 | 2013-06-04 10:44:21 +0200 | [diff] [blame] | 6455 | /* |
| 6456 | * Since pinned accounting is per-vm, we cannot allow fork() to copy our |
| 6457 | * vma. |
| 6458 | */ |
Peter Zijlstra | 26cb63a | 2013-05-28 10:55:48 +0200 | [diff] [blame] | 6459 | vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6460 | vma->vm_ops = &perf_mmap_vmops; |
| 6461 | |
Andy Lutomirski | 1e0fb9e | 2014-10-24 15:58:10 -0700 | [diff] [blame] | 6462 | if (event->pmu->event_mapped) |
Peter Zijlstra | bfe33492 | 2017-08-02 19:39:30 +0200 | [diff] [blame] | 6463 | event->pmu->event_mapped(event, vma->vm_mm); |
Andy Lutomirski | 1e0fb9e | 2014-10-24 15:58:10 -0700 | [diff] [blame] | 6464 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6465 | return ret; |
| 6466 | } |
| 6467 | |
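From the user side, the layout perf_mmap() enforces is one metadata page (struct perf_event_mmap_page) followed by a power-of-two number of data pages, with the optional AUX area mapped separately at aux_offset. A sketch of mapping the main buffer and draining it with the usual data_head/data_tail convention (illustrative only; non-overwrite mode assumed):

#include <linux/perf_event.h>
#include <stdint.h>
#include <sys/mman.h>

#define DATA_PAGES      8       /* must be a power of two, as checked above */

static void *map_perf_buffer(int perf_fd, size_t page_size)
{
        /* 1 metadata page + DATA_PAGES data pages, matching the vma_size checks above. */
        return mmap(NULL, (1 + DATA_PAGES) * page_size,
                    PROT_READ | PROT_WRITE, MAP_SHARED, perf_fd, 0);
}

static void drain_records(struct perf_event_mmap_page *pc, size_t page_size)
{
        char *data = (char *)pc + page_size;
        uint64_t mask = DATA_PAGES * page_size - 1;
        uint64_t head = __atomic_load_n(&pc->data_head, __ATOMIC_ACQUIRE);
        uint64_t tail = pc->data_tail;

        while (tail < head) {
                struct perf_event_header *hdr =
                        (struct perf_event_header *)(data + (tail & mask));

                /* ... decode hdr->type / hdr->size; record bodies may wrap around ... */
                tail += hdr->size;
        }

        /* Tell the kernel how far we consumed, so it can reuse the space. */
        __atomic_store_n(&pc->data_tail, tail, __ATOMIC_RELEASE);
}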
| 6468 | static int perf_fasync(int fd, struct file *filp, int on) |
| 6469 | { |
Al Viro | 496ad9a | 2013-01-23 17:07:38 -0500 | [diff] [blame] | 6470 | struct inode *inode = file_inode(filp); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6471 | struct perf_event *event = filp->private_data; |
| 6472 | int retval; |
| 6473 | |
Al Viro | 5955102 | 2016-01-22 15:40:57 -0500 | [diff] [blame] | 6474 | inode_lock(inode); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6475 | retval = fasync_helper(fd, filp, on, &event->fasync); |
Al Viro | 5955102 | 2016-01-22 15:40:57 -0500 | [diff] [blame] | 6476 | inode_unlock(inode); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6477 | |
| 6478 | if (retval < 0) |
| 6479 | return retval; |
| 6480 | |
| 6481 | return 0; |
| 6482 | } |
| 6483 | |
| 6484 | static const struct file_operations perf_fops = { |
Arnd Bergmann | 3326c1c | 2010-03-23 19:09:33 +0100 | [diff] [blame] | 6485 | .llseek = no_llseek, |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6486 | .release = perf_release, |
| 6487 | .read = perf_read, |
| 6488 | .poll = perf_poll, |
| 6489 | .unlocked_ioctl = perf_ioctl, |
Pawel Moll | b3f2078 | 2014-06-13 16:03:32 +0100 | [diff] [blame] | 6490 | .compat_ioctl = perf_compat_ioctl, |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6491 | .mmap = perf_mmap, |
| 6492 | .fasync = perf_fasync, |
| 6493 | }; |
| 6494 | |
| 6495 | /* |
| 6496 | * Perf event wakeup |
| 6497 | * |
| 6498 | * If there's data, ensure we set the poll() state and publish everything |
| 6499 | * to user-space before waking everybody up. |
| 6500 | */ |
| 6501 | |
Peter Zijlstra | fed66e2cd | 2015-06-11 10:32:01 +0200 | [diff] [blame] | 6502 | static inline struct fasync_struct **perf_event_fasync(struct perf_event *event) |
| 6503 | { |
| 6504 | /* only the parent has fasync state */ |
| 6505 | if (event->parent) |
| 6506 | event = event->parent; |
| 6507 | return &event->fasync; |
| 6508 | } |
| 6509 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6510 | void perf_event_wakeup(struct perf_event *event) |
| 6511 | { |
Peter Zijlstra | 10c6db1 | 2011-11-26 02:47:31 +0100 | [diff] [blame] | 6512 | ring_buffer_wakeup(event); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6513 | |
| 6514 | if (event->pending_kill) { |
Peter Zijlstra | fed66e2cd | 2015-06-11 10:32:01 +0200 | [diff] [blame] | 6515 | kill_fasync(perf_event_fasync(event), SIGIO, event->pending_kill); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6516 | event->pending_kill = 0; |
| 6517 | } |
| 6518 | } |
| 6519 | |
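On the user-space side, these wakeups are observed either by sleeping in poll()/epoll on the event fd or by requesting SIGIO delivery through the fasync machinery set up in perf_fasync() above. A small illustrative sketch of both:

#include <fcntl.h>
#include <poll.h>
#include <unistd.h>

/* Block until perf_event_wakeup() marks the fd readable. */
static void wait_for_wakeup(int perf_fd)
{
        struct pollfd pfd = { .fd = perf_fd, .events = POLLIN };

        poll(&pfd, 1, -1);
}

/* Alternatively, ask for SIGIO via the fasync path (kill_fasync() above). */
static void request_sigio(int perf_fd)
{
        fcntl(perf_fd, F_SETOWN, getpid());
        fcntl(perf_fd, F_SETFL, fcntl(perf_fd, F_GETFL) | O_ASYNC);
}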
Marco Elver | 97ba62b | 2021-04-08 12:36:01 +0200 | [diff] [blame] | 6520 | static void perf_sigtrap(struct perf_event *event) |
| 6521 | { |
Marco Elver | 97ba62b | 2021-04-08 12:36:01 +0200 | [diff] [blame] | 6522 | /* |
| 6523 | * We'd expect this to only occur if the irq_work is delayed and either |
| 6524 | * ctx->task or current has changed in the meantime. This can be the |
| 6525 | * case on architectures that do not implement arch_irq_work_raise(). |
| 6526 | */ |
| 6527 | if (WARN_ON_ONCE(event->ctx->task != current)) |
| 6528 | return; |
| 6529 | |
| 6530 | /* |
| 6531 | * perf_pending_event() can race with the task exiting. |
| 6532 | */ |
| 6533 | if (current->flags & PF_EXITING) |
| 6534 | return; |
| 6535 | |
Eric W. Biederman | af5eeab | 2021-05-02 14:27:24 -0500 | [diff] [blame] | 6536 | force_sig_perf((void __user *)event->pending_addr, |
| 6537 | event->attr.type, event->attr.sig_data); |
Marco Elver | 97ba62b | 2021-04-08 12:36:01 +0200 | [diff] [blame] | 6538 | } |
| 6539 | |
Peter Zijlstra | 1d54ad9 | 2019-04-04 15:03:00 +0200 | [diff] [blame] | 6540 | static void perf_pending_event_disable(struct perf_event *event) |
| 6541 | { |
| 6542 | int cpu = READ_ONCE(event->pending_disable); |
| 6543 | |
| 6544 | if (cpu < 0) |
| 6545 | return; |
| 6546 | |
| 6547 | if (cpu == smp_processor_id()) { |
| 6548 | WRITE_ONCE(event->pending_disable, -1); |
Marco Elver | 97ba62b | 2021-04-08 12:36:01 +0200 | [diff] [blame] | 6549 | |
| 6550 | if (event->attr.sigtrap) { |
| 6551 | perf_sigtrap(event); |
| 6552 | atomic_set_release(&event->event_limit, 1); /* rearm event */ |
| 6553 | return; |
| 6554 | } |
| 6555 | |
Peter Zijlstra | 1d54ad9 | 2019-04-04 15:03:00 +0200 | [diff] [blame] | 6556 | perf_event_disable_local(event); |
| 6557 | return; |
| 6558 | } |
| 6559 | |
| 6560 | /* |
| 6561 | * CPU-A CPU-B |
| 6562 | * |
| 6563 | * perf_event_disable_inatomic() |
| 6564 | * @pending_disable = CPU-A; |
| 6565 | * irq_work_queue(); |
| 6566 | * |
| 6567 | * sched-out |
| 6568 | * @pending_disable = -1; |
| 6569 | * |
| 6570 | * sched-in |
| 6571 | * perf_event_disable_inatomic() |
| 6572 | * @pending_disable = CPU-B; |
| 6573 | * irq_work_queue(); // FAILS |
| 6574 | * |
| 6575 | * irq_work_run() |
| 6576 | * perf_pending_event() |
| 6577 | * |
| 6578 | * But the event runs on CPU-B and wants disabling there. |
| 6579 | */ |
| 6580 | irq_work_queue_on(&event->pending, cpu); |
| 6581 | } |
| 6582 | |
Peter Zijlstra | e360adb | 2010-10-14 14:01:34 +0800 | [diff] [blame] | 6583 | static void perf_pending_event(struct irq_work *entry) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6584 | { |
Peter Zijlstra | 1d54ad9 | 2019-04-04 15:03:00 +0200 | [diff] [blame] | 6585 | struct perf_event *event = container_of(entry, struct perf_event, pending); |
Peter Zijlstra | d525211 | 2015-02-19 18:03:11 +0100 | [diff] [blame] | 6586 | int rctx; |
| 6587 | |
| 6588 | rctx = perf_swevent_get_recursion_context(); |
| 6589 | /* |
| 6590 | * If we 'fail' here, that's OK, it means recursion is already disabled |
| 6591 | * and we won't recurse 'further'. |
| 6592 | */ |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6593 | |
Peter Zijlstra | 1d54ad9 | 2019-04-04 15:03:00 +0200 | [diff] [blame] | 6594 | perf_pending_event_disable(event); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6595 | |
| 6596 | if (event->pending_wakeup) { |
| 6597 | event->pending_wakeup = 0; |
| 6598 | perf_event_wakeup(event); |
| 6599 | } |
Peter Zijlstra | d525211 | 2015-02-19 18:03:11 +0100 | [diff] [blame] | 6600 | |
| 6601 | if (rctx >= 0) |
| 6602 | perf_swevent_put_recursion_context(rctx); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6603 | } |
| 6604 | |
Sean Christopherson | 2aef6f3 | 2021-11-11 02:07:29 +0000 | [diff] [blame] | 6605 | #ifdef CONFIG_GUEST_PERF_EVENTS |
Sean Christopherson | ff083a2 | 2021-11-11 02:07:22 +0000 | [diff] [blame] | 6606 | struct perf_guest_info_callbacks __rcu *perf_guest_cbs; |
Zhang, Yanmin | 39447b3 | 2010-04-19 13:32:41 +0800 | [diff] [blame] | 6607 | |
Sean Christopherson | 87b940a | 2021-11-11 02:07:30 +0000 | [diff] [blame] | 6608 | DEFINE_STATIC_CALL_RET0(__perf_guest_state, *perf_guest_cbs->state); |
| 6609 | DEFINE_STATIC_CALL_RET0(__perf_guest_get_ip, *perf_guest_cbs->get_ip); |
| 6610 | DEFINE_STATIC_CALL_RET0(__perf_guest_handle_intel_pt_intr, *perf_guest_cbs->handle_intel_pt_intr); |
| 6611 | |
Sean Christopherson | 2934e3d0 | 2021-11-11 02:07:25 +0000 | [diff] [blame] | 6612 | void perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *cbs) |
Zhang, Yanmin | 39447b3 | 2010-04-19 13:32:41 +0800 | [diff] [blame] | 6613 | { |
Sean Christopherson | ff083a2 | 2021-11-11 02:07:22 +0000 | [diff] [blame] | 6614 | if (WARN_ON_ONCE(rcu_access_pointer(perf_guest_cbs))) |
Sean Christopherson | 2934e3d0 | 2021-11-11 02:07:25 +0000 | [diff] [blame] | 6615 | return; |
Sean Christopherson | ff083a2 | 2021-11-11 02:07:22 +0000 | [diff] [blame] | 6616 | |
| 6617 | rcu_assign_pointer(perf_guest_cbs, cbs); |
Sean Christopherson | 87b940a | 2021-11-11 02:07:30 +0000 | [diff] [blame] | 6618 | static_call_update(__perf_guest_state, cbs->state); |
| 6619 | static_call_update(__perf_guest_get_ip, cbs->get_ip); |
| 6620 | |
| 6621 | /* Implementing ->handle_intel_pt_intr is optional. */ |
| 6622 | if (cbs->handle_intel_pt_intr) |
| 6623 | static_call_update(__perf_guest_handle_intel_pt_intr, |
| 6624 | cbs->handle_intel_pt_intr); |
Zhang, Yanmin | 39447b3 | 2010-04-19 13:32:41 +0800 | [diff] [blame] | 6625 | } |
| 6626 | EXPORT_SYMBOL_GPL(perf_register_guest_info_callbacks); |
| 6627 | |
Sean Christopherson | 2934e3d0 | 2021-11-11 02:07:25 +0000 | [diff] [blame] | 6628 | void perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *cbs) |
Zhang, Yanmin | 39447b3 | 2010-04-19 13:32:41 +0800 | [diff] [blame] | 6629 | { |
Sean Christopherson | ff083a2 | 2021-11-11 02:07:22 +0000 | [diff] [blame] | 6630 | if (WARN_ON_ONCE(rcu_access_pointer(perf_guest_cbs) != cbs)) |
Sean Christopherson | 2934e3d0 | 2021-11-11 02:07:25 +0000 | [diff] [blame] | 6631 | return; |
Sean Christopherson | ff083a2 | 2021-11-11 02:07:22 +0000 | [diff] [blame] | 6632 | |
| 6633 | rcu_assign_pointer(perf_guest_cbs, NULL); |
Sean Christopherson | 87b940a | 2021-11-11 02:07:30 +0000 | [diff] [blame] | 6634 | static_call_update(__perf_guest_state, (void *)&__static_call_return0); |
| 6635 | static_call_update(__perf_guest_get_ip, (void *)&__static_call_return0); |
| 6636 | static_call_update(__perf_guest_handle_intel_pt_intr, |
| 6637 | (void *)&__static_call_return0); |
Sean Christopherson | ff083a2 | 2021-11-11 02:07:22 +0000 | [diff] [blame] | 6638 | synchronize_rcu(); |
Zhang, Yanmin | 39447b3 | 2010-04-19 13:32:41 +0800 | [diff] [blame] | 6639 | } |
| 6640 | EXPORT_SYMBOL_GPL(perf_unregister_guest_info_callbacks); |
Sean Christopherson | 2aef6f3 | 2021-11-11 02:07:29 +0000 | [diff] [blame] | 6641 | #endif |
Zhang, Yanmin | 39447b3 | 2010-04-19 13:32:41 +0800 | [diff] [blame] | 6642 | |
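Only one set of guest callbacks can be registered at a time; a hypervisor (KVM is the in-tree user) installs them once and the static calls above are patched to its helpers. A sketch of a hypothetical registration, assuming the callback signatures declared for struct perf_guest_info_callbacks in <linux/perf_event.h>; the my_* names are illustrative only:

/* Hypothetical guest-state helpers; names and bodies are illustrative only. */
static unsigned int my_guest_state(void)
{
        return 0;       /* 0: not in a guest; else PERF_GUEST_ACTIVE (| PERF_GUEST_USER) */
}

static unsigned long my_guest_get_ip(void)
{
        return 0;       /* guest instruction pointer for samples taken in guest mode */
}

static struct perf_guest_info_callbacks my_guest_cbs = {
        .state  = my_guest_state,
        .get_ip = my_guest_get_ip,
        /* .handle_intel_pt_intr is optional, as noted in the register path above. */
};

static void my_hypervisor_init(void)
{
        perf_register_guest_info_callbacks(&my_guest_cbs);
}

static void my_hypervisor_exit(void)
{
        perf_unregister_guest_info_callbacks(&my_guest_cbs);
}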
Jiri Olsa | 4018994 | 2012-08-07 15:20:37 +0200 | [diff] [blame] | 6643 | static void |
| 6644 | perf_output_sample_regs(struct perf_output_handle *handle, |
| 6645 | struct pt_regs *regs, u64 mask) |
| 6646 | { |
| 6647 | int bit; |
Madhavan Srinivasan | 29dd328 | 2016-08-17 15:06:08 +0530 | [diff] [blame] | 6648 | DECLARE_BITMAP(_mask, 64); |
Jiri Olsa | 4018994 | 2012-08-07 15:20:37 +0200 | [diff] [blame] | 6649 | |
Madhavan Srinivasan | 29dd328 | 2016-08-17 15:06:08 +0530 | [diff] [blame] | 6650 | bitmap_from_u64(_mask, mask); |
| 6651 | for_each_set_bit(bit, _mask, sizeof(mask) * BITS_PER_BYTE) { |
Jiri Olsa | 4018994 | 2012-08-07 15:20:37 +0200 | [diff] [blame] | 6652 | u64 val; |
| 6653 | |
| 6654 | val = perf_reg_value(regs, bit); |
| 6655 | perf_output_put(handle, val); |
| 6656 | } |
| 6657 | } |
| 6658 | |
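The loop above defines the wire format of a sampled register block: one u64 per bit set in the requested mask, written in ascending bit order and preceded by a u64 abi word (set by perf_sample_regs_user()/perf_sample_regs_intr() below; no registers follow when the abi is PERF_SAMPLE_REGS_ABI_NONE). A sketch of the matching user-space decode, assuming the cursor already points at that abi word within a sample record:

#include <linux/perf_event.h>
#include <stdint.h>

struct sampled_regs {
        uint64_t abi;           /* PERF_SAMPLE_REGS_ABI_{NONE,32,64} */
        uint64_t regs[64];      /* one value per bit set in the sample_regs mask */
        int      nr;
};

/* Illustrative decode of the payload produced by perf_output_sample_regs(). */
static const uint64_t *decode_sample_regs(const uint64_t *p, uint64_t mask,
                                          struct sampled_regs *out)
{
        out->abi = *p++;
        out->nr = 0;

        if (out->abi != PERF_SAMPLE_REGS_ABI_NONE) {
                for (int bit = 0; bit < 64; bit++) {
                        if (mask & (1ULL << bit))
                                out->regs[out->nr++] = *p++;    /* ascending bit order */
                }
        }

        return p;       /* next field of the sample record */
}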
Stephane Eranian | 60e2364 | 2014-09-24 13:48:37 +0200 | [diff] [blame] | 6659 | static void perf_sample_regs_user(struct perf_regs *regs_user, |
Peter Zijlstra | 76a4efa | 2020-10-30 12:14:21 +0100 | [diff] [blame] | 6660 | struct pt_regs *regs) |
Jiri Olsa | 4018994 | 2012-08-07 15:20:37 +0200 | [diff] [blame] | 6661 | { |
Andy Lutomirski | 88a7c26 | 2015-01-04 10:36:19 -0800 | [diff] [blame] | 6662 | if (user_mode(regs)) { |
| 6663 | regs_user->abi = perf_reg_abi(current); |
Peter Zijlstra | 2565711 | 2014-09-24 13:48:42 +0200 | [diff] [blame] | 6664 | regs_user->regs = regs; |
Peter Zijlstra | 085ebfe | 2019-05-29 14:37:24 +0200 | [diff] [blame] | 6665 | } else if (!(current->flags & PF_KTHREAD)) { |
Peter Zijlstra | 76a4efa | 2020-10-30 12:14:21 +0100 | [diff] [blame] | 6666 | perf_get_regs_user(regs_user, regs); |
Peter Zijlstra | 2565711 | 2014-09-24 13:48:42 +0200 | [diff] [blame] | 6667 | } else { |
| 6668 | regs_user->abi = PERF_SAMPLE_REGS_ABI_NONE; |
| 6669 | regs_user->regs = NULL; |
Jiri Olsa | 4018994 | 2012-08-07 15:20:37 +0200 | [diff] [blame] | 6670 | } |
| 6671 | } |
| 6672 | |
Stephane Eranian | 60e2364 | 2014-09-24 13:48:37 +0200 | [diff] [blame] | 6673 | static void perf_sample_regs_intr(struct perf_regs *regs_intr, |
| 6674 | struct pt_regs *regs) |
| 6675 | { |
| 6676 | regs_intr->regs = regs; |
| 6677 | regs_intr->abi = perf_reg_abi(current); |
| 6678 | } |
| 6679 | |
| 6680 | |
Jiri Olsa | c5ebced | 2012-08-07 15:20:40 +0200 | [diff] [blame] | 6681 | /* |
| 6682 | * Get remaining task size from user stack pointer. |
| 6683 | * |
| 6684 | * It'd be better to take the stack vma map and limit this more |
Roy Ben Shlomo | 9f014e3 | 2019-09-20 20:12:53 +0300 | [diff] [blame] | 6685 | * precisely, but there's no way to get it safely under interrupt, |
Jiri Olsa | c5ebced | 2012-08-07 15:20:40 +0200 | [diff] [blame] | 6686 | * so we use TASK_SIZE as the limit. |
| 6687 | */ |
| 6688 | static u64 perf_ustack_task_size(struct pt_regs *regs) |
| 6689 | { |
| 6690 | unsigned long addr = perf_user_stack_pointer(regs); |
| 6691 | |
| 6692 | if (!addr || addr >= TASK_SIZE) |
| 6693 | return 0; |
| 6694 | |
| 6695 | return TASK_SIZE - addr; |
| 6696 | } |
| 6697 | |
| 6698 | static u16 |
| 6699 | perf_sample_ustack_size(u16 stack_size, u16 header_size, |
| 6700 | struct pt_regs *regs) |
| 6701 | { |
| 6702 | u64 task_size; |
| 6703 | |
| 6704 | /* No regs, no stack pointer, no dump. */ |
| 6705 | if (!regs) |
| 6706 | return 0; |
| 6707 | |
| 6708 | /* |
| 6709 | * Check whether the requested stack size fits into: |
| 6710 | * - TASK_SIZE |
| 6711 | *   If it doesn't, we limit the size to TASK_SIZE. |
| 6712 | * |
| 6713 | * - the remaining sample size |
| 6714 | *   If it doesn't, we shrink the stack size to |
| 6715 | *   fit into the remaining sample size. |
| 6716 | */ |
| 6717 | |
| 6718 | task_size = min((u64) USHRT_MAX, perf_ustack_task_size(regs)); |
| 6719 | stack_size = min(stack_size, (u16) task_size); |
| 6720 | |
| 6721 | /* Current header size plus static size and dynamic size. */ |
| 6722 | header_size += 2 * sizeof(u64); |
| 6723 | |
| 6724 | /* Do we fit in with the current stack dump size? */ |
| 6725 | if ((u16) (header_size + stack_size) < header_size) { |
| 6726 | /* |
| 6727 | * If we overflow the maximum size for the sample, |
| 6728 | * we customize the stack dump size to fit in. |
| 6729 | */ |
| 6730 | stack_size = USHRT_MAX - header_size - sizeof(u64); |
| 6731 | stack_size = round_up(stack_size, sizeof(u64)); |
| 6732 | } |
| 6733 | |
| 6734 | return stack_size; |
| 6735 | } |
| 6736 | |
| 6737 | static void |
| 6738 | perf_output_sample_ustack(struct perf_output_handle *handle, u64 dump_size, |
| 6739 | struct pt_regs *regs) |
| 6740 | { |
| 6741 | /* Case of a kernel thread, nothing to dump */ |
| 6742 | if (!regs) { |
| 6743 | u64 size = 0; |
| 6744 | perf_output_put(handle, size); |
| 6745 | } else { |
| 6746 | unsigned long sp; |
| 6747 | unsigned int rem; |
| 6748 | u64 dyn_size; |
Yabin Cui | 02e1844 | 2018-08-23 15:59:35 -0700 | [diff] [blame] | 6749 | mm_segment_t fs; |
Jiri Olsa | c5ebced | 2012-08-07 15:20:40 +0200 | [diff] [blame] | 6750 | |
| 6751 | /* |
| 6752 | * We dump: |
| 6753 | * static size |
| 6754 | *  - the size requested by the user or the best one we can fit |
| 6755 | *    into the sample max size |
| 6756 | * data |
| 6757 | * - user stack dump data |
| 6758 | * dynamic size |
| 6759 | * - the actual dumped size |
| 6760 | */ |
| 6761 | |
| 6762 | /* Static size. */ |
| 6763 | perf_output_put(handle, dump_size); |
| 6764 | |
| 6765 | /* Data. */ |
| 6766 | sp = perf_user_stack_pointer(regs); |
Christoph Hellwig | 3d13f31 | 2020-08-11 18:33:47 -0700 | [diff] [blame] | 6767 | fs = force_uaccess_begin(); |
Jiri Olsa | c5ebced | 2012-08-07 15:20:40 +0200 | [diff] [blame] | 6768 | rem = __output_copy_user(handle, (void *) sp, dump_size); |
Christoph Hellwig | 3d13f31 | 2020-08-11 18:33:47 -0700 | [diff] [blame] | 6769 | force_uaccess_end(fs); |
Jiri Olsa | c5ebced | 2012-08-07 15:20:40 +0200 | [diff] [blame] | 6770 | dyn_size = dump_size - rem; |
| 6771 | |
| 6772 | perf_output_skip(handle, rem); |
| 6773 | |
| 6774 | /* Dynamic size. */ |
| 6775 | perf_output_put(handle, dyn_size); |
| 6776 | } |
| 6777 | } |
| 6778 | |
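The record layout this produces is therefore a u64 size word (0 when there was nothing to dump, e.g. for a kernel thread); when it is non-zero, it is followed by size raw bytes copied upward from the sampled user stack pointer and then a u64 dyn_size giving how much was actually captured. A decoding sketch under that assumption:

#include <stdint.h>
#include <string.h>

struct ustack_dump {
        const void *data;       /* raw bytes copied from the user stack pointer upward */
        uint64_t    dyn_size;   /* bytes actually captured (<= the static size word) */
};

/* Illustrative decode of the layout written by perf_output_sample_ustack(). */
static const void *decode_sample_ustack(const void *p, struct ustack_dump *out)
{
        uint64_t size;

        memcpy(&size, p, sizeof(size));
        p = (const char *)p + sizeof(size);

        if (!size) {            /* nothing was dumped (e.g. kernel thread) */
                out->data = NULL;
                out->dyn_size = 0;
                return p;
        }

        out->data = p;
        p = (const char *)p + size;

        memcpy(&out->dyn_size, p, sizeof(out->dyn_size));
        return (const char *)p + sizeof(out->dyn_size);
}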
Alexander Shishkin | a4faf00 | 2019-10-25 17:08:33 +0300 | [diff] [blame] | 6779 | static unsigned long perf_prepare_sample_aux(struct perf_event *event, |
| 6780 | struct perf_sample_data *data, |
| 6781 | size_t size) |
| 6782 | { |
| 6783 | struct perf_event *sampler = event->aux_event; |
Steven Rostedt (VMware) | 56de4e8 | 2019-12-13 13:21:30 -0500 | [diff] [blame] | 6784 | struct perf_buffer *rb; |
Alexander Shishkin | a4faf00 | 2019-10-25 17:08:33 +0300 | [diff] [blame] | 6785 | |
| 6786 | data->aux_size = 0; |
| 6787 | |
| 6788 | if (!sampler) |
| 6789 | goto out; |
| 6790 | |
| 6791 | if (WARN_ON_ONCE(READ_ONCE(sampler->state) != PERF_EVENT_STATE_ACTIVE)) |
| 6792 | goto out; |
| 6793 | |
| 6794 | if (WARN_ON_ONCE(READ_ONCE(sampler->oncpu) != smp_processor_id())) |
| 6795 | goto out; |
| 6796 | |
James Clark | 961c391 | 2021-12-06 11:38:40 +0000 | [diff] [blame] | 6797 | rb = ring_buffer_get(sampler); |
Alexander Shishkin | a4faf00 | 2019-10-25 17:08:33 +0300 | [diff] [blame] | 6798 | if (!rb) |
| 6799 | goto out; |
| 6800 | |
| 6801 | /* |
| 6802 | * If this is an NMI hit inside sampling code, don't take |
| 6803 | * the sample. See also perf_aux_sample_output(). |
| 6804 | */ |
| 6805 | if (READ_ONCE(rb->aux_in_sampling)) { |
| 6806 | data->aux_size = 0; |
| 6807 | } else { |
| 6808 | size = min_t(size_t, size, perf_aux_size(rb)); |
| 6809 | data->aux_size = ALIGN(size, sizeof(u64)); |
| 6810 | } |
| 6811 | ring_buffer_put(rb); |
| 6812 | |
| 6813 | out: |
| 6814 | return data->aux_size; |
| 6815 | } |
| 6816 | |
Haocheng Xie | 32961ae | 2021-05-27 11:19:45 +0800 | [diff] [blame] | 6817 | static long perf_pmu_snapshot_aux(struct perf_buffer *rb, |
| 6818 | struct perf_event *event, |
| 6819 | struct perf_output_handle *handle, |
| 6820 | unsigned long size) |
Alexander Shishkin | a4faf00 | 2019-10-25 17:08:33 +0300 | [diff] [blame] | 6821 | { |
| 6822 | unsigned long flags; |
| 6823 | long ret; |
| 6824 | |
| 6825 | /* |
| 6826 | * Normal ->start()/->stop() callbacks run in IRQ mode in scheduler |
| 6827 | * paths. If we start calling them in NMI context, they may race with |
| 6828 | * the IRQ ones, for example by re-starting an event that has just |
| 6829 | * been stopped, which is why we use a separate callback that |
| 6830 | * doesn't change the event state. |
| 6831 | * |
| 6832 | * IRQs need to be disabled to prevent IPIs from racing with us. |
| 6833 | */ |
| 6834 | local_irq_save(flags); |
| 6835 | /* |
| 6836 | * Guard against NMI hits inside the critical section; |
| 6837 | * see also perf_prepare_sample_aux(). |
| 6838 | */ |
| 6839 | WRITE_ONCE(rb->aux_in_sampling, 1); |
| 6840 | barrier(); |
| 6841 | |
| 6842 | ret = event->pmu->snapshot_aux(event, handle, size); |
| 6843 | |
| 6844 | barrier(); |
| 6845 | WRITE_ONCE(rb->aux_in_sampling, 0); |
| 6846 | local_irq_restore(flags); |
| 6847 | |
| 6848 | return ret; |
| 6849 | } |
| 6850 | |
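| | /* |
| |  * Emit the PERF_SAMPLE_AUX payload: snapshot up to data->aux_size bytes of |
| |  * the sampler's AUX buffer into the handle and zero-pad the remainder up |
| |  * to the u64-aligned size computed in perf_prepare_sample_aux(). |
| |  */ |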
| 6851 | static void perf_aux_sample_output(struct perf_event *event, |
| 6852 | struct perf_output_handle *handle, |
| 6853 | struct perf_sample_data *data) |
| 6854 | { |
| 6855 | struct perf_event *sampler = event->aux_event; |
Steven Rostedt (VMware) | 56de4e8 | 2019-12-13 13:21:30 -0500 | [diff] [blame] | 6856 | struct perf_buffer *rb; |
Alexander Shishkin | a4faf00 | 2019-10-25 17:08:33 +0300 | [diff] [blame] | 6857 | unsigned long pad; |
Alexander Shishkin | a4faf00 | 2019-10-25 17:08:33 +0300 | [diff] [blame] | 6858 | long size; |
| 6859 | |
| 6860 | if (WARN_ON_ONCE(!sampler || !data->aux_size)) |
| 6861 | return; |
| 6862 | |
James Clark | 961c391 | 2021-12-06 11:38:40 +0000 | [diff] [blame] | 6863 | rb = ring_buffer_get(sampler); |
Alexander Shishkin | a4faf00 | 2019-10-25 17:08:33 +0300 | [diff] [blame] | 6864 | if (!rb) |
| 6865 | return; |
| 6866 | |
| 6867 | size = perf_pmu_snapshot_aux(rb, sampler, handle, data->aux_size); |
| 6868 | |
| 6869 | /* |
| 6870 | * An error here means that perf_output_copy() failed (returned a |
| 6871 | * non-zero surplus that it didn't copy), which in its current |
| 6872 | * enlightened implementation is not possible. If that changes, we'd |
| 6873 | * like to know. |
| 6874 | */ |
| 6875 | if (WARN_ON_ONCE(size < 0)) |
| 6876 | goto out_put; |
| 6877 | |
| 6878 | /* |
| 6879 | * The pad comes from ALIGN()ing data->aux_size up to u64 in |
| 6880 | * perf_prepare_sample_aux(), so it should not be more than that. |
| 6881 | */ |
| 6882 | pad = data->aux_size - size; |
| 6883 | if (WARN_ON_ONCE(pad >= sizeof(u64))) |
| 6884 | pad = 8; |
| 6885 | |
| 6886 | if (pad) { |
| 6887 | u64 zero = 0; |
| 6888 | perf_output_copy(handle, &zero, pad); |
| 6889 | } |
| 6890 | |
| 6891 | out_put: |
| 6892 | ring_buffer_put(rb); |
| 6893 | } |
| 6894 | |
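| | /* |
| |  * Capture the sample_id_all fields (TID, TIME, ID, STREAM_ID, CPU) that |
| |  * are appended to non-sample records; the space for them is added to |
| |  * header->size via event->id_header_size. |
| |  */ |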
Arnaldo Carvalho de Melo | c980d10 | 2010-12-04 23:02:20 -0200 | [diff] [blame] | 6895 | static void __perf_event_header__init_id(struct perf_event_header *header, |
| 6896 | struct perf_sample_data *data, |
| 6897 | struct perf_event *event) |
Arnaldo Carvalho de Melo | 6844c09 | 2010-12-03 16:36:35 -0200 | [diff] [blame] | 6898 | { |
| 6899 | u64 sample_type = event->attr.sample_type; |
| 6900 | |
| 6901 | data->type = sample_type; |
| 6902 | header->size += event->id_header_size; |
| 6903 | |
| 6904 | if (sample_type & PERF_SAMPLE_TID) { |
| 6905 | /* namespace issues */ |
| 6906 | data->tid_entry.pid = perf_event_pid(event, current); |
| 6907 | data->tid_entry.tid = perf_event_tid(event, current); |
| 6908 | } |
| 6909 | |
| 6910 | if (sample_type & PERF_SAMPLE_TIME) |
Peter Zijlstra | 34f4392 | 2015-02-20 14:05:38 +0100 | [diff] [blame] | 6911 | data->time = perf_event_clock(event); |
Arnaldo Carvalho de Melo | 6844c09 | 2010-12-03 16:36:35 -0200 | [diff] [blame] | 6912 | |
Adrian Hunter | ff3d527 | 2013-08-27 11:23:07 +0300 | [diff] [blame] | 6913 | if (sample_type & (PERF_SAMPLE_ID | PERF_SAMPLE_IDENTIFIER)) |
Arnaldo Carvalho de Melo | 6844c09 | 2010-12-03 16:36:35 -0200 | [diff] [blame] | 6914 | data->id = primary_event_id(event); |
| 6915 | |
| 6916 | if (sample_type & PERF_SAMPLE_STREAM_ID) |
| 6917 | data->stream_id = event->id; |
| 6918 | |
| 6919 | if (sample_type & PERF_SAMPLE_CPU) { |
| 6920 | data->cpu_entry.cpu = raw_smp_processor_id(); |
| 6921 | data->cpu_entry.reserved = 0; |
| 6922 | } |
| 6923 | } |
| 6924 | |
Frederic Weisbecker | 7636913 | 2011-05-19 19:55:04 +0200 | [diff] [blame] | 6925 | void perf_event_header__init_id(struct perf_event_header *header, |
| 6926 | struct perf_sample_data *data, |
| 6927 | struct perf_event *event) |
Arnaldo Carvalho de Melo | c980d10 | 2010-12-04 23:02:20 -0200 | [diff] [blame] | 6928 | { |
| 6929 | if (event->attr.sample_id_all) |
| 6930 | __perf_event_header__init_id(header, data, event); |
| 6931 | } |
| 6932 | |
| 6933 | static void __perf_event__output_id_sample(struct perf_output_handle *handle, |
| 6934 | struct perf_sample_data *data) |
| 6935 | { |
| 6936 | u64 sample_type = data->type; |
| 6937 | |
| 6938 | if (sample_type & PERF_SAMPLE_TID) |
| 6939 | perf_output_put(handle, data->tid_entry); |
| 6940 | |
| 6941 | if (sample_type & PERF_SAMPLE_TIME) |
| 6942 | perf_output_put(handle, data->time); |
| 6943 | |
| 6944 | if (sample_type & PERF_SAMPLE_ID) |
| 6945 | perf_output_put(handle, data->id); |
| 6946 | |
| 6947 | if (sample_type & PERF_SAMPLE_STREAM_ID) |
| 6948 | perf_output_put(handle, data->stream_id); |
| 6949 | |
| 6950 | if (sample_type & PERF_SAMPLE_CPU) |
| 6951 | perf_output_put(handle, data->cpu_entry); |
Adrian Hunter | ff3d527 | 2013-08-27 11:23:07 +0300 | [diff] [blame] | 6952 | |
| 6953 | if (sample_type & PERF_SAMPLE_IDENTIFIER) |
| 6954 | perf_output_put(handle, data->id); |
Arnaldo Carvalho de Melo | c980d10 | 2010-12-04 23:02:20 -0200 | [diff] [blame] | 6955 | } |
| 6956 | |
Frederic Weisbecker | 7636913 | 2011-05-19 19:55:04 +0200 | [diff] [blame] | 6957 | void perf_event__output_id_sample(struct perf_event *event, |
| 6958 | struct perf_output_handle *handle, |
| 6959 | struct perf_sample_data *sample) |
Arnaldo Carvalho de Melo | c980d10 | 2010-12-04 23:02:20 -0200 | [diff] [blame] | 6960 | { |
| 6961 | if (event->attr.sample_id_all) |
| 6962 | __perf_event__output_id_sample(handle, sample); |
| 6963 | } |
| 6964 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6965 | static void perf_output_read_one(struct perf_output_handle *handle, |
Stephane Eranian | eed0152 | 2010-10-26 16:08:01 +0200 | [diff] [blame] | 6966 | struct perf_event *event, |
| 6967 | u64 enabled, u64 running) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6968 | { |
| 6969 | u64 read_format = event->attr.read_format; |
| 6970 | u64 values[4]; |
| 6971 | int n = 0; |
| 6972 | |
Peter Zijlstra | b5e5879 | 2010-05-21 14:43:12 +0200 | [diff] [blame] | 6973 | values[n++] = perf_event_count(event); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6974 | if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) { |
Stephane Eranian | eed0152 | 2010-10-26 16:08:01 +0200 | [diff] [blame] | 6975 | values[n++] = enabled + |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6976 | atomic64_read(&event->child_total_time_enabled); |
| 6977 | } |
| 6978 | if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) { |
Stephane Eranian | eed0152 | 2010-10-26 16:08:01 +0200 | [diff] [blame] | 6979 | values[n++] = running + |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6980 | atomic64_read(&event->child_total_time_running); |
| 6981 | } |
| 6982 | if (read_format & PERF_FORMAT_ID) |
| 6983 | values[n++] = primary_event_id(event); |
| 6984 | |
Frederic Weisbecker | 7636913 | 2011-05-19 19:55:04 +0200 | [diff] [blame] | 6985 | __output_copy(handle, values, n * sizeof(u64)); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6986 | } |
| 6987 | |
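| | /* |
| |  * Emit a PERF_FORMAT_GROUP read value.  The resulting layout is roughly: |
| |  * |
| |  *   { u64 nr; |
| |  *     { u64 time_enabled; } && PERF_FORMAT_TOTAL_TIME_ENABLED |
| |  *     { u64 time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING |
| |  *     { u64 value; { u64 id; } && PERF_FORMAT_ID } cntr[nr]; |
| |  *   } |
| |  */ |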
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6988 | static void perf_output_read_group(struct perf_output_handle *handle, |
Stephane Eranian | eed0152 | 2010-10-26 16:08:01 +0200 | [diff] [blame] | 6989 | struct perf_event *event, |
| 6990 | u64 enabled, u64 running) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 6991 | { |
| 6992 | struct perf_event *leader = event->group_leader, *sub; |
| 6993 | u64 read_format = event->attr.read_format; |
| 6994 | u64 values[5]; |
| 6995 | int n = 0; |
| 6996 | |
| 6997 | values[n++] = 1 + leader->nr_siblings; |
| 6998 | |
| 6999 | if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) |
Stephane Eranian | eed0152 | 2010-10-26 16:08:01 +0200 | [diff] [blame] | 7000 | values[n++] = enabled; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7001 | |
| 7002 | if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) |
Stephane Eranian | eed0152 | 2010-10-26 16:08:01 +0200 | [diff] [blame] | 7003 | values[n++] = running; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7004 | |
Peter Zijlstra | 9e5b127 | 2018-03-09 12:52:04 +0100 | [diff] [blame] | 7005 | if ((leader != event) && |
| 7006 | (leader->state == PERF_EVENT_STATE_ACTIVE)) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7007 | leader->pmu->read(leader); |
| 7008 | |
Peter Zijlstra | b5e5879 | 2010-05-21 14:43:12 +0200 | [diff] [blame] | 7009 | values[n++] = perf_event_count(leader); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7010 | if (read_format & PERF_FORMAT_ID) |
| 7011 | values[n++] = primary_event_id(leader); |
| 7012 | |
Frederic Weisbecker | 7636913 | 2011-05-19 19:55:04 +0200 | [diff] [blame] | 7013 | __output_copy(handle, values, n * sizeof(u64)); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7014 | |
Peter Zijlstra | edb3959 | 2018-03-15 17:36:56 +0100 | [diff] [blame] | 7015 | for_each_sibling_event(sub, leader) { |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7016 | n = 0; |
| 7017 | |
Jiri Olsa | 6f5ab00 | 2012-10-15 20:13:45 +0200 | [diff] [blame] | 7018 | if ((sub != event) && |
| 7019 | (sub->state == PERF_EVENT_STATE_ACTIVE)) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7020 | sub->pmu->read(sub); |
| 7021 | |
Peter Zijlstra | b5e5879 | 2010-05-21 14:43:12 +0200 | [diff] [blame] | 7022 | values[n++] = perf_event_count(sub); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7023 | if (read_format & PERF_FORMAT_ID) |
| 7024 | values[n++] = primary_event_id(sub); |
| 7025 | |
Frederic Weisbecker | 7636913 | 2011-05-19 19:55:04 +0200 | [diff] [blame] | 7026 | __output_copy(handle, values, n * sizeof(u64)); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7027 | } |
| 7028 | } |
| 7029 | |
Stephane Eranian | eed0152 | 2010-10-26 16:08:01 +0200 | [diff] [blame] | 7030 | #define PERF_FORMAT_TOTAL_TIMES (PERF_FORMAT_TOTAL_TIME_ENABLED|\ |
| 7031 | PERF_FORMAT_TOTAL_TIME_RUNNING) |
| 7032 | |
Peter Zijlstra | ba5213a | 2017-05-30 11:45:12 +0200 | [diff] [blame] | 7033 | /* |
| 7034 | * XXX PERF_SAMPLE_READ vs inherited events seems difficult. |
| 7035 | * |
| 7036 | * The problem is that it's both hard and excessively expensive to iterate the |
| 7037 | * child list, not to mention that it's impossible to IPI the children running |
| 7038 | * on another CPU, from interrupt/NMI context. |
| 7039 | */ |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7040 | static void perf_output_read(struct perf_output_handle *handle, |
| 7041 | struct perf_event *event) |
| 7042 | { |
Peter Zijlstra | e3f3541 | 2011-11-21 11:43:53 +0100 | [diff] [blame] | 7043 | u64 enabled = 0, running = 0, now; |
Stephane Eranian | eed0152 | 2010-10-26 16:08:01 +0200 | [diff] [blame] | 7044 | u64 read_format = event->attr.read_format; |
| 7045 | |
| 7046 | /* |
| 7047 | * compute total_time_enabled, total_time_running |
| 7048 | * based on snapshot values taken when the event |
| 7049 | * was last scheduled in. |
| 7050 | * |
| 7051 | * we cannot simply call update_context_time() |
| 7052 | * because of locking issues, as we are called in |
| 7053 | * NMI context |
| 7054 | */ |
Eric B Munson | c479429 | 2011-06-23 16:34:38 -0400 | [diff] [blame] | 7055 | if (read_format & PERF_FORMAT_TOTAL_TIMES) |
Peter Zijlstra | e3f3541 | 2011-11-21 11:43:53 +0100 | [diff] [blame] | 7056 | calc_timer_values(event, &now, &enabled, &running); |
Stephane Eranian | eed0152 | 2010-10-26 16:08:01 +0200 | [diff] [blame] | 7057 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7058 | if (event->attr.read_format & PERF_FORMAT_GROUP) |
Stephane Eranian | eed0152 | 2010-10-26 16:08:01 +0200 | [diff] [blame] | 7059 | perf_output_read_group(handle, event, enabled, running); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7060 | else |
Stephane Eranian | eed0152 | 2010-10-26 16:08:01 +0200 | [diff] [blame] | 7061 | perf_output_read_one(handle, event, enabled, running); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7062 | } |
| 7063 | |
Kan Liang | bbfd5e4 | 2020-01-27 08:53:54 -0800 | [diff] [blame] | 7064 | static inline bool perf_sample_save_hw_index(struct perf_event *event) |
| 7065 | { |
| 7066 | return event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_HW_INDEX; |
| 7067 | } |
| 7068 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7069 | void perf_output_sample(struct perf_output_handle *handle, |
| 7070 | struct perf_event_header *header, |
| 7071 | struct perf_sample_data *data, |
| 7072 | struct perf_event *event) |
| 7073 | { |
| 7074 | u64 sample_type = data->type; |
| 7075 | |
| 7076 | perf_output_put(handle, *header); |
| 7077 | |
Adrian Hunter | ff3d527 | 2013-08-27 11:23:07 +0300 | [diff] [blame] | 7078 | if (sample_type & PERF_SAMPLE_IDENTIFIER) |
| 7079 | perf_output_put(handle, data->id); |
| 7080 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7081 | if (sample_type & PERF_SAMPLE_IP) |
| 7082 | perf_output_put(handle, data->ip); |
| 7083 | |
| 7084 | if (sample_type & PERF_SAMPLE_TID) |
| 7085 | perf_output_put(handle, data->tid_entry); |
| 7086 | |
| 7087 | if (sample_type & PERF_SAMPLE_TIME) |
| 7088 | perf_output_put(handle, data->time); |
| 7089 | |
| 7090 | if (sample_type & PERF_SAMPLE_ADDR) |
| 7091 | perf_output_put(handle, data->addr); |
| 7092 | |
| 7093 | if (sample_type & PERF_SAMPLE_ID) |
| 7094 | perf_output_put(handle, data->id); |
| 7095 | |
| 7096 | if (sample_type & PERF_SAMPLE_STREAM_ID) |
| 7097 | perf_output_put(handle, data->stream_id); |
| 7098 | |
| 7099 | if (sample_type & PERF_SAMPLE_CPU) |
| 7100 | perf_output_put(handle, data->cpu_entry); |
| 7101 | |
| 7102 | if (sample_type & PERF_SAMPLE_PERIOD) |
| 7103 | perf_output_put(handle, data->period); |
| 7104 | |
| 7105 | if (sample_type & PERF_SAMPLE_READ) |
| 7106 | perf_output_read(handle, event); |
| 7107 | |
| 7108 | if (sample_type & PERF_SAMPLE_CALLCHAIN) { |
Jiri Olsa | 99e818c | 2018-01-07 17:03:50 +0100 | [diff] [blame] | 7109 | int size = 1; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7110 | |
Jiri Olsa | 99e818c | 2018-01-07 17:03:50 +0100 | [diff] [blame] | 7111 | size += data->callchain->nr; |
| 7112 | size *= sizeof(u64); |
| 7113 | __output_copy(handle, data->callchain, size); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7114 | } |
| 7115 | |
| 7116 | if (sample_type & PERF_SAMPLE_RAW) { |
Daniel Borkmann | 7e3f977 | 2016-07-14 18:08:03 +0200 | [diff] [blame] | 7117 | struct perf_raw_record *raw = data->raw; |
Alexei Starovoitov | fa128e6 | 2015-10-20 20:02:33 -0700 | [diff] [blame] | 7118 | |
Daniel Borkmann | 7e3f977 | 2016-07-14 18:08:03 +0200 | [diff] [blame] | 7119 | if (raw) { |
| 7120 | struct perf_raw_frag *frag = &raw->frag; |
| 7121 | |
| 7122 | perf_output_put(handle, raw->size); |
| 7123 | do { |
| 7124 | if (frag->copy) { |
| 7125 | __output_custom(handle, frag->copy, |
| 7126 | frag->data, frag->size); |
| 7127 | } else { |
| 7128 | __output_copy(handle, frag->data, |
| 7129 | frag->size); |
| 7130 | } |
| 7131 | if (perf_raw_frag_last(frag)) |
| 7132 | break; |
| 7133 | frag = frag->next; |
| 7134 | } while (1); |
| 7135 | if (frag->pad) |
| 7136 | __output_skip(handle, NULL, frag->pad); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7137 | } else { |
| 7138 | struct { |
| 7139 | u32 size; |
| 7140 | u32 data; |
| 7141 | } raw = { |
| 7142 | .size = sizeof(u32), |
| 7143 | .data = 0, |
| 7144 | }; |
| 7145 | perf_output_put(handle, raw); |
| 7146 | } |
| 7147 | } |
Peter Zijlstra | a7ac67e | 2011-06-27 16:47:16 +0200 | [diff] [blame] | 7148 | |
Stephane Eranian | bce38cd | 2012-02-09 23:20:51 +0100 | [diff] [blame] | 7149 | if (sample_type & PERF_SAMPLE_BRANCH_STACK) { |
| 7150 | if (data->br_stack) { |
| 7151 | size_t size; |
| 7152 | |
| 7153 | size = data->br_stack->nr |
| 7154 | * sizeof(struct perf_branch_entry); |
| 7155 | |
| 7156 | perf_output_put(handle, data->br_stack->nr); |
Kan Liang | bbfd5e4 | 2020-01-27 08:53:54 -0800 | [diff] [blame] | 7157 | if (perf_sample_save_hw_index(event)) |
| 7158 | perf_output_put(handle, data->br_stack->hw_idx); |
Stephane Eranian | bce38cd | 2012-02-09 23:20:51 +0100 | [diff] [blame] | 7159 | perf_output_copy(handle, data->br_stack->entries, size); |
| 7160 | } else { |
| 7161 | /* |
| 7162 | * we always store at least the value of nr |
| 7163 | */ |
| 7164 | u64 nr = 0; |
| 7165 | perf_output_put(handle, nr); |
| 7166 | } |
| 7167 | } |
Jiri Olsa | 4018994 | 2012-08-07 15:20:37 +0200 | [diff] [blame] | 7168 | |
| 7169 | if (sample_type & PERF_SAMPLE_REGS_USER) { |
| 7170 | u64 abi = data->regs_user.abi; |
| 7171 | |
| 7172 | /* |
| 7173 | * If there are no regs to dump, notice it through |
| 7174 | * the first u64 being zero (PERF_SAMPLE_REGS_ABI_NONE). |
| 7175 | */ |
| 7176 | perf_output_put(handle, abi); |
| 7177 | |
| 7178 | if (abi) { |
| 7179 | u64 mask = event->attr.sample_regs_user; |
| 7180 | perf_output_sample_regs(handle, |
| 7181 | data->regs_user.regs, |
| 7182 | mask); |
| 7183 | } |
| 7184 | } |
Jiri Olsa | c5ebced | 2012-08-07 15:20:40 +0200 | [diff] [blame] | 7185 | |
Peter Zijlstra | a5cdd40 | 2013-07-16 17:09:07 +0200 | [diff] [blame] | 7186 | if (sample_type & PERF_SAMPLE_STACK_USER) { |
Jiri Olsa | c5ebced | 2012-08-07 15:20:40 +0200 | [diff] [blame] | 7187 | perf_output_sample_ustack(handle, |
| 7188 | data->stack_user_size, |
| 7189 | data->regs_user.regs); |
Peter Zijlstra | a5cdd40 | 2013-07-16 17:09:07 +0200 | [diff] [blame] | 7190 | } |
Andi Kleen | c3feedf | 2013-01-24 16:10:28 +0100 | [diff] [blame] | 7191 | |
Kan Liang | 2a6c6b7 | 2021-01-28 14:40:07 -0800 | [diff] [blame] | 7192 | if (sample_type & PERF_SAMPLE_WEIGHT_TYPE) |
| 7193 | perf_output_put(handle, data->weight.full); |
Stephane Eranian | d6be9ad | 2013-01-24 16:10:31 +0100 | [diff] [blame] | 7194 | |
| 7195 | if (sample_type & PERF_SAMPLE_DATA_SRC) |
| 7196 | perf_output_put(handle, data->data_src.val); |
Peter Zijlstra | a5cdd40 | 2013-07-16 17:09:07 +0200 | [diff] [blame] | 7197 | |
Andi Kleen | fdfbbd0 | 2013-09-20 07:40:39 -0700 | [diff] [blame] | 7198 | if (sample_type & PERF_SAMPLE_TRANSACTION) |
| 7199 | perf_output_put(handle, data->txn); |
| 7200 | |
Stephane Eranian | 60e2364 | 2014-09-24 13:48:37 +0200 | [diff] [blame] | 7201 | if (sample_type & PERF_SAMPLE_REGS_INTR) { |
| 7202 | u64 abi = data->regs_intr.abi; |
| 7203 | /* |
| 7204 | * If there are no regs to dump, notice it through |
| 7205 | * the first u64 being zero (PERF_SAMPLE_REGS_ABI_NONE). |
| 7206 | */ |
| 7207 | perf_output_put(handle, abi); |
| 7208 | |
| 7209 | if (abi) { |
| 7210 | u64 mask = event->attr.sample_regs_intr; |
| 7211 | |
| 7212 | perf_output_sample_regs(handle, |
| 7213 | data->regs_intr.regs, |
| 7214 | mask); |
| 7215 | } |
| 7216 | } |
| 7217 | |
Kan Liang | fc7ce9c | 2017-08-28 20:52:49 -0400 | [diff] [blame] | 7218 | if (sample_type & PERF_SAMPLE_PHYS_ADDR) |
| 7219 | perf_output_put(handle, data->phys_addr); |
| 7220 | |
Namhyung Kim | 6546b19 | 2020-03-25 21:45:29 +0900 | [diff] [blame] | 7221 | if (sample_type & PERF_SAMPLE_CGROUP) |
| 7222 | perf_output_put(handle, data->cgroup); |
| 7223 | |
Kan Liang | 8d97e71 | 2020-10-01 06:57:46 -0700 | [diff] [blame] | 7224 | if (sample_type & PERF_SAMPLE_DATA_PAGE_SIZE) |
| 7225 | perf_output_put(handle, data->data_page_size); |
| 7226 | |
Stephane Eranian | 995f088 | 2020-10-01 06:57:49 -0700 | [diff] [blame] | 7227 | if (sample_type & PERF_SAMPLE_CODE_PAGE_SIZE) |
| 7228 | perf_output_put(handle, data->code_page_size); |
| 7229 | |
Alexander Shishkin | a4faf00 | 2019-10-25 17:08:33 +0300 | [diff] [blame] | 7230 | if (sample_type & PERF_SAMPLE_AUX) { |
| 7231 | perf_output_put(handle, data->aux_size); |
| 7232 | |
| 7233 | if (data->aux_size) |
| 7234 | perf_aux_sample_output(event, handle, data); |
| 7235 | } |
| 7236 | |
Peter Zijlstra | a5cdd40 | 2013-07-16 17:09:07 +0200 | [diff] [blame] | 7237 | if (!event->attr.watermark) { |
| 7238 | int wakeup_events = event->attr.wakeup_events; |
| 7239 | |
| 7240 | if (wakeup_events) { |
Steven Rostedt (VMware) | 56de4e8 | 2019-12-13 13:21:30 -0500 | [diff] [blame] | 7241 | struct perf_buffer *rb = handle->rb; |
Peter Zijlstra | a5cdd40 | 2013-07-16 17:09:07 +0200 | [diff] [blame] | 7242 | int events = local_inc_return(&rb->events); |
| 7243 | |
| 7244 | if (events >= wakeup_events) { |
| 7245 | local_sub(wakeup_events, &rb->events); |
| 7246 | local_inc(&rb->wakeup); |
| 7247 | } |
| 7248 | } |
| 7249 | } |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7250 | } |
| 7251 | |
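| | /* |
| |  * Best-effort virtual-to-physical translation for PERF_SAMPLE_PHYS_ADDR: |
| |  * kernel linear-map addresses go through virt_to_phys(), user addresses |
| |  * through an IRQ-safe page pin; vmalloc addresses and failures yield 0. |
| |  */ |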
Kan Liang | fc7ce9c | 2017-08-28 20:52:49 -0400 | [diff] [blame] | 7252 | static u64 perf_virt_to_phys(u64 virt) |
| 7253 | { |
| 7254 | u64 phys_addr = 0; |
Kan Liang | fc7ce9c | 2017-08-28 20:52:49 -0400 | [diff] [blame] | 7255 | |
| 7256 | if (!virt) |
| 7257 | return 0; |
| 7258 | |
| 7259 | if (virt >= TASK_SIZE) { |
| 7260 | /* If it's vmalloc()d memory, leave phys_addr as 0 */ |
| 7261 | if (virt_addr_valid((void *)(uintptr_t)virt) && |
| 7262 | !(virt >= VMALLOC_START && virt < VMALLOC_END)) |
| 7263 | phys_addr = (u64)virt_to_phys((void *)(uintptr_t)virt); |
| 7264 | } else { |
| 7265 | /* |
| 7266 | * Walking the page tables for a user address. |
| 7267 | * Interrupts are disabled, which prevents any teardown |
| 7268 | * of the page tables. |
Souptick Joarder | dadbb61 | 2020-06-07 21:40:55 -0700 | [diff] [blame] | 7269 | * Try IRQ-safe get_user_page_fast_only first. |
Kan Liang | fc7ce9c | 2017-08-28 20:52:49 -0400 | [diff] [blame] | 7270 | * If that fails, leave phys_addr as 0. |
| 7271 | */ |
Jiri Olsa | d3296fb | 2020-04-07 16:14:27 +0200 | [diff] [blame] | 7272 | if (current->mm != NULL) { |
Greg Thelen | 4716023 | 2021-11-10 18:18:14 -0800 | [diff] [blame] | 7273 | struct page *p; |
| 7274 | |
Jiri Olsa | d3296fb | 2020-04-07 16:14:27 +0200 | [diff] [blame] | 7275 | pagefault_disable(); |
Greg Thelen | 4716023 | 2021-11-10 18:18:14 -0800 | [diff] [blame] | 7276 | if (get_user_page_fast_only(virt, 0, &p)) { |
Jiri Olsa | d3296fb | 2020-04-07 16:14:27 +0200 | [diff] [blame] | 7277 | phys_addr = page_to_phys(p) + virt % PAGE_SIZE; |
Greg Thelen | 4716023 | 2021-11-10 18:18:14 -0800 | [diff] [blame] | 7278 | put_page(p); |
| 7279 | } |
Jiri Olsa | d3296fb | 2020-04-07 16:14:27 +0200 | [diff] [blame] | 7280 | pagefault_enable(); |
| 7281 | } |
Kan Liang | fc7ce9c | 2017-08-28 20:52:49 -0400 | [diff] [blame] | 7282 | } |
| 7283 | |
| 7284 | return phys_addr; |
| 7285 | } |
| 7286 | |
Kan Liang | 8d97e71 | 2020-10-01 06:57:46 -0700 | [diff] [blame] | 7287 | /* |
Peter Zijlstra | 8af26be | 2020-11-11 13:43:57 +0100 | [diff] [blame] | 7288 | * Return the pagetable size of a given virtual address. |
Kan Liang | 8d97e71 | 2020-10-01 06:57:46 -0700 | [diff] [blame] | 7289 | */ |
Peter Zijlstra | 8af26be | 2020-11-11 13:43:57 +0100 | [diff] [blame] | 7290 | static u64 perf_get_pgtable_size(struct mm_struct *mm, unsigned long addr) |
Kan Liang | 8d97e71 | 2020-10-01 06:57:46 -0700 | [diff] [blame] | 7291 | { |
Peter Zijlstra | 8af26be | 2020-11-11 13:43:57 +0100 | [diff] [blame] | 7292 | u64 size = 0; |
Kan Liang | 8d97e71 | 2020-10-01 06:57:46 -0700 | [diff] [blame] | 7293 | |
Peter Zijlstra | 8af26be | 2020-11-11 13:43:57 +0100 | [diff] [blame] | 7294 | #ifdef CONFIG_HAVE_FAST_GUP |
| 7295 | pgd_t *pgdp, pgd; |
| 7296 | p4d_t *p4dp, p4d; |
| 7297 | pud_t *pudp, pud; |
| 7298 | pmd_t *pmdp, pmd; |
| 7299 | pte_t *ptep, pte; |
| 7300 | |
| 7301 | pgdp = pgd_offset(mm, addr); |
| 7302 | pgd = READ_ONCE(*pgdp); |
| 7303 | if (pgd_none(pgd)) |
Kan Liang | 8d97e71 | 2020-10-01 06:57:46 -0700 | [diff] [blame] | 7304 | return 0; |
| 7305 | |
Peter Zijlstra | 8af26be | 2020-11-11 13:43:57 +0100 | [diff] [blame] | 7306 | if (pgd_leaf(pgd)) |
| 7307 | return pgd_leaf_size(pgd); |
| 7308 | |
| 7309 | p4dp = p4d_offset_lockless(pgdp, pgd, addr); |
| 7310 | p4d = READ_ONCE(*p4dp); |
| 7311 | if (!p4d_present(p4d)) |
Kan Liang | 8d97e71 | 2020-10-01 06:57:46 -0700 | [diff] [blame] | 7312 | return 0; |
| 7313 | |
Peter Zijlstra | 8af26be | 2020-11-11 13:43:57 +0100 | [diff] [blame] | 7314 | if (p4d_leaf(p4d)) |
| 7315 | return p4d_leaf_size(p4d); |
Kan Liang | 8d97e71 | 2020-10-01 06:57:46 -0700 | [diff] [blame] | 7316 | |
Peter Zijlstra | 8af26be | 2020-11-11 13:43:57 +0100 | [diff] [blame] | 7317 | pudp = pud_offset_lockless(p4dp, p4d, addr); |
| 7318 | pud = READ_ONCE(*pudp); |
| 7319 | if (!pud_present(pud)) |
Kan Liang | 8d97e71 | 2020-10-01 06:57:46 -0700 | [diff] [blame] | 7320 | return 0; |
| 7321 | |
Peter Zijlstra | 8af26be | 2020-11-11 13:43:57 +0100 | [diff] [blame] | 7322 | if (pud_leaf(pud)) |
| 7323 | return pud_leaf_size(pud); |
Kan Liang | 8d97e71 | 2020-10-01 06:57:46 -0700 | [diff] [blame] | 7324 | |
Peter Zijlstra | 8af26be | 2020-11-11 13:43:57 +0100 | [diff] [blame] | 7325 | pmdp = pmd_offset_lockless(pudp, pud, addr); |
| 7326 | pmd = READ_ONCE(*pmdp); |
| 7327 | if (!pmd_present(pmd)) |
Kan Liang | 8d97e71 | 2020-10-01 06:57:46 -0700 | [diff] [blame] | 7328 | return 0; |
| 7329 | |
Peter Zijlstra | 8af26be | 2020-11-11 13:43:57 +0100 | [diff] [blame] | 7330 | if (pmd_leaf(pmd)) |
| 7331 | return pmd_leaf_size(pmd); |
Kan Liang | 8d97e71 | 2020-10-01 06:57:46 -0700 | [diff] [blame] | 7332 | |
Peter Zijlstra | 8af26be | 2020-11-11 13:43:57 +0100 | [diff] [blame] | 7333 | ptep = pte_offset_map(&pmd, addr); |
| 7334 | pte = ptep_get_lockless(ptep); |
| 7335 | if (pte_present(pte)) |
| 7336 | size = pte_leaf_size(pte); |
| 7337 | pte_unmap(ptep); |
| 7338 | #endif /* CONFIG_HAVE_FAST_GUP */ |
Kan Liang | 8d97e71 | 2020-10-01 06:57:46 -0700 | [diff] [blame] | 7339 | |
Peter Zijlstra | 8af26be | 2020-11-11 13:43:57 +0100 | [diff] [blame] | 7340 | return size; |
Kan Liang | 8d97e71 | 2020-10-01 06:57:46 -0700 | [diff] [blame] | 7341 | } |
| 7342 | |
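| | /* |
| |  * Return the page size backing @addr for the current task (init_mm for |
| |  * kernel threads), or 0 if it cannot be determined.  IRQs are disabled |
| |  * around the lockless page-table walk. |
| |  */ |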
Kan Liang | 8d97e71 | 2020-10-01 06:57:46 -0700 | [diff] [blame] | 7343 | static u64 perf_get_page_size(unsigned long addr) |
| 7344 | { |
| 7345 | struct mm_struct *mm; |
| 7346 | unsigned long flags; |
| 7347 | u64 size; |
| 7348 | |
| 7349 | if (!addr) |
| 7350 | return 0; |
| 7351 | |
| 7352 | /* |
| 7353 | * Software page-table walkers must disable IRQs, |
| 7354 | * which prevents any teardown of the page tables. |
| 7355 | */ |
| 7356 | local_irq_save(flags); |
| 7357 | |
| 7358 | mm = current->mm; |
| 7359 | if (!mm) { |
| 7360 | /* |
| 7361 | * For kernel threads and the like, use init_mm so that |
| 7362 | * we can find kernel memory. |
| 7363 | */ |
| 7364 | mm = &init_mm; |
| 7365 | } |
| 7366 | |
Peter Zijlstra | 8af26be | 2020-11-11 13:43:57 +0100 | [diff] [blame] | 7367 | size = perf_get_pgtable_size(mm, addr); |
Kan Liang | 8d97e71 | 2020-10-01 06:57:46 -0700 | [diff] [blame] | 7368 | |
| 7369 | local_irq_restore(flags); |
| 7370 | |
| 7371 | return size; |
| 7372 | } |
| 7373 | |
Jiri Olsa | 99e818c | 2018-01-07 17:03:50 +0100 | [diff] [blame] | 7374 | static struct perf_callchain_entry __empty_callchain = { .nr = 0, }; |
| 7375 | |
Peter Zijlstra | 6cbc304 | 2018-05-10 15:48:41 +0200 | [diff] [blame] | 7376 | struct perf_callchain_entry * |
Jiri Olsa | 8cf7e0e | 2018-01-07 17:03:49 +0100 | [diff] [blame] | 7377 | perf_callchain(struct perf_event *event, struct pt_regs *regs) |
| 7378 | { |
| 7379 | bool kernel = !event->attr.exclude_callchain_kernel; |
| 7380 | bool user = !event->attr.exclude_callchain_user; |
| 7381 | /* Disallow cross-task user callchains. */ |
| 7382 | bool crosstask = event->ctx->task && event->ctx->task != current; |
| 7383 | const u32 max_stack = event->attr.sample_max_stack; |
Jiri Olsa | 99e818c | 2018-01-07 17:03:50 +0100 | [diff] [blame] | 7384 | struct perf_callchain_entry *callchain; |
Jiri Olsa | 8cf7e0e | 2018-01-07 17:03:49 +0100 | [diff] [blame] | 7385 | |
| 7386 | if (!kernel && !user) |
Jiri Olsa | 99e818c | 2018-01-07 17:03:50 +0100 | [diff] [blame] | 7387 | return &__empty_callchain; |
Jiri Olsa | 8cf7e0e | 2018-01-07 17:03:49 +0100 | [diff] [blame] | 7388 | |
Jiri Olsa | 99e818c | 2018-01-07 17:03:50 +0100 | [diff] [blame] | 7389 | callchain = get_perf_callchain(regs, 0, kernel, user, |
| 7390 | max_stack, crosstask, true); |
| 7391 | return callchain ?: &__empty_callchain; |
Jiri Olsa | 8cf7e0e | 2018-01-07 17:03:49 +0100 | [diff] [blame] | 7392 | } |
| 7393 | |
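| | /* |
| |  * First half of the output path: compute the final record size and gather |
| |  * everything that must be captured up front (callchain, user regs and |
| |  * stack, physical address, page sizes, AUX size), so that |
| |  * perf_output_sample() can copy it out without further preparation. |
| |  */ |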
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7394 | void perf_prepare_sample(struct perf_event_header *header, |
| 7395 | struct perf_sample_data *data, |
| 7396 | struct perf_event *event, |
| 7397 | struct pt_regs *regs) |
| 7398 | { |
| 7399 | u64 sample_type = event->attr.sample_type; |
| 7400 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7401 | header->type = PERF_RECORD_SAMPLE; |
Arnaldo Carvalho de Melo | c320c7b | 2010-10-20 12:50:11 -0200 | [diff] [blame] | 7402 | header->size = sizeof(*header) + event->header_size; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7403 | |
| 7404 | header->misc = 0; |
| 7405 | header->misc |= perf_misc_flags(regs); |
| 7406 | |
Arnaldo Carvalho de Melo | c980d10 | 2010-12-04 23:02:20 -0200 | [diff] [blame] | 7407 | __perf_event_header__init_id(header, data, event); |
Arnaldo Carvalho de Melo | 6844c09 | 2010-12-03 16:36:35 -0200 | [diff] [blame] | 7408 | |
Stephane Eranian | 995f088 | 2020-10-01 06:57:49 -0700 | [diff] [blame] | 7409 | if (sample_type & (PERF_SAMPLE_IP | PERF_SAMPLE_CODE_PAGE_SIZE)) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7410 | data->ip = perf_instruction_pointer(regs); |
| 7411 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7412 | if (sample_type & PERF_SAMPLE_CALLCHAIN) { |
| 7413 | int size = 1; |
| 7414 | |
Peter Zijlstra | 6cbc304 | 2018-05-10 15:48:41 +0200 | [diff] [blame] | 7415 | if (!(sample_type & __PERF_SAMPLE_CALLCHAIN_EARLY)) |
| 7416 | data->callchain = perf_callchain(event, regs); |
| 7417 | |
Jiri Olsa | 99e818c | 2018-01-07 17:03:50 +0100 | [diff] [blame] | 7418 | size += data->callchain->nr; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7419 | |
| 7420 | header->size += size * sizeof(u64); |
| 7421 | } |
| 7422 | |
| 7423 | if (sample_type & PERF_SAMPLE_RAW) { |
Daniel Borkmann | 7e3f977 | 2016-07-14 18:08:03 +0200 | [diff] [blame] | 7424 | struct perf_raw_record *raw = data->raw; |
| 7425 | int size; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7426 | |
Daniel Borkmann | 7e3f977 | 2016-07-14 18:08:03 +0200 | [diff] [blame] | 7427 | if (raw) { |
| 7428 | struct perf_raw_frag *frag = &raw->frag; |
| 7429 | u32 sum = 0; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7430 | |
Daniel Borkmann | 7e3f977 | 2016-07-14 18:08:03 +0200 | [diff] [blame] | 7431 | do { |
| 7432 | sum += frag->size; |
| 7433 | if (perf_raw_frag_last(frag)) |
| 7434 | break; |
| 7435 | frag = frag->next; |
| 7436 | } while (1); |
| 7437 | |
| 7438 | size = round_up(sum + sizeof(u32), sizeof(u64)); |
| 7439 | raw->size = size - sizeof(u32); |
| 7440 | frag->pad = raw->size - sum; |
| 7441 | } else { |
| 7442 | size = sizeof(u64); |
| 7443 | } |
| 7444 | |
| 7445 | header->size += size; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7446 | } |
Stephane Eranian | bce38cd | 2012-02-09 23:20:51 +0100 | [diff] [blame] | 7447 | |
| 7448 | if (sample_type & PERF_SAMPLE_BRANCH_STACK) { |
| 7449 | int size = sizeof(u64); /* nr */ |
| 7450 | if (data->br_stack) { |
Kan Liang | bbfd5e4 | 2020-01-27 08:53:54 -0800 | [diff] [blame] | 7451 | if (perf_sample_save_hw_index(event)) |
| 7452 | size += sizeof(u64); |
| 7453 | |
Stephane Eranian | bce38cd | 2012-02-09 23:20:51 +0100 | [diff] [blame] | 7454 | size += data->br_stack->nr |
| 7455 | * sizeof(struct perf_branch_entry); |
| 7456 | } |
| 7457 | header->size += size; |
| 7458 | } |
Jiri Olsa | 4018994 | 2012-08-07 15:20:37 +0200 | [diff] [blame] | 7459 | |
Peter Zijlstra | 2565711 | 2014-09-24 13:48:42 +0200 | [diff] [blame] | 7460 | if (sample_type & (PERF_SAMPLE_REGS_USER | PERF_SAMPLE_STACK_USER)) |
Peter Zijlstra | 76a4efa | 2020-10-30 12:14:21 +0100 | [diff] [blame] | 7461 | perf_sample_regs_user(&data->regs_user, regs); |
Peter Zijlstra | 2565711 | 2014-09-24 13:48:42 +0200 | [diff] [blame] | 7462 | |
Jiri Olsa | 4018994 | 2012-08-07 15:20:37 +0200 | [diff] [blame] | 7463 | if (sample_type & PERF_SAMPLE_REGS_USER) { |
| 7464 | /* regs dump ABI info */ |
| 7465 | int size = sizeof(u64); |
| 7466 | |
Jiri Olsa | 4018994 | 2012-08-07 15:20:37 +0200 | [diff] [blame] | 7467 | if (data->regs_user.regs) { |
| 7468 | u64 mask = event->attr.sample_regs_user; |
| 7469 | size += hweight64(mask) * sizeof(u64); |
| 7470 | } |
| 7471 | |
| 7472 | header->size += size; |
| 7473 | } |
Jiri Olsa | c5ebced | 2012-08-07 15:20:40 +0200 | [diff] [blame] | 7474 | |
| 7475 | if (sample_type & PERF_SAMPLE_STACK_USER) { |
| 7476 | /* |
Roy Ben Shlomo | 9f014e3 | 2019-09-20 20:12:53 +0300 | [diff] [blame] | 7477 | * Either we need the PERF_SAMPLE_STACK_USER bit to always be |
Jiri Olsa | c5ebced | 2012-08-07 15:20:40 +0200 | [diff] [blame] | 7478 | * processed as the last one, or an additional check must be added |
| 7479 | * in case a new sample type is added, because we could eat |
| 7480 | * up the rest of the sample size. |
| 7481 | */ |
Jiri Olsa | c5ebced | 2012-08-07 15:20:40 +0200 | [diff] [blame] | 7482 | u16 stack_size = event->attr.sample_stack_user; |
| 7483 | u16 size = sizeof(u64); |
| 7484 | |
Jiri Olsa | c5ebced | 2012-08-07 15:20:40 +0200 | [diff] [blame] | 7485 | stack_size = perf_sample_ustack_size(stack_size, header->size, |
Peter Zijlstra | 2565711 | 2014-09-24 13:48:42 +0200 | [diff] [blame] | 7486 | data->regs_user.regs); |
Jiri Olsa | c5ebced | 2012-08-07 15:20:40 +0200 | [diff] [blame] | 7487 | |
| 7488 | /* |
| 7489 | * If there is something to dump, add space for the dump |
| 7490 | * itself and for the field that tells the dynamic size, |
| 7491 | * which is how many have been actually dumped. |
| 7492 | */ |
| 7493 | if (stack_size) |
| 7494 | size += sizeof(u64) + stack_size; |
| 7495 | |
| 7496 | data->stack_user_size = stack_size; |
| 7497 | header->size += size; |
| 7498 | } |
Stephane Eranian | 60e2364 | 2014-09-24 13:48:37 +0200 | [diff] [blame] | 7499 | |
| 7500 | if (sample_type & PERF_SAMPLE_REGS_INTR) { |
| 7501 | /* regs dump ABI info */ |
| 7502 | int size = sizeof(u64); |
| 7503 | |
| 7504 | perf_sample_regs_intr(&data->regs_intr, regs); |
| 7505 | |
| 7506 | if (data->regs_intr.regs) { |
| 7507 | u64 mask = event->attr.sample_regs_intr; |
| 7508 | |
| 7509 | size += hweight64(mask) * sizeof(u64); |
| 7510 | } |
| 7511 | |
| 7512 | header->size += size; |
| 7513 | } |
Kan Liang | fc7ce9c | 2017-08-28 20:52:49 -0400 | [diff] [blame] | 7514 | |
| 7515 | if (sample_type & PERF_SAMPLE_PHYS_ADDR) |
| 7516 | data->phys_addr = perf_virt_to_phys(data->addr); |
Alexander Shishkin | a4faf00 | 2019-10-25 17:08:33 +0300 | [diff] [blame] | 7517 | |
Namhyung Kim | 6546b19 | 2020-03-25 21:45:29 +0900 | [diff] [blame] | 7518 | #ifdef CONFIG_CGROUP_PERF |
| 7519 | if (sample_type & PERF_SAMPLE_CGROUP) { |
| 7520 | struct cgroup *cgrp; |
| 7521 | |
| 7522 | /* protected by RCU */ |
| 7523 | cgrp = task_css_check(current, perf_event_cgrp_id, 1)->cgroup; |
| 7524 | data->cgroup = cgroup_id(cgrp); |
| 7525 | } |
| 7526 | #endif |
| 7527 | |
Kan Liang | 8d97e71 | 2020-10-01 06:57:46 -0700 | [diff] [blame] | 7528 | /* |
| 7529 | * PERF_SAMPLE_DATA_PAGE_SIZE requires PERF_SAMPLE_ADDR. If the user doesn't |
| 7530 | * require PERF_SAMPLE_ADDR, the kernel implicitly retrieves data->addr, |
| 7531 | * but the value will not be dumped to userspace. |
| 7532 | */ |
| 7533 | if (sample_type & PERF_SAMPLE_DATA_PAGE_SIZE) |
| 7534 | data->data_page_size = perf_get_page_size(data->addr); |
| 7535 | |
Stephane Eranian | 995f088 | 2020-10-01 06:57:49 -0700 | [diff] [blame] | 7536 | if (sample_type & PERF_SAMPLE_CODE_PAGE_SIZE) |
| 7537 | data->code_page_size = perf_get_page_size(data->ip); |
| 7538 | |
Alexander Shishkin | a4faf00 | 2019-10-25 17:08:33 +0300 | [diff] [blame] | 7539 | if (sample_type & PERF_SAMPLE_AUX) { |
| 7540 | u64 size; |
| 7541 | |
| 7542 | header->size += sizeof(u64); /* size */ |
| 7543 | |
| 7544 | /* |
| 7545 | * Given the 16-bit nature of header::size, an AUX sample can |
| 7546 | * easily overflow it, what with all the preceding sample bits. |
| 7547 | * Make sure this doesn't happen by using up to U16_MAX bytes |
| 7548 | * per sample in total (rounded down to an 8-byte boundary). |
| 7549 | */ |
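| | /* |
| |  * For example, with header->size == 336 and attr.aux_sample_size == 65536 |
| |  * this yields min(65535 - 336, 65536) == 65199, rounded down to 65192. |
| |  */ |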
| 7550 | size = min_t(size_t, U16_MAX - header->size, |
| 7551 | event->attr.aux_sample_size); |
| 7552 | size = rounddown(size, 8); |
| 7553 | size = perf_prepare_sample_aux(event, data, size); |
| 7554 | |
| 7555 | WARN_ON_ONCE(size + header->size > U16_MAX); |
| 7556 | header->size += size; |
| 7557 | } |
| 7558 | /* |
| 7559 | * If you're adding more sample types here, you likely need to do |
| 7560 | * something about the overflowing header::size, like repurpose the |
| 7561 | * lowest 3 bits of size, which should always be zero at the moment. |
| 7562 | * This raises a more important question: do we really need 512k-sized |
| 7563 | * samples, and why? Good argumentation is in order for whatever you |
| 7564 | * do here next. |
| 7565 | */ |
| 7566 | WARN_ON_ONCE(header->size & 7); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7567 | } |
| 7568 | |
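| | /* |
| |  * Common body of the forward/backward/default output paths: prepare the |
| |  * sample, reserve space in the ring buffer via @output_begin, emit the |
| |  * record and close the handle.  Returns 0 or the output_begin() error. |
| |  */ |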
Arnaldo Carvalho de Melo | 5620196 | 2019-01-11 13:20:20 -0300 | [diff] [blame] | 7569 | static __always_inline int |
Wang Nan | 9ecda41 | 2016-04-05 14:11:18 +0000 | [diff] [blame] | 7570 | __perf_event_output(struct perf_event *event, |
| 7571 | struct perf_sample_data *data, |
| 7572 | struct pt_regs *regs, |
| 7573 | int (*output_begin)(struct perf_output_handle *, |
Peter Zijlstra | 267fb27 | 2020-10-30 15:50:32 +0100 | [diff] [blame] | 7574 | struct perf_sample_data *, |
Wang Nan | 9ecda41 | 2016-04-05 14:11:18 +0000 | [diff] [blame] | 7575 | struct perf_event *, |
| 7576 | unsigned int)) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7577 | { |
| 7578 | struct perf_output_handle handle; |
| 7579 | struct perf_event_header header; |
Arnaldo Carvalho de Melo | 5620196 | 2019-01-11 13:20:20 -0300 | [diff] [blame] | 7580 | int err; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7581 | |
Frederic Weisbecker | 927c7a9 | 2010-07-01 16:20:36 +0200 | [diff] [blame] | 7582 | /* protect the callchain buffers */ |
| 7583 | rcu_read_lock(); |
| 7584 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7585 | perf_prepare_sample(&header, data, event, regs); |
| 7586 | |
Peter Zijlstra | 267fb27 | 2020-10-30 15:50:32 +0100 | [diff] [blame] | 7587 | err = output_begin(&handle, data, event, header.size); |
Arnaldo Carvalho de Melo | 5620196 | 2019-01-11 13:20:20 -0300 | [diff] [blame] | 7588 | if (err) |
Frederic Weisbecker | 927c7a9 | 2010-07-01 16:20:36 +0200 | [diff] [blame] | 7589 | goto exit; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7590 | |
| 7591 | perf_output_sample(&handle, &header, data, event); |
| 7592 | |
| 7593 | perf_output_end(&handle); |
Frederic Weisbecker | 927c7a9 | 2010-07-01 16:20:36 +0200 | [diff] [blame] | 7594 | |
| 7595 | exit: |
| 7596 | rcu_read_unlock(); |
Arnaldo Carvalho de Melo | 5620196 | 2019-01-11 13:20:20 -0300 | [diff] [blame] | 7597 | return err; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7598 | } |
| 7599 | |
Wang Nan | 9ecda41 | 2016-04-05 14:11:18 +0000 | [diff] [blame] | 7600 | void |
| 7601 | perf_event_output_forward(struct perf_event *event, |
| 7602 | struct perf_sample_data *data, |
| 7603 | struct pt_regs *regs) |
| 7604 | { |
| 7605 | __perf_event_output(event, data, regs, perf_output_begin_forward); |
| 7606 | } |
| 7607 | |
| 7608 | void |
| 7609 | perf_event_output_backward(struct perf_event *event, |
| 7610 | struct perf_sample_data *data, |
| 7611 | struct pt_regs *regs) |
| 7612 | { |
| 7613 | __perf_event_output(event, data, regs, perf_output_begin_backward); |
| 7614 | } |
| 7615 | |
Arnaldo Carvalho de Melo | 5620196 | 2019-01-11 13:20:20 -0300 | [diff] [blame] | 7616 | int |
Wang Nan | 9ecda41 | 2016-04-05 14:11:18 +0000 | [diff] [blame] | 7617 | perf_event_output(struct perf_event *event, |
| 7618 | struct perf_sample_data *data, |
| 7619 | struct pt_regs *regs) |
| 7620 | { |
Arnaldo Carvalho de Melo | 5620196 | 2019-01-11 13:20:20 -0300 | [diff] [blame] | 7621 | return __perf_event_output(event, data, regs, perf_output_begin); |
Wang Nan | 9ecda41 | 2016-04-05 14:11:18 +0000 | [diff] [blame] | 7622 | } |
| 7623 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7624 | /* |
| 7625 | * read event_id |
| 7626 | */ |
| 7627 | |
| 7628 | struct perf_read_event { |
| 7629 | struct perf_event_header header; |
| 7630 | |
| 7631 | u32 pid; |
| 7632 | u32 tid; |
| 7633 | }; |
| 7634 | |
| 7635 | static void |
| 7636 | perf_event_read_event(struct perf_event *event, |
| 7637 | struct task_struct *task) |
| 7638 | { |
| 7639 | struct perf_output_handle handle; |
Arnaldo Carvalho de Melo | c980d10 | 2010-12-04 23:02:20 -0200 | [diff] [blame] | 7640 | struct perf_sample_data sample; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7641 | struct perf_read_event read_event = { |
| 7642 | .header = { |
| 7643 | .type = PERF_RECORD_READ, |
| 7644 | .misc = 0, |
Arnaldo Carvalho de Melo | c320c7b | 2010-10-20 12:50:11 -0200 | [diff] [blame] | 7645 | .size = sizeof(read_event) + event->read_size, |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7646 | }, |
| 7647 | .pid = perf_event_pid(event, task), |
| 7648 | .tid = perf_event_tid(event, task), |
| 7649 | }; |
| 7650 | int ret; |
| 7651 | |
Arnaldo Carvalho de Melo | c980d10 | 2010-12-04 23:02:20 -0200 | [diff] [blame] | 7652 | perf_event_header__init_id(&read_event.header, &sample, event); |
Peter Zijlstra | 267fb27 | 2020-10-30 15:50:32 +0100 | [diff] [blame] | 7653 | ret = perf_output_begin(&handle, &sample, event, read_event.header.size); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7654 | if (ret) |
| 7655 | return; |
| 7656 | |
| 7657 | perf_output_put(&handle, read_event); |
| 7658 | perf_output_read(&handle, event); |
Arnaldo Carvalho de Melo | c980d10 | 2010-12-04 23:02:20 -0200 | [diff] [blame] | 7659 | perf_event__output_id_sample(event, &handle, &sample); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7660 | |
| 7661 | perf_output_end(&handle); |
| 7662 | } |
| 7663 | |
Peter Zijlstra | aab5b71 | 2016-05-12 17:26:46 +0200 | [diff] [blame] | 7664 | typedef void (perf_iterate_f)(struct perf_event *event, void *data); |
Jiri Olsa | 52d857a | 2013-05-06 18:27:18 +0200 | [diff] [blame] | 7665 | |
| 7666 | static void |
Peter Zijlstra | aab5b71 | 2016-05-12 17:26:46 +0200 | [diff] [blame] | 7667 | perf_iterate_ctx(struct perf_event_context *ctx, |
| 7668 | perf_iterate_f output, |
Alexander Shishkin | b73e4fe | 2016-04-27 18:44:45 +0300 | [diff] [blame] | 7669 | void *data, bool all) |
Jiri Olsa | 52d857a | 2013-05-06 18:27:18 +0200 | [diff] [blame] | 7670 | { |
| 7671 | struct perf_event *event; |
| 7672 | |
| 7673 | list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { |
Alexander Shishkin | b73e4fe | 2016-04-27 18:44:45 +0300 | [diff] [blame] | 7674 | if (!all) { |
| 7675 | if (event->state < PERF_EVENT_STATE_INACTIVE) |
| 7676 | continue; |
| 7677 | if (!event_filter_match(event)) |
| 7678 | continue; |
| 7679 | } |
| 7680 | |
Jiri Olsa | 6751684 | 2013-07-09 18:56:31 +0200 | [diff] [blame] | 7681 | output(event, data); |
Jiri Olsa | 52d857a | 2013-05-06 18:27:18 +0200 | [diff] [blame] | 7682 | } |
| 7683 | } |
| 7684 | |
Peter Zijlstra | aab5b71 | 2016-05-12 17:26:46 +0200 | [diff] [blame] | 7685 | static void perf_iterate_sb_cpu(perf_iterate_f output, void *data) |
Kan Liang | f2fb6be | 2016-03-23 11:24:37 -0700 | [diff] [blame] | 7686 | { |
| 7687 | struct pmu_event_list *pel = this_cpu_ptr(&pmu_sb_events); |
| 7688 | struct perf_event *event; |
| 7689 | |
| 7690 | list_for_each_entry_rcu(event, &pel->list, sb_list) { |
Peter Zijlstra | 0b8f1e2 | 2016-08-04 14:37:24 +0200 | [diff] [blame] | 7691 | /* |
| 7692 | * Skip events that are not fully formed yet; ensure that |
| 7693 | * if we observe event->ctx, both event and ctx will be |
| 7694 | * complete enough. See perf_install_in_context(). |
| 7695 | */ |
| 7696 | if (!smp_load_acquire(&event->ctx)) |
| 7697 | continue; |
| 7698 | |
Kan Liang | f2fb6be | 2016-03-23 11:24:37 -0700 | [diff] [blame] | 7699 | if (event->state < PERF_EVENT_STATE_INACTIVE) |
| 7700 | continue; |
| 7701 | if (!event_filter_match(event)) |
| 7702 | continue; |
| 7703 | output(event, data); |
| 7704 | } |
| 7705 | } |
| 7706 | |
Peter Zijlstra | aab5b71 | 2016-05-12 17:26:46 +0200 | [diff] [blame] | 7707 | /* |
| 7708 | * Iterate all events that need to receive side-band events. |
| 7709 | * |
| 7710 | * For new callers; ensure that account_pmu_sb_event() includes |
| 7711 | * your event, otherwise it might not get delivered. |
| 7712 | */ |
Jiri Olsa | 4e93ad6 | 2015-11-04 16:00:05 +0100 | [diff] [blame] | 7713 | static void |
Peter Zijlstra | aab5b71 | 2016-05-12 17:26:46 +0200 | [diff] [blame] | 7714 | perf_iterate_sb(perf_iterate_f output, void *data, |
Jiri Olsa | 52d857a | 2013-05-06 18:27:18 +0200 | [diff] [blame] | 7715 | struct perf_event_context *task_ctx) |
| 7716 | { |
Jiri Olsa | 52d857a | 2013-05-06 18:27:18 +0200 | [diff] [blame] | 7717 | struct perf_event_context *ctx; |
Jiri Olsa | 52d857a | 2013-05-06 18:27:18 +0200 | [diff] [blame] | 7718 | int ctxn; |
| 7719 | |
Peter Zijlstra | aab5b71 | 2016-05-12 17:26:46 +0200 | [diff] [blame] | 7720 | rcu_read_lock(); |
| 7721 | preempt_disable(); |
| 7722 | |
Jiri Olsa | 4e93ad6 | 2015-11-04 16:00:05 +0100 | [diff] [blame] | 7723 | /* |
Peter Zijlstra | aab5b71 | 2016-05-12 17:26:46 +0200 | [diff] [blame] | 7724 | * If we have task_ctx != NULL we only notify the task context itself. |
| 7725 | * The task_ctx is set only for EXIT events before releasing task |
Jiri Olsa | 4e93ad6 | 2015-11-04 16:00:05 +0100 | [diff] [blame] | 7726 | * context. |
| 7727 | */ |
| 7728 | if (task_ctx) { |
Peter Zijlstra | aab5b71 | 2016-05-12 17:26:46 +0200 | [diff] [blame] | 7729 | perf_iterate_ctx(task_ctx, output, data, false); |
| 7730 | goto done; |
Jiri Olsa | 4e93ad6 | 2015-11-04 16:00:05 +0100 | [diff] [blame] | 7731 | } |
| 7732 | |
Peter Zijlstra | aab5b71 | 2016-05-12 17:26:46 +0200 | [diff] [blame] | 7733 | perf_iterate_sb_cpu(output, data); |
Kan Liang | f2fb6be | 2016-03-23 11:24:37 -0700 | [diff] [blame] | 7734 | |
| 7735 | for_each_task_context_nr(ctxn) { |
Jiri Olsa | 52d857a | 2013-05-06 18:27:18 +0200 | [diff] [blame] | 7736 | ctx = rcu_dereference(current->perf_event_ctxp[ctxn]); |
| 7737 | if (ctx) |
Peter Zijlstra | aab5b71 | 2016-05-12 17:26:46 +0200 | [diff] [blame] | 7738 | perf_iterate_ctx(ctx, output, data, false); |
Jiri Olsa | 52d857a | 2013-05-06 18:27:18 +0200 | [diff] [blame] | 7739 | } |
Peter Zijlstra | aab5b71 | 2016-05-12 17:26:46 +0200 | [diff] [blame] | 7740 | done: |
Kan Liang | f2fb6be | 2016-03-23 11:24:37 -0700 | [diff] [blame] | 7741 | preempt_enable(); |
Jiri Olsa | 52d857a | 2013-05-06 18:27:18 +0200 | [diff] [blame] | 7742 | rcu_read_unlock(); |
| 7743 | } |
| 7744 | |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 7745 | /* |
| 7746 | * Clear all file-based filters at exec; they'll have to be |
| 7747 | * reinstated when/if these objects are mmapped again. |
| 7748 | */ |
| 7749 | static void perf_event_addr_filters_exec(struct perf_event *event, void *data) |
| 7750 | { |
| 7751 | struct perf_addr_filters_head *ifh = perf_event_addr_filters(event); |
| 7752 | struct perf_addr_filter *filter; |
| 7753 | unsigned int restart = 0, count = 0; |
| 7754 | unsigned long flags; |
| 7755 | |
| 7756 | if (!has_addr_filter(event)) |
| 7757 | return; |
| 7758 | |
| 7759 | raw_spin_lock_irqsave(&ifh->lock, flags); |
| 7760 | list_for_each_entry(filter, &ifh->list, entry) { |
Song Liu | 9511bce | 2018-04-17 23:29:07 -0700 | [diff] [blame] | 7761 | if (filter->path.dentry) { |
Alexander Shishkin | c60f83b | 2019-02-15 13:56:55 +0200 | [diff] [blame] | 7762 | event->addr_filter_ranges[count].start = 0; |
| 7763 | event->addr_filter_ranges[count].size = 0; |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 7764 | restart++; |
| 7765 | } |
| 7766 | |
| 7767 | count++; |
| 7768 | } |
| 7769 | |
| 7770 | if (restart) |
| 7771 | event->addr_filters_gen++; |
| 7772 | raw_spin_unlock_irqrestore(&ifh->lock, flags); |
| 7773 | |
| 7774 | if (restart) |
Alexander Shishkin | 767ae08 | 2016-09-06 16:23:49 +0300 | [diff] [blame] | 7775 | perf_event_stop(event, 1); |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 7776 | } |
| 7777 | |
| 7778 | void perf_event_exec(void) |
| 7779 | { |
| 7780 | struct perf_event_context *ctx; |
| 7781 | int ctxn; |
| 7782 | |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 7783 | for_each_task_context_nr(ctxn) { |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 7784 | perf_event_enable_on_exec(ctxn); |
Marco Elver | 2e498d0 | 2021-04-08 12:35:59 +0200 | [diff] [blame] | 7785 | perf_event_remove_on_exec(ctxn); |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 7786 | |
Marco Elver | 2e498d0 | 2021-04-08 12:35:59 +0200 | [diff] [blame] | 7787 | rcu_read_lock(); |
| 7788 | ctx = rcu_dereference(current->perf_event_ctxp[ctxn]); |
| 7789 | if (ctx) { |
| 7790 | perf_iterate_ctx(ctx, perf_event_addr_filters_exec, |
| 7791 | NULL, true); |
| 7792 | } |
| 7793 | rcu_read_unlock(); |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 7794 | } |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 7795 | } |
| 7796 | |
Alexander Shishkin | 95ff4ca | 2015-12-02 18:41:11 +0200 | [diff] [blame] | 7797 | struct remote_output { |
Steven Rostedt (VMware) | 56de4e8 | 2019-12-13 13:21:30 -0500 | [diff] [blame] | 7798 | struct perf_buffer *rb; |
Alexander Shishkin | 95ff4ca | 2015-12-02 18:41:11 +0200 | [diff] [blame] | 7799 | int err; |
| 7800 | }; |
| 7801 | |
| 7802 | static void __perf_event_output_stop(struct perf_event *event, void *data) |
| 7803 | { |
| 7804 | struct perf_event *parent = event->parent; |
| 7805 | struct remote_output *ro = data; |
Steven Rostedt (VMware) | 56de4e8 | 2019-12-13 13:21:30 -0500 | [diff] [blame] | 7806 | struct perf_buffer *rb = ro->rb; |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 7807 | struct stop_event_data sd = { |
| 7808 | .event = event, |
| 7809 | }; |
Alexander Shishkin | 95ff4ca | 2015-12-02 18:41:11 +0200 | [diff] [blame] | 7810 | |
| 7811 | if (!has_aux(event)) |
| 7812 | return; |
| 7813 | |
| 7814 | if (!parent) |
| 7815 | parent = event; |
| 7816 | |
| 7817 | /* |
| 7818 | * In case of inheritance, it will be the parent that links to the |
Alexander Shishkin | 767ae08 | 2016-09-06 16:23:49 +0300 | [diff] [blame] | 7819 | * ring-buffer, but it will be the child that's actually using it. |
| 7820 | * |
| 7821 | * We are using event::rb to determine if the event should be stopped, |
| 7822 | * however this may race with ring_buffer_attach() (through set_output), |
| 7823 | * which will make us skip the event that actually needs to be stopped. |
| 7824 | * So ring_buffer_attach() has to stop an aux event before re-assigning |
| 7825 | * its rb pointer. |
Alexander Shishkin | 95ff4ca | 2015-12-02 18:41:11 +0200 | [diff] [blame] | 7826 | */ |
| 7827 | if (rcu_dereference(parent->rb) == rb) |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 7828 | ro->err = __perf_event_stop(&sd); |
Alexander Shishkin | 95ff4ca | 2015-12-02 18:41:11 +0200 | [diff] [blame] | 7829 | } |
| 7830 | |
| 7831 | static int __perf_pmu_output_stop(void *info) |
| 7832 | { |
| 7833 | struct perf_event *event = info; |
Alexander Shishkin | f3a519e | 2019-10-22 10:39:40 +0300 | [diff] [blame] | 7834 | struct pmu *pmu = event->ctx->pmu; |
Will Deacon | 8b6a3fe | 2016-08-24 10:07:14 +0100 | [diff] [blame] | 7835 | struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context); |
Alexander Shishkin | 95ff4ca | 2015-12-02 18:41:11 +0200 | [diff] [blame] | 7836 | struct remote_output ro = { |
| 7837 | .rb = event->rb, |
| 7838 | }; |
| 7839 | |
| 7840 | rcu_read_lock(); |
Peter Zijlstra | aab5b71 | 2016-05-12 17:26:46 +0200 | [diff] [blame] | 7841 | perf_iterate_ctx(&cpuctx->ctx, __perf_event_output_stop, &ro, false); |
Alexander Shishkin | 95ff4ca | 2015-12-02 18:41:11 +0200 | [diff] [blame] | 7842 | if (cpuctx->task_ctx) |
Peter Zijlstra | aab5b71 | 2016-05-12 17:26:46 +0200 | [diff] [blame] | 7843 | perf_iterate_ctx(cpuctx->task_ctx, __perf_event_output_stop, |
Alexander Shishkin | b73e4fe | 2016-04-27 18:44:45 +0300 | [diff] [blame] | 7844 | &ro, false); |
Alexander Shishkin | 95ff4ca | 2015-12-02 18:41:11 +0200 | [diff] [blame] | 7845 | rcu_read_unlock(); |
| 7846 | |
| 7847 | return ro.err; |
| 7848 | } |
| 7849 | |
| 7850 | static void perf_pmu_output_stop(struct perf_event *event) |
| 7851 | { |
| 7852 | struct perf_event *iter; |
| 7853 | int err, cpu; |
| 7854 | |
| 7855 | restart: |
| 7856 | rcu_read_lock(); |
| 7857 | list_for_each_entry_rcu(iter, &event->rb->event_list, rb_entry) { |
| 7858 | /* |
| 7859 | * For per-CPU events, we need to make sure that neither they |
| 7860 | * nor their children are running; for cpu==-1 events it's |
| 7861 | * sufficient to stop the event itself if it's active, since |
| 7862 | * it can't have children. |
| 7863 | */ |
| 7864 | cpu = iter->cpu; |
| 7865 | if (cpu == -1) |
| 7866 | cpu = READ_ONCE(iter->oncpu); |
| 7867 | |
| 7868 | if (cpu == -1) |
| 7869 | continue; |
| 7870 | |
| 7871 | err = cpu_function_call(cpu, __perf_pmu_output_stop, event); |
| 7872 | if (err == -EAGAIN) { |
| 7873 | rcu_read_unlock(); |
| 7874 | goto restart; |
| 7875 | } |
| 7876 | } |
| 7877 | rcu_read_unlock(); |
| 7878 | } |
| 7879 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7880 | /* |
| 7881 | * task tracking -- fork/exit |
| 7882 | * |
Stephane Eranian | 13d7a24 | 2013-08-21 12:10:24 +0200 | [diff] [blame] | 7883 | * enabled by: attr.comm | attr.mmap | attr.mmap2 | attr.mmap_data | attr.task |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7884 | */ |
| 7885 | |
| 7886 | struct perf_task_event { |
| 7887 | struct task_struct *task; |
| 7888 | struct perf_event_context *task_ctx; |
| 7889 | |
| 7890 | struct { |
| 7891 | struct perf_event_header header; |
| 7892 | |
| 7893 | u32 pid; |
| 7894 | u32 ppid; |
| 7895 | u32 tid; |
| 7896 | u32 ptid; |
| 7897 | u64 time; |
| 7898 | } event_id; |
| 7899 | }; |
| 7900 | |
Jiri Olsa | 6751684 | 2013-07-09 18:56:31 +0200 | [diff] [blame] | 7901 | static int perf_event_task_match(struct perf_event *event) |
| 7902 | { |
Stephane Eranian | 13d7a24 | 2013-08-21 12:10:24 +0200 | [diff] [blame] | 7903 | return event->attr.comm || event->attr.mmap || |
| 7904 | event->attr.mmap2 || event->attr.mmap_data || |
| 7905 | event->attr.task; |
Jiri Olsa | 6751684 | 2013-07-09 18:56:31 +0200 | [diff] [blame] | 7906 | } |
| 7907 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7908 | static void perf_event_task_output(struct perf_event *event, |
Jiri Olsa | 52d857a | 2013-05-06 18:27:18 +0200 | [diff] [blame] | 7909 | void *data) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7910 | { |
Jiri Olsa | 52d857a | 2013-05-06 18:27:18 +0200 | [diff] [blame] | 7911 | struct perf_task_event *task_event = data; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7912 | struct perf_output_handle handle; |
Arnaldo Carvalho de Melo | c980d10 | 2010-12-04 23:02:20 -0200 | [diff] [blame] | 7913 | struct perf_sample_data sample; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7914 | struct task_struct *task = task_event->task; |
Arnaldo Carvalho de Melo | c980d10 | 2010-12-04 23:02:20 -0200 | [diff] [blame] | 7915 | int ret, size = task_event->event_id.header.size; |
Mike Galbraith | 8bb39f9 | 2010-03-26 11:11:33 +0100 | [diff] [blame] | 7916 | |
Jiri Olsa | 6751684 | 2013-07-09 18:56:31 +0200 | [diff] [blame] | 7917 | if (!perf_event_task_match(event)) |
| 7918 | return; |
| 7919 | |
Arnaldo Carvalho de Melo | c980d10 | 2010-12-04 23:02:20 -0200 | [diff] [blame] | 7920 | perf_event_header__init_id(&task_event->event_id.header, &sample, event); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7921 | |
Peter Zijlstra | 267fb27 | 2020-10-30 15:50:32 +0100 | [diff] [blame] | 7922 | ret = perf_output_begin(&handle, &sample, event, |
Peter Zijlstra | a7ac67e | 2011-06-27 16:47:16 +0200 | [diff] [blame] | 7923 | task_event->event_id.header.size); |
Peter Zijlstra | ef60777 | 2010-05-18 10:50:41 +0200 | [diff] [blame] | 7924 | if (ret) |
Arnaldo Carvalho de Melo | c980d10 | 2010-12-04 23:02:20 -0200 | [diff] [blame] | 7925 | goto out; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7926 | |
| 7927 | task_event->event_id.pid = perf_event_pid(event, task); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7928 | task_event->event_id.tid = perf_event_tid(event, task); |
Ian Rogers | f3bed55 | 2020-04-17 11:28:42 -0700 | [diff] [blame] | 7929 | |
| 7930 | if (task_event->event_id.header.type == PERF_RECORD_EXIT) { |
| 7931 | task_event->event_id.ppid = perf_event_pid(event, |
| 7932 | task->real_parent); |
| 7933 | task_event->event_id.ptid = perf_event_pid(event, |
| 7934 | task->real_parent); |
| 7935 | } else { /* PERF_RECORD_FORK */ |
| 7936 | task_event->event_id.ppid = perf_event_pid(event, current); |
| 7937 | task_event->event_id.ptid = perf_event_tid(event, current); |
| 7938 | } |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7939 | |
Peter Zijlstra | 34f4392 | 2015-02-20 14:05:38 +0100 | [diff] [blame] | 7940 | task_event->event_id.time = perf_event_clock(event); |
| 7941 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7942 | perf_output_put(&handle, task_event->event_id); |
| 7943 | |
Arnaldo Carvalho de Melo | c980d10 | 2010-12-04 23:02:20 -0200 | [diff] [blame] | 7944 | perf_event__output_id_sample(event, &handle, &sample); |
| 7945 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7946 | perf_output_end(&handle); |
Arnaldo Carvalho de Melo | c980d10 | 2010-12-04 23:02:20 -0200 | [diff] [blame] | 7947 | out: |
| 7948 | task_event->event_id.header.size = size; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7949 | } |
| 7950 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7951 | static void perf_event_task(struct task_struct *task, |
| 7952 | struct perf_event_context *task_ctx, |
| 7953 | int new) |
| 7954 | { |
| 7955 | struct perf_task_event task_event; |
| 7956 | |
| 7957 | if (!atomic_read(&nr_comm_events) && |
| 7958 | !atomic_read(&nr_mmap_events) && |
| 7959 | !atomic_read(&nr_task_events)) |
| 7960 | return; |
| 7961 | |
| 7962 | task_event = (struct perf_task_event){ |
| 7963 | .task = task, |
| 7964 | .task_ctx = task_ctx, |
| 7965 | .event_id = { |
| 7966 | .header = { |
| 7967 | .type = new ? PERF_RECORD_FORK : PERF_RECORD_EXIT, |
| 7968 | .misc = 0, |
| 7969 | .size = sizeof(task_event.event_id), |
| 7970 | }, |
| 7971 | /* .pid */ |
| 7972 | /* .ppid */ |
| 7973 | /* .tid */ |
| 7974 | /* .ptid */ |
Peter Zijlstra | 34f4392 | 2015-02-20 14:05:38 +0100 | [diff] [blame] | 7975 | /* .time */ |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7976 | }, |
| 7977 | }; |
| 7978 | |
Peter Zijlstra | aab5b71 | 2016-05-12 17:26:46 +0200 | [diff] [blame] | 7979 | perf_iterate_sb(perf_event_task_output, |
Jiri Olsa | 52d857a | 2013-05-06 18:27:18 +0200 | [diff] [blame] | 7980 | &task_event, |
| 7981 | task_ctx); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7982 | } |
| 7983 | |
| 7984 | void perf_event_fork(struct task_struct *task) |
| 7985 | { |
| 7986 | perf_event_task(task, NULL, 1); |
Hari Bathini | e422267 | 2017-03-08 02:11:36 +0530 | [diff] [blame] | 7987 | perf_event_namespaces(task); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 7988 | } |
| 7989 | |
| 7990 | /* |
| 7991 | * comm tracking |
| 7992 | */ |
| 7993 | |
| 7994 | struct perf_comm_event { |
| 7995 | struct task_struct *task; |
| 7996 | char *comm; |
| 7997 | int comm_size; |
| 7998 | |
| 7999 | struct { |
| 8000 | struct perf_event_header header; |
| 8001 | |
| 8002 | u32 pid; |
| 8003 | u32 tid; |
| 8004 | } event_id; |
| 8005 | }; |
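/*
 * ABI view of the record built from the event_id above (editor's sketch):
 * on the userspace side of the ring buffer it reads as
 *
 *	struct comm_record {
 *		struct perf_event_header header;  // PERF_RECORD_COMM
 *		__u32 pid, tid;
 *		char  comm[];                     // NUL-terminated, u64-padded
 *	};
 *
 * optionally followed by the sample_id trailer when sample_id_all is set.
 */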
| 8006 | |
Jiri Olsa | 6751684 | 2013-07-09 18:56:31 +0200 | [diff] [blame] | 8007 | static int perf_event_comm_match(struct perf_event *event) |
| 8008 | { |
| 8009 | return event->attr.comm; |
| 8010 | } |
| 8011 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 8012 | static void perf_event_comm_output(struct perf_event *event, |
Jiri Olsa | 52d857a | 2013-05-06 18:27:18 +0200 | [diff] [blame] | 8013 | void *data) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 8014 | { |
Jiri Olsa | 52d857a | 2013-05-06 18:27:18 +0200 | [diff] [blame] | 8015 | struct perf_comm_event *comm_event = data; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 8016 | struct perf_output_handle handle; |
Arnaldo Carvalho de Melo | c980d10 | 2010-12-04 23:02:20 -0200 | [diff] [blame] | 8017 | struct perf_sample_data sample; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 8018 | int size = comm_event->event_id.header.size; |
Arnaldo Carvalho de Melo | c980d10 | 2010-12-04 23:02:20 -0200 | [diff] [blame] | 8019 | int ret; |
| 8020 | |
Jiri Olsa | 6751684 | 2013-07-09 18:56:31 +0200 | [diff] [blame] | 8021 | if (!perf_event_comm_match(event)) |
| 8022 | return; |
| 8023 | |
Arnaldo Carvalho de Melo | c980d10 | 2010-12-04 23:02:20 -0200 | [diff] [blame] | 8024 | perf_event_header__init_id(&comm_event->event_id.header, &sample, event); |
Peter Zijlstra | 267fb27 | 2020-10-30 15:50:32 +0100 | [diff] [blame] | 8025 | ret = perf_output_begin(&handle, &sample, event, |
Peter Zijlstra | a7ac67e | 2011-06-27 16:47:16 +0200 | [diff] [blame] | 8026 | comm_event->event_id.header.size); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 8027 | |
| 8028 | if (ret) |
Arnaldo Carvalho de Melo | c980d10 | 2010-12-04 23:02:20 -0200 | [diff] [blame] | 8029 | goto out; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 8030 | |
| 8031 | comm_event->event_id.pid = perf_event_pid(event, comm_event->task); |
| 8032 | comm_event->event_id.tid = perf_event_tid(event, comm_event->task); |
| 8033 | |
| 8034 | perf_output_put(&handle, comm_event->event_id); |
Frederic Weisbecker | 7636913 | 2011-05-19 19:55:04 +0200 | [diff] [blame] | 8035 | __output_copy(&handle, comm_event->comm, |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 8036 | comm_event->comm_size); |
Arnaldo Carvalho de Melo | c980d10 | 2010-12-04 23:02:20 -0200 | [diff] [blame] | 8037 | |
| 8038 | perf_event__output_id_sample(event, &handle, &sample); |
| 8039 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 8040 | perf_output_end(&handle); |
Arnaldo Carvalho de Melo | c980d10 | 2010-12-04 23:02:20 -0200 | [diff] [blame] | 8041 | out: |
| 8042 | comm_event->event_id.header.size = size; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 8043 | } |
| 8044 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 8045 | static void perf_event_comm_event(struct perf_comm_event *comm_event) |
| 8046 | { |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 8047 | char comm[TASK_COMM_LEN]; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 8048 | unsigned int size; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 8049 | |
| 8050 | memset(comm, 0, sizeof(comm)); |
Márton Németh | 96b02d7 | 2009-11-21 23:10:15 +0100 | [diff] [blame] | 8051 | strlcpy(comm, comm_event->task->comm, sizeof(comm)); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 8052 | size = ALIGN(strlen(comm)+1, sizeof(u64)); |
| 8053 | |
| 8054 | comm_event->comm = comm; |
| 8055 | comm_event->comm_size = size; |
| 8056 | |
| 8057 | comm_event->event_id.header.size = sizeof(comm_event->event_id) + size; |
Peter Zijlstra | 8dc85d547 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 8058 | |
Peter Zijlstra | aab5b71 | 2016-05-12 17:26:46 +0200 | [diff] [blame] | 8059 | perf_iterate_sb(perf_event_comm_output, |
Jiri Olsa | 52d857a | 2013-05-06 18:27:18 +0200 | [diff] [blame] | 8060 | comm_event, |
| 8061 | NULL); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 8062 | } |
| 8063 | |
Adrian Hunter | 82b8977 | 2014-05-28 11:45:04 +0300 | [diff] [blame] | 8064 | void perf_event_comm(struct task_struct *task, bool exec) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 8065 | { |
| 8066 | struct perf_comm_event comm_event; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 8067 | |
| 8068 | if (!atomic_read(&nr_comm_events)) |
| 8069 | return; |
| 8070 | |
| 8071 | comm_event = (struct perf_comm_event){ |
| 8072 | .task = task, |
| 8073 | /* .comm */ |
| 8074 | /* .comm_size */ |
| 8075 | .event_id = { |
| 8076 | .header = { |
| 8077 | .type = PERF_RECORD_COMM, |
Adrian Hunter | 82b8977 | 2014-05-28 11:45:04 +0300 | [diff] [blame] | 8078 | .misc = exec ? PERF_RECORD_MISC_COMM_EXEC : 0, |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 8079 | /* .size */ |
| 8080 | }, |
| 8081 | /* .pid */ |
| 8082 | /* .tid */ |
| 8083 | }, |
| 8084 | }; |
| 8085 | |
| 8086 | perf_event_comm_event(&comm_event); |
| 8087 | } |
| 8088 | |
| 8089 | /* |
Hari Bathini | e422267 | 2017-03-08 02:11:36 +0530 | [diff] [blame] | 8090 | * namespaces tracking |
| 8091 | */ |
| 8092 | |
| 8093 | struct perf_namespaces_event { |
| 8094 | struct task_struct *task; |
| 8095 | |
| 8096 | struct { |
| 8097 | struct perf_event_header header; |
| 8098 | |
| 8099 | u32 pid; |
| 8100 | u32 tid; |
| 8101 | u64 nr_namespaces; |
| 8102 | struct perf_ns_link_info link_info[NR_NAMESPACES]; |
| 8103 | } event_id; |
| 8104 | }; |
| 8105 | |
| 8106 | static int perf_event_namespaces_match(struct perf_event *event) |
| 8107 | { |
| 8108 | return event->attr.namespaces; |
| 8109 | } |
| 8110 | |
| 8111 | static void perf_event_namespaces_output(struct perf_event *event, |
| 8112 | void *data) |
| 8113 | { |
| 8114 | struct perf_namespaces_event *namespaces_event = data; |
| 8115 | struct perf_output_handle handle; |
| 8116 | struct perf_sample_data sample; |
Jiri Olsa | 34900ec | 2017-08-09 18:14:06 +0200 | [diff] [blame] | 8117 | u16 header_size = namespaces_event->event_id.header.size; |
Hari Bathini | e422267 | 2017-03-08 02:11:36 +0530 | [diff] [blame] | 8118 | int ret; |
| 8119 | |
| 8120 | if (!perf_event_namespaces_match(event)) |
| 8121 | return; |
| 8122 | |
| 8123 | perf_event_header__init_id(&namespaces_event->event_id.header, |
| 8124 | &sample, event); |
Peter Zijlstra | 267fb27 | 2020-10-30 15:50:32 +0100 | [diff] [blame] | 8125 | ret = perf_output_begin(&handle, &sample, event, |
Hari Bathini | e422267 | 2017-03-08 02:11:36 +0530 | [diff] [blame] | 8126 | namespaces_event->event_id.header.size); |
| 8127 | if (ret) |
Jiri Olsa | 34900ec | 2017-08-09 18:14:06 +0200 | [diff] [blame] | 8128 | goto out; |
Hari Bathini | e422267 | 2017-03-08 02:11:36 +0530 | [diff] [blame] | 8129 | |
| 8130 | namespaces_event->event_id.pid = perf_event_pid(event, |
| 8131 | namespaces_event->task); |
| 8132 | namespaces_event->event_id.tid = perf_event_tid(event, |
| 8133 | namespaces_event->task); |
| 8134 | |
| 8135 | perf_output_put(&handle, namespaces_event->event_id); |
| 8136 | |
| 8137 | perf_event__output_id_sample(event, &handle, &sample); |
| 8138 | |
| 8139 | perf_output_end(&handle); |
Jiri Olsa | 34900ec | 2017-08-09 18:14:06 +0200 | [diff] [blame] | 8140 | out: |
| 8141 | namespaces_event->event_id.header.size = header_size; |
Hari Bathini | e422267 | 2017-03-08 02:11:36 +0530 | [diff] [blame] | 8142 | } |
| 8143 | |
| 8144 | static void perf_fill_ns_link_info(struct perf_ns_link_info *ns_link_info, |
| 8145 | struct task_struct *task, |
| 8146 | const struct proc_ns_operations *ns_ops) |
| 8147 | { |
| 8148 | struct path ns_path; |
| 8149 | struct inode *ns_inode; |
Aleksa Sarai | ce623f8 | 2019-12-07 01:13:27 +1100 | [diff] [blame] | 8150 | int error; |
Hari Bathini | e422267 | 2017-03-08 02:11:36 +0530 | [diff] [blame] | 8151 | |
| 8152 | error = ns_get_path(&ns_path, task, ns_ops); |
| 8153 | if (!error) { |
| 8154 | ns_inode = ns_path.dentry->d_inode; |
| 8155 | ns_link_info->dev = new_encode_dev(ns_inode->i_sb->s_dev); |
| 8156 | ns_link_info->ino = ns_inode->i_ino; |
Vasily Averin | 0e18dd1 | 2017-11-15 08:47:02 +0300 | [diff] [blame] | 8157 | path_put(&ns_path); |
Hari Bathini | e422267 | 2017-03-08 02:11:36 +0530 | [diff] [blame] | 8158 | } |
| 8159 | } |
| 8160 | |
| 8161 | void perf_event_namespaces(struct task_struct *task) |
| 8162 | { |
| 8163 | struct perf_namespaces_event namespaces_event; |
| 8164 | struct perf_ns_link_info *ns_link_info; |
| 8165 | |
| 8166 | if (!atomic_read(&nr_namespaces_events)) |
| 8167 | return; |
| 8168 | |
| 8169 | namespaces_event = (struct perf_namespaces_event){ |
| 8170 | .task = task, |
| 8171 | .event_id = { |
| 8172 | .header = { |
| 8173 | .type = PERF_RECORD_NAMESPACES, |
| 8174 | .misc = 0, |
| 8175 | .size = sizeof(namespaces_event.event_id), |
| 8176 | }, |
| 8177 | /* .pid */ |
| 8178 | /* .tid */ |
| 8179 | .nr_namespaces = NR_NAMESPACES, |
| 8180 | /* .link_info[NR_NAMESPACES] */ |
| 8181 | }, |
| 8182 | }; |
| 8183 | |
| 8184 | ns_link_info = namespaces_event.event_id.link_info; |
| 8185 | |
| 8186 | perf_fill_ns_link_info(&ns_link_info[MNT_NS_INDEX], |
| 8187 | task, &mntns_operations); |
| 8188 | |
| 8189 | #ifdef CONFIG_USER_NS |
| 8190 | perf_fill_ns_link_info(&ns_link_info[USER_NS_INDEX], |
| 8191 | task, &userns_operations); |
| 8192 | #endif |
| 8193 | #ifdef CONFIG_NET_NS |
| 8194 | perf_fill_ns_link_info(&ns_link_info[NET_NS_INDEX], |
| 8195 | task, &netns_operations); |
| 8196 | #endif |
| 8197 | #ifdef CONFIG_UTS_NS |
| 8198 | perf_fill_ns_link_info(&ns_link_info[UTS_NS_INDEX], |
| 8199 | task, &utsns_operations); |
| 8200 | #endif |
| 8201 | #ifdef CONFIG_IPC_NS |
| 8202 | perf_fill_ns_link_info(&ns_link_info[IPC_NS_INDEX], |
| 8203 | task, &ipcns_operations); |
| 8204 | #endif |
| 8205 | #ifdef CONFIG_PID_NS |
| 8206 | perf_fill_ns_link_info(&ns_link_info[PID_NS_INDEX], |
| 8207 | task, &pidns_operations); |
| 8208 | #endif |
| 8209 | #ifdef CONFIG_CGROUPS |
| 8210 | perf_fill_ns_link_info(&ns_link_info[CGROUP_NS_INDEX], |
| 8211 | task, &cgroupns_operations); |
| 8212 | #endif |
| 8213 | |
| 8214 | perf_iterate_sb(perf_event_namespaces_output, |
| 8215 | &namespaces_event, |
| 8216 | NULL); |
| 8217 | } |
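/*
 * Illustrative consumer-side check (editor's sketch): each
 * perf_ns_link_info carries the device/inode pair of the namespace file,
 * so a tool can correlate the record with stat(2) on the /proc/<pid>/ns/
 * links, e.g. for the mount namespace:
 *
 *	struct stat st;
 *	char path[64];
 *
 *	snprintf(path, sizeof(path), "/proc/%d/ns/mnt", pid);
 *	if (!stat(path, &st) && st.st_ino == link_info[MNT_NS_INDEX].ino)
 *		;	// same mount namespace as when the record was emitted
 *
 * The dev field can likewise be compared via major()/minor().
 */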
| 8218 | |
| 8219 | /* |
Namhyung Kim | 96aaab6 | 2020-03-25 21:45:28 +0900 | [diff] [blame] | 8220 | * cgroup tracking |
| 8221 | */ |
| 8222 | #ifdef CONFIG_CGROUP_PERF |
| 8223 | |
| 8224 | struct perf_cgroup_event { |
| 8225 | char *path; |
| 8226 | int path_size; |
| 8227 | struct { |
| 8228 | struct perf_event_header header; |
| 8229 | u64 id; |
| 8230 | char path[]; |
| 8231 | } event_id; |
| 8232 | }; |
| 8233 | |
| 8234 | static int perf_event_cgroup_match(struct perf_event *event) |
| 8235 | { |
| 8236 | return event->attr.cgroup; |
| 8237 | } |
| 8238 | |
| 8239 | static void perf_event_cgroup_output(struct perf_event *event, void *data) |
| 8240 | { |
| 8241 | struct perf_cgroup_event *cgroup_event = data; |
| 8242 | struct perf_output_handle handle; |
| 8243 | struct perf_sample_data sample; |
| 8244 | u16 header_size = cgroup_event->event_id.header.size; |
| 8245 | int ret; |
| 8246 | |
| 8247 | if (!perf_event_cgroup_match(event)) |
| 8248 | return; |
| 8249 | |
| 8250 | perf_event_header__init_id(&cgroup_event->event_id.header, |
| 8251 | &sample, event); |
Peter Zijlstra | 267fb27 | 2020-10-30 15:50:32 +0100 | [diff] [blame] | 8252 | ret = perf_output_begin(&handle, &sample, event, |
Namhyung Kim | 96aaab6 | 2020-03-25 21:45:28 +0900 | [diff] [blame] | 8253 | cgroup_event->event_id.header.size); |
| 8254 | if (ret) |
| 8255 | goto out; |
| 8256 | |
| 8257 | perf_output_put(&handle, cgroup_event->event_id); |
| 8258 | __output_copy(&handle, cgroup_event->path, cgroup_event->path_size); |
| 8259 | |
| 8260 | perf_event__output_id_sample(event, &handle, &sample); |
| 8261 | |
| 8262 | perf_output_end(&handle); |
| 8263 | out: |
| 8264 | cgroup_event->event_id.header.size = header_size; |
| 8265 | } |
| 8266 | |
| 8267 | static void perf_event_cgroup(struct cgroup *cgrp) |
| 8268 | { |
| 8269 | struct perf_cgroup_event cgroup_event; |
| 8270 | char path_enomem[16] = "//enomem"; |
| 8271 | char *pathname; |
| 8272 | size_t size; |
| 8273 | |
| 8274 | if (!atomic_read(&nr_cgroup_events)) |
| 8275 | return; |
| 8276 | |
| 8277 | cgroup_event = (struct perf_cgroup_event){ |
| 8278 | .event_id = { |
| 8279 | .header = { |
| 8280 | .type = PERF_RECORD_CGROUP, |
| 8281 | .misc = 0, |
| 8282 | .size = sizeof(cgroup_event.event_id), |
| 8283 | }, |
| 8284 | .id = cgroup_id(cgrp), |
| 8285 | }, |
| 8286 | }; |
| 8287 | |
| 8288 | pathname = kmalloc(PATH_MAX, GFP_KERNEL); |
| 8289 | if (pathname == NULL) { |
| 8290 | cgroup_event.path = path_enomem; |
| 8291 | } else { |
| 8292 | /* just to be sure to have enough space for alignment */ |
| 8293 | cgroup_path(cgrp, pathname, PATH_MAX - sizeof(u64)); |
| 8294 | cgroup_event.path = pathname; |
| 8295 | } |
| 8296 | |
| 8297 | /* |
| 8298 | * Since our buffer works in 8 byte units we need to align our string |
| 8299 | * size to a multiple of 8. However, we must guarantee the tail end is |
| 8300 | * zero'd out to avoid leaking random bits to userspace. |
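 * For example, "/user.slice" plus its NUL terminator occupies 12 bytes
 * and is padded with four more NUL bytes to the next multiple of 8,
 * i.e. a 16 byte copy into the ring buffer.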
| 8301 | */ |
| 8302 | size = strlen(cgroup_event.path) + 1; |
| 8303 | while (!IS_ALIGNED(size, sizeof(u64))) |
| 8304 | cgroup_event.path[size++] = '\0'; |
| 8305 | |
| 8306 | cgroup_event.event_id.header.size += size; |
| 8307 | cgroup_event.path_size = size; |
| 8308 | |
| 8309 | perf_iterate_sb(perf_event_cgroup_output, |
| 8310 | &cgroup_event, |
| 8311 | NULL); |
| 8312 | |
| 8313 | kfree(pathname); |
| 8314 | } |
| 8315 | |
| 8316 | #endif |
| 8317 | |
| 8318 | /* |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 8319 | * mmap tracking |
| 8320 | */ |
| 8321 | |
| 8322 | struct perf_mmap_event { |
| 8323 | struct vm_area_struct *vma; |
| 8324 | |
| 8325 | const char *file_name; |
| 8326 | int file_size; |
Stephane Eranian | 13d7a24 | 2013-08-21 12:10:24 +0200 | [diff] [blame] | 8327 | int maj, min; |
| 8328 | u64 ino; |
| 8329 | u64 ino_generation; |
Peter Zijlstra | f972eb6 | 2014-05-19 15:13:47 -0400 | [diff] [blame] | 8330 | u32 prot, flags; |
Jiri Olsa | 88a16a1 | 2021-01-14 14:40:44 +0100 | [diff] [blame] | 8331 | u8 build_id[BUILD_ID_SIZE_MAX]; |
| 8332 | u32 build_id_size; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 8333 | |
| 8334 | struct { |
| 8335 | struct perf_event_header header; |
| 8336 | |
| 8337 | u32 pid; |
| 8338 | u32 tid; |
| 8339 | u64 start; |
| 8340 | u64 len; |
| 8341 | u64 pgoff; |
| 8342 | } event_id; |
| 8343 | }; |
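/*
 * Illustrative userspace sketch (editor's example): requesting MMAP2
 * records keyed by build ID rather than by device/inode.  When the kernel
 * sets PERF_RECORD_MISC_MMAP_BUILD_ID in header.misc, the 24 bytes that
 * otherwise hold maj/min/ino/ino_generation carry the build ID size and
 * up to 20 bytes of build ID instead (see perf_event_mmap_output() below).
 *
 *	struct perf_event_attr attr = {
 *		.type		= PERF_TYPE_SOFTWARE,
 *		.size		= sizeof(attr),
 *		.config		= PERF_COUNT_SW_DUMMY,
 *		.mmap2		= 1,
 *		.build_id	= 1,
 *		.sample_id_all	= 1,
 *	};
 */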
| 8344 | |
Jiri Olsa | 6751684 | 2013-07-09 18:56:31 +0200 | [diff] [blame] | 8345 | static int perf_event_mmap_match(struct perf_event *event, |
| 8346 | void *data) |
| 8347 | { |
| 8348 | struct perf_mmap_event *mmap_event = data; |
| 8349 | struct vm_area_struct *vma = mmap_event->vma; |
| 8350 | int executable = vma->vm_flags & VM_EXEC; |
| 8351 | |
| 8352 | return (!executable && event->attr.mmap_data) || |
Stephane Eranian | 13d7a24 | 2013-08-21 12:10:24 +0200 | [diff] [blame] | 8353 | (executable && (event->attr.mmap || event->attr.mmap2)); |
Jiri Olsa | 6751684 | 2013-07-09 18:56:31 +0200 | [diff] [blame] | 8354 | } |
| 8355 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 8356 | static void perf_event_mmap_output(struct perf_event *event, |
Jiri Olsa | 52d857a | 2013-05-06 18:27:18 +0200 | [diff] [blame] | 8357 | void *data) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 8358 | { |
Jiri Olsa | 52d857a | 2013-05-06 18:27:18 +0200 | [diff] [blame] | 8359 | struct perf_mmap_event *mmap_event = data; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 8360 | struct perf_output_handle handle; |
Arnaldo Carvalho de Melo | c980d10 | 2010-12-04 23:02:20 -0200 | [diff] [blame] | 8361 | struct perf_sample_data sample; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 8362 | int size = mmap_event->event_id.header.size; |
Stephane Eranian | d9c1bb2 | 2019-03-07 10:52:33 -0800 | [diff] [blame] | 8363 | u32 type = mmap_event->event_id.header.type; |
Jiri Olsa | 88a16a1 | 2021-01-14 14:40:44 +0100 | [diff] [blame] | 8364 | bool use_build_id; |
Arnaldo Carvalho de Melo | c980d10 | 2010-12-04 23:02:20 -0200 | [diff] [blame] | 8365 | int ret; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 8366 | |
Jiri Olsa | 6751684 | 2013-07-09 18:56:31 +0200 | [diff] [blame] | 8367 | if (!perf_event_mmap_match(event, data)) |
| 8368 | return; |
| 8369 | |
Stephane Eranian | 13d7a24 | 2013-08-21 12:10:24 +0200 | [diff] [blame] | 8370 | if (event->attr.mmap2) { |
| 8371 | mmap_event->event_id.header.type = PERF_RECORD_MMAP2; |
| 8372 | mmap_event->event_id.header.size += sizeof(mmap_event->maj); |
| 8373 | mmap_event->event_id.header.size += sizeof(mmap_event->min); |
| 8374 | mmap_event->event_id.header.size += sizeof(mmap_event->ino); |
Arnaldo Carvalho de Melo | d008d52 | 2013-09-10 10:24:05 -0300 | [diff] [blame] | 8375 | mmap_event->event_id.header.size += sizeof(mmap_event->ino_generation); |
Peter Zijlstra | f972eb6 | 2014-05-19 15:13:47 -0400 | [diff] [blame] | 8376 | mmap_event->event_id.header.size += sizeof(mmap_event->prot); |
| 8377 | mmap_event->event_id.header.size += sizeof(mmap_event->flags); |
Stephane Eranian | 13d7a24 | 2013-08-21 12:10:24 +0200 | [diff] [blame] | 8378 | } |
| 8379 | |
Arnaldo Carvalho de Melo | c980d10 | 2010-12-04 23:02:20 -0200 | [diff] [blame] | 8380 | perf_event_header__init_id(&mmap_event->event_id.header, &sample, event); |
Peter Zijlstra | 267fb27 | 2020-10-30 15:50:32 +0100 | [diff] [blame] | 8381 | ret = perf_output_begin(&handle, &sample, event, |
Peter Zijlstra | a7ac67e | 2011-06-27 16:47:16 +0200 | [diff] [blame] | 8382 | mmap_event->event_id.header.size); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 8383 | if (ret) |
Arnaldo Carvalho de Melo | c980d10 | 2010-12-04 23:02:20 -0200 | [diff] [blame] | 8384 | goto out; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 8385 | |
| 8386 | mmap_event->event_id.pid = perf_event_pid(event, current); |
| 8387 | mmap_event->event_id.tid = perf_event_tid(event, current); |
| 8388 | |
Jiri Olsa | 88a16a1 | 2021-01-14 14:40:44 +0100 | [diff] [blame] | 8389 | use_build_id = event->attr.build_id && mmap_event->build_id_size; |
| 8390 | |
| 8391 | if (event->attr.mmap2 && use_build_id) |
| 8392 | mmap_event->event_id.header.misc |= PERF_RECORD_MISC_MMAP_BUILD_ID; |
| 8393 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 8394 | perf_output_put(&handle, mmap_event->event_id); |
Stephane Eranian | 13d7a24 | 2013-08-21 12:10:24 +0200 | [diff] [blame] | 8395 | |
| 8396 | if (event->attr.mmap2) { |
Jiri Olsa | 88a16a1 | 2021-01-14 14:40:44 +0100 | [diff] [blame] | 8397 | if (use_build_id) { |
| 8398 | u8 size[4] = { (u8) mmap_event->build_id_size, 0, 0, 0 }; |
| 8399 | |
| 8400 | __output_copy(&handle, size, 4); |
| 8401 | __output_copy(&handle, mmap_event->build_id, BUILD_ID_SIZE_MAX); |
| 8402 | } else { |
| 8403 | perf_output_put(&handle, mmap_event->maj); |
| 8404 | perf_output_put(&handle, mmap_event->min); |
| 8405 | perf_output_put(&handle, mmap_event->ino); |
| 8406 | perf_output_put(&handle, mmap_event->ino_generation); |
| 8407 | } |
Peter Zijlstra | f972eb6 | 2014-05-19 15:13:47 -0400 | [diff] [blame] | 8408 | perf_output_put(&handle, mmap_event->prot); |
| 8409 | perf_output_put(&handle, mmap_event->flags); |
Stephane Eranian | 13d7a24 | 2013-08-21 12:10:24 +0200 | [diff] [blame] | 8410 | } |
| 8411 | |
Frederic Weisbecker | 7636913 | 2011-05-19 19:55:04 +0200 | [diff] [blame] | 8412 | __output_copy(&handle, mmap_event->file_name, |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 8413 | mmap_event->file_size); |
Arnaldo Carvalho de Melo | c980d10 | 2010-12-04 23:02:20 -0200 | [diff] [blame] | 8414 | |
| 8415 | perf_event__output_id_sample(event, &handle, &sample); |
| 8416 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 8417 | perf_output_end(&handle); |
Arnaldo Carvalho de Melo | c980d10 | 2010-12-04 23:02:20 -0200 | [diff] [blame] | 8418 | out: |
| 8419 | mmap_event->event_id.header.size = size; |
Stephane Eranian | d9c1bb2 | 2019-03-07 10:52:33 -0800 | [diff] [blame] | 8420 | mmap_event->event_id.header.type = type; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 8421 | } |
| 8422 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 8423 | static void perf_event_mmap_event(struct perf_mmap_event *mmap_event) |
| 8424 | { |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 8425 | struct vm_area_struct *vma = mmap_event->vma; |
| 8426 | struct file *file = vma->vm_file; |
Stephane Eranian | 13d7a24 | 2013-08-21 12:10:24 +0200 | [diff] [blame] | 8427 | int maj = 0, min = 0; |
| 8428 | u64 ino = 0, gen = 0; |
Peter Zijlstra | f972eb6 | 2014-05-19 15:13:47 -0400 | [diff] [blame] | 8429 | u32 prot = 0, flags = 0; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 8430 | unsigned int size; |
| 8431 | char tmp[16]; |
| 8432 | char *buf = NULL; |
Peter Zijlstra | 2c42cfbf | 2013-10-17 00:06:46 +0200 | [diff] [blame] | 8433 | char *name; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 8434 | |
Peter Zijlstra | 0b3589b | 2017-01-26 23:15:08 +0100 | [diff] [blame] | 8435 | if (vma->vm_flags & VM_READ) |
| 8436 | prot |= PROT_READ; |
| 8437 | if (vma->vm_flags & VM_WRITE) |
| 8438 | prot |= PROT_WRITE; |
| 8439 | if (vma->vm_flags & VM_EXEC) |
| 8440 | prot |= PROT_EXEC; |
| 8441 | |
| 8442 | if (vma->vm_flags & VM_MAYSHARE) |
| 8443 | flags = MAP_SHARED; |
| 8444 | else |
| 8445 | flags = MAP_PRIVATE; |
| 8446 | |
Peter Zijlstra | 0b3589b | 2017-01-26 23:15:08 +0100 | [diff] [blame] | 8447 | if (vma->vm_flags & VM_LOCKED) |
| 8448 | flags |= MAP_LOCKED; |
Anshuman Khandual | 0391113 | 2020-04-06 20:03:51 -0700 | [diff] [blame] | 8449 | if (is_vm_hugetlb_page(vma)) |
Peter Zijlstra | 0b3589b | 2017-01-26 23:15:08 +0100 | [diff] [blame] | 8450 | flags |= MAP_HUGETLB; |
| 8451 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 8452 | if (file) { |
Stephane Eranian | 13d7a24 | 2013-08-21 12:10:24 +0200 | [diff] [blame] | 8453 | struct inode *inode; |
| 8454 | dev_t dev; |
Oleg Nesterov | 3ea2f2b | 2013-10-16 22:10:04 +0200 | [diff] [blame] | 8455 | |
Peter Zijlstra | 2c42cfbf | 2013-10-17 00:06:46 +0200 | [diff] [blame] | 8456 | buf = kmalloc(PATH_MAX, GFP_KERNEL); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 8457 | if (!buf) { |
Oleg Nesterov | c7e548b | 2013-10-17 20:24:17 +0200 | [diff] [blame] | 8458 | name = "//enomem"; |
| 8459 | goto cpy_name; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 8460 | } |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 8461 | /* |
Oleg Nesterov | 3ea2f2b | 2013-10-16 22:10:04 +0200 | [diff] [blame] | 8462 | * d_path() works from the end of the buffer backwards, so we
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 8463 | * need to add enough zero bytes after the string to handle |
| 8464 | * the 64bit alignment we do later. |
| 8465 | */ |
Miklos Szeredi | 9bf39ab | 2015-06-19 10:29:13 +0200 | [diff] [blame] | 8466 | name = file_path(file, buf, PATH_MAX - sizeof(u64)); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 8467 | if (IS_ERR(name)) { |
Oleg Nesterov | c7e548b | 2013-10-17 20:24:17 +0200 | [diff] [blame] | 8468 | name = "//toolong"; |
| 8469 | goto cpy_name; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 8470 | } |
Stephane Eranian | 13d7a24 | 2013-08-21 12:10:24 +0200 | [diff] [blame] | 8471 | inode = file_inode(vma->vm_file); |
| 8472 | dev = inode->i_sb->s_dev; |
| 8473 | ino = inode->i_ino; |
| 8474 | gen = inode->i_generation; |
| 8475 | maj = MAJOR(dev); |
| 8476 | min = MINOR(dev); |
Peter Zijlstra | f972eb6 | 2014-05-19 15:13:47 -0400 | [diff] [blame] | 8477 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 8478 | goto got_name; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 8479 | } else { |
Jiri Olsa | fbe26ab | 2014-07-14 17:57:19 +0200 | [diff] [blame] | 8480 | if (vma->vm_ops && vma->vm_ops->name) { |
| 8481 | name = (char *) vma->vm_ops->name(vma); |
| 8482 | if (name) |
| 8483 | goto cpy_name; |
| 8484 | } |
| 8485 | |
Peter Zijlstra | 2c42cfbf | 2013-10-17 00:06:46 +0200 | [diff] [blame] | 8486 | name = (char *)arch_vma_name(vma); |
Oleg Nesterov | c7e548b | 2013-10-17 20:24:17 +0200 | [diff] [blame] | 8487 | if (name) |
| 8488 | goto cpy_name; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 8489 | |
Oleg Nesterov | 32c5fb7 | 2013-10-16 22:09:45 +0200 | [diff] [blame] | 8490 | if (vma->vm_start <= vma->vm_mm->start_brk && |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 8491 | vma->vm_end >= vma->vm_mm->brk) { |
Oleg Nesterov | c7e548b | 2013-10-17 20:24:17 +0200 | [diff] [blame] | 8492 | name = "[heap]"; |
| 8493 | goto cpy_name; |
Oleg Nesterov | 32c5fb7 | 2013-10-16 22:09:45 +0200 | [diff] [blame] | 8494 | } |
| 8495 | if (vma->vm_start <= vma->vm_mm->start_stack && |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 8496 | vma->vm_end >= vma->vm_mm->start_stack) { |
Oleg Nesterov | c7e548b | 2013-10-17 20:24:17 +0200 | [diff] [blame] | 8497 | name = "[stack]"; |
| 8498 | goto cpy_name; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 8499 | } |
| 8500 | |
Oleg Nesterov | c7e548b | 2013-10-17 20:24:17 +0200 | [diff] [blame] | 8501 | name = "//anon"; |
| 8502 | goto cpy_name; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 8503 | } |
| 8504 | |
Oleg Nesterov | c7e548b | 2013-10-17 20:24:17 +0200 | [diff] [blame] | 8505 | cpy_name: |
| 8506 | strlcpy(tmp, name, sizeof(tmp)); |
| 8507 | name = tmp; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 8508 | got_name: |
Peter Zijlstra | 2c42cfbf | 2013-10-17 00:06:46 +0200 | [diff] [blame] | 8509 | /* |
| 8510 | * Since our buffer works in 8 byte units we need to align our string |
| 8511 | * size to a multiple of 8. However, we must guarantee the tail end is |
| 8512 | * zero'd out to avoid leaking random bits to userspace. |
| 8513 | */ |
| 8514 | size = strlen(name)+1; |
| 8515 | while (!IS_ALIGNED(size, sizeof(u64))) |
| 8516 | name[size++] = '\0'; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 8517 | |
| 8518 | mmap_event->file_name = name; |
| 8519 | mmap_event->file_size = size; |
Stephane Eranian | 13d7a24 | 2013-08-21 12:10:24 +0200 | [diff] [blame] | 8520 | mmap_event->maj = maj; |
| 8521 | mmap_event->min = min; |
| 8522 | mmap_event->ino = ino; |
| 8523 | mmap_event->ino_generation = gen; |
Peter Zijlstra | f972eb6 | 2014-05-19 15:13:47 -0400 | [diff] [blame] | 8524 | mmap_event->prot = prot; |
| 8525 | mmap_event->flags = flags; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 8526 | |
Stephane Eranian | 2fe8542 | 2013-01-24 16:10:39 +0100 | [diff] [blame] | 8527 | if (!(vma->vm_flags & VM_EXEC)) |
| 8528 | mmap_event->event_id.header.misc |= PERF_RECORD_MISC_MMAP_DATA; |
| 8529 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 8530 | mmap_event->event_id.header.size = sizeof(mmap_event->event_id) + size; |
| 8531 | |
Jiri Olsa | 88a16a1 | 2021-01-14 14:40:44 +0100 | [diff] [blame] | 8532 | if (atomic_read(&nr_build_id_events)) |
| 8533 | build_id_parse(vma, mmap_event->build_id, &mmap_event->build_id_size); |
| 8534 | |
Peter Zijlstra | aab5b71 | 2016-05-12 17:26:46 +0200 | [diff] [blame] | 8535 | perf_iterate_sb(perf_event_mmap_output, |
Jiri Olsa | 52d857a | 2013-05-06 18:27:18 +0200 | [diff] [blame] | 8536 | mmap_event, |
| 8537 | NULL); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 8538 | |
| 8539 | kfree(buf); |
| 8540 | } |
| 8541 | |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 8542 | /* |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 8543 | * Check whether inode and address range match filter criteria. |
| 8544 | */ |
| 8545 | static bool perf_addr_filter_match(struct perf_addr_filter *filter, |
| 8546 | struct file *file, unsigned long offset, |
| 8547 | unsigned long size) |
| 8548 | { |
Mathieu Poirier | 7f635ff | 2018-07-16 17:13:51 -0600 | [diff] [blame] | 8549 | /* d_inode(NULL) won't be equal to any mapped user-space file */ |
| 8550 | if (!filter->path.dentry) |
| 8551 | return false; |
| 8552 | |
Song Liu | 9511bce | 2018-04-17 23:29:07 -0700 | [diff] [blame] | 8553 | if (d_inode(filter->path.dentry) != file_inode(file)) |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 8554 | return false; |
| 8555 | |
| 8556 | if (filter->offset > offset + size) |
| 8557 | return false; |
| 8558 | |
| 8559 | if (filter->offset + filter->size < offset) |
| 8560 | return false; |
| 8561 | |
| 8562 | return true; |
| 8563 | } |
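/*
 * Worked example (editor's note): a filter with offset 0x1000 and size
 * 0x800 matches a mapping that covers file offsets [0, 0x2000), since
 * 0x1000 is not beyond 0x2000 and 0x1800 is not below 0; the two early
 * returns above only reject ranges that do not overlap at all.
 */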
| 8564 | |
Alexander Shishkin | c60f83b | 2019-02-15 13:56:55 +0200 | [diff] [blame] | 8565 | static bool perf_addr_filter_vma_adjust(struct perf_addr_filter *filter, |
| 8566 | struct vm_area_struct *vma, |
| 8567 | struct perf_addr_filter_range *fr) |
| 8568 | { |
| 8569 | unsigned long vma_size = vma->vm_end - vma->vm_start; |
| 8570 | unsigned long off = vma->vm_pgoff << PAGE_SHIFT; |
| 8571 | struct file *file = vma->vm_file; |
| 8572 | |
| 8573 | if (!perf_addr_filter_match(filter, file, off, vma_size)) |
| 8574 | return false; |
| 8575 | |
| 8576 | if (filter->offset < off) { |
| 8577 | fr->start = vma->vm_start; |
| 8578 | fr->size = min(vma_size, filter->size - (off - filter->offset)); |
| 8579 | } else { |
| 8580 | fr->start = vma->vm_start + filter->offset - off; |
| 8581 | fr->size = min(vma->vm_end - fr->start, filter->size); |
| 8582 | } |
| 8583 | |
| 8584 | return true; |
| 8585 | } |
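/*
 * Worked example (editor's note): with vm_start == 0x400000, vm_pgoff == 0
 * and a 0x2000 byte mapping, the same filter (offset 0x1000, size 0x800)
 * takes the else branch above and resolves to fr->start == 0x401000,
 * fr->size == 0x800, i.e. file offsets are rebased onto virtual addresses.
 */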
| 8586 | |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 8587 | static void __perf_addr_filters_adjust(struct perf_event *event, void *data) |
| 8588 | { |
| 8589 | struct perf_addr_filters_head *ifh = perf_event_addr_filters(event); |
| 8590 | struct vm_area_struct *vma = data; |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 8591 | struct perf_addr_filter *filter; |
| 8592 | unsigned int restart = 0, count = 0; |
Alexander Shishkin | c60f83b | 2019-02-15 13:56:55 +0200 | [diff] [blame] | 8593 | unsigned long flags; |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 8594 | |
| 8595 | if (!has_addr_filter(event)) |
| 8596 | return; |
| 8597 | |
Alexander Shishkin | c60f83b | 2019-02-15 13:56:55 +0200 | [diff] [blame] | 8598 | if (!vma->vm_file) |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 8599 | return; |
| 8600 | |
| 8601 | raw_spin_lock_irqsave(&ifh->lock, flags); |
| 8602 | list_for_each_entry(filter, &ifh->list, entry) { |
Alexander Shishkin | c60f83b | 2019-02-15 13:56:55 +0200 | [diff] [blame] | 8603 | if (perf_addr_filter_vma_adjust(filter, vma, |
| 8604 | &event->addr_filter_ranges[count])) |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 8605 | restart++; |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 8606 | |
| 8607 | count++; |
| 8608 | } |
| 8609 | |
| 8610 | if (restart) |
| 8611 | event->addr_filters_gen++; |
| 8612 | raw_spin_unlock_irqrestore(&ifh->lock, flags); |
| 8613 | |
| 8614 | if (restart) |
Alexander Shishkin | 767ae08 | 2016-09-06 16:23:49 +0300 | [diff] [blame] | 8615 | perf_event_stop(event, 1); |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 8616 | } |
| 8617 | |
| 8618 | /* |
| 8619 | * Adjust all task's events' filters to the new vma |
| 8620 | */ |
| 8621 | static void perf_addr_filters_adjust(struct vm_area_struct *vma) |
| 8622 | { |
| 8623 | struct perf_event_context *ctx; |
| 8624 | int ctxn; |
| 8625 | |
Mathieu Poirier | 12b40a2 | 2016-07-18 10:43:06 -0600 | [diff] [blame] | 8626 | /* |
| 8627 | * Data tracing isn't supported yet and as such there is no need |
| 8628 | * to keep track of anything that isn't related to executable code: |
| 8629 | */ |
| 8630 | if (!(vma->vm_flags & VM_EXEC)) |
| 8631 | return; |
| 8632 | |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 8633 | rcu_read_lock(); |
| 8634 | for_each_task_context_nr(ctxn) { |
| 8635 | ctx = rcu_dereference(current->perf_event_ctxp[ctxn]); |
| 8636 | if (!ctx) |
| 8637 | continue; |
| 8638 | |
Peter Zijlstra | aab5b71 | 2016-05-12 17:26:46 +0200 | [diff] [blame] | 8639 | perf_iterate_ctx(ctx, __perf_addr_filters_adjust, vma, true); |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 8640 | } |
| 8641 | rcu_read_unlock(); |
| 8642 | } |
| 8643 | |
Eric B Munson | 3af9e85 | 2010-05-18 15:30:49 +0100 | [diff] [blame] | 8644 | void perf_event_mmap(struct vm_area_struct *vma) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 8645 | { |
| 8646 | struct perf_mmap_event mmap_event; |
| 8647 | |
| 8648 | if (!atomic_read(&nr_mmap_events)) |
| 8649 | return; |
| 8650 | |
| 8651 | mmap_event = (struct perf_mmap_event){ |
| 8652 | .vma = vma, |
| 8653 | /* .file_name */ |
| 8654 | /* .file_size */ |
| 8655 | .event_id = { |
| 8656 | .header = { |
| 8657 | .type = PERF_RECORD_MMAP, |
Zhang, Yanmin | 39447b3 | 2010-04-19 13:32:41 +0800 | [diff] [blame] | 8658 | .misc = PERF_RECORD_MISC_USER, |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 8659 | /* .size */ |
| 8660 | }, |
| 8661 | /* .pid */ |
| 8662 | /* .tid */ |
| 8663 | .start = vma->vm_start, |
| 8664 | .len = vma->vm_end - vma->vm_start, |
Peter Zijlstra | 3a0304e | 2010-02-26 10:33:41 +0100 | [diff] [blame] | 8665 | .pgoff = (u64)vma->vm_pgoff << PAGE_SHIFT, |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 8666 | }, |
Stephane Eranian | 13d7a24 | 2013-08-21 12:10:24 +0200 | [diff] [blame] | 8667 | /* .maj (attr_mmap2 only) */ |
| 8668 | /* .min (attr_mmap2 only) */ |
| 8669 | /* .ino (attr_mmap2 only) */ |
| 8670 | /* .ino_generation (attr_mmap2 only) */ |
Peter Zijlstra | f972eb6 | 2014-05-19 15:13:47 -0400 | [diff] [blame] | 8671 | /* .prot (attr_mmap2 only) */ |
| 8672 | /* .flags (attr_mmap2 only) */ |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 8673 | }; |
| 8674 | |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 8675 | perf_addr_filters_adjust(vma); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 8676 | perf_event_mmap_event(&mmap_event); |
| 8677 | } |
| 8678 | |
Alexander Shishkin | 68db7e9 | 2015-01-14 14:18:15 +0200 | [diff] [blame] | 8679 | void perf_event_aux_event(struct perf_event *event, unsigned long head, |
| 8680 | unsigned long size, u64 flags) |
| 8681 | { |
| 8682 | struct perf_output_handle handle; |
| 8683 | struct perf_sample_data sample; |
| 8684 | struct perf_aux_event { |
| 8685 | struct perf_event_header header; |
| 8686 | u64 offset; |
| 8687 | u64 size; |
| 8688 | u64 flags; |
| 8689 | } rec = { |
| 8690 | .header = { |
| 8691 | .type = PERF_RECORD_AUX, |
| 8692 | .misc = 0, |
| 8693 | .size = sizeof(rec), |
| 8694 | }, |
| 8695 | .offset = head, |
| 8696 | .size = size, |
| 8697 | .flags = flags, |
| 8698 | }; |
| 8699 | int ret; |
| 8700 | |
| 8701 | perf_event_header__init_id(&rec.header, &sample, event); |
Peter Zijlstra | 267fb27 | 2020-10-30 15:50:32 +0100 | [diff] [blame] | 8702 | ret = perf_output_begin(&handle, &sample, event, rec.header.size); |
Alexander Shishkin | 68db7e9 | 2015-01-14 14:18:15 +0200 | [diff] [blame] | 8703 | |
| 8704 | if (ret) |
| 8705 | return; |
| 8706 | |
| 8707 | perf_output_put(&handle, rec); |
| 8708 | perf_event__output_id_sample(event, &handle, &sample); |
| 8709 | |
| 8710 | perf_output_end(&handle); |
| 8711 | } |
| 8712 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 8713 | /* |
Kan Liang | f38b0db | 2015-05-10 15:13:14 -0400 | [diff] [blame] | 8714 | * Lost/dropped samples logging |
| 8715 | */ |
| 8716 | void perf_log_lost_samples(struct perf_event *event, u64 lost) |
| 8717 | { |
| 8718 | struct perf_output_handle handle; |
| 8719 | struct perf_sample_data sample; |
| 8720 | int ret; |
| 8721 | |
| 8722 | struct { |
| 8723 | struct perf_event_header header; |
| 8724 | u64 lost; |
| 8725 | } lost_samples_event = { |
| 8726 | .header = { |
| 8727 | .type = PERF_RECORD_LOST_SAMPLES, |
| 8728 | .misc = 0, |
| 8729 | .size = sizeof(lost_samples_event), |
| 8730 | }, |
| 8731 | .lost = lost, |
| 8732 | }; |
| 8733 | |
| 8734 | perf_event_header__init_id(&lost_samples_event.header, &sample, event); |
| 8735 | |
Peter Zijlstra | 267fb27 | 2020-10-30 15:50:32 +0100 | [diff] [blame] | 8736 | ret = perf_output_begin(&handle, &sample, event, |
Kan Liang | f38b0db | 2015-05-10 15:13:14 -0400 | [diff] [blame] | 8737 | lost_samples_event.header.size); |
| 8738 | if (ret) |
| 8739 | return; |
| 8740 | |
| 8741 | perf_output_put(&handle, lost_samples_event); |
| 8742 | perf_event__output_id_sample(event, &handle, &sample); |
| 8743 | perf_output_end(&handle); |
| 8744 | } |
| 8745 | |
| 8746 | /* |
Adrian Hunter | 45ac140 | 2015-07-21 12:44:02 +0300 | [diff] [blame] | 8747 | * context_switch tracking |
| 8748 | */ |
| 8749 | |
| 8750 | struct perf_switch_event { |
| 8751 | struct task_struct *task; |
| 8752 | struct task_struct *next_prev; |
| 8753 | |
| 8754 | struct { |
| 8755 | struct perf_event_header header; |
| 8756 | u32 next_prev_pid; |
| 8757 | u32 next_prev_tid; |
| 8758 | } event_id; |
| 8759 | }; |
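/*
 * Illustrative userspace sketch (editor's example): context switch records
 * are enabled with attr.context_switch.  As implemented below, a per-task
 * event only sees a bare PERF_RECORD_SWITCH header, while a CPU-wide event
 * (pid == -1 at perf_event_open time) also gets the next/prev pid/tid in
 * PERF_RECORD_SWITCH_CPU_WIDE.
 *
 *	struct perf_event_attr attr = {
 *		.type		= PERF_TYPE_SOFTWARE,
 *		.size		= sizeof(attr),
 *		.config		= PERF_COUNT_SW_DUMMY,
 *		.context_switch	= 1,
 *		.sample_id_all	= 1,
 *	};
 *	// CPU-wide: syscall(__NR_perf_event_open, &attr, -1, cpu, -1, 0)
 */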
| 8760 | |
| 8761 | static int perf_event_switch_match(struct perf_event *event) |
| 8762 | { |
| 8763 | return event->attr.context_switch; |
| 8764 | } |
| 8765 | |
| 8766 | static void perf_event_switch_output(struct perf_event *event, void *data) |
| 8767 | { |
| 8768 | struct perf_switch_event *se = data; |
| 8769 | struct perf_output_handle handle; |
| 8770 | struct perf_sample_data sample; |
| 8771 | int ret; |
| 8772 | |
| 8773 | if (!perf_event_switch_match(event)) |
| 8774 | return; |
| 8775 | |
| 8776 | /* Only CPU-wide events are allowed to see next/prev pid/tid */ |
| 8777 | if (event->ctx->task) { |
| 8778 | se->event_id.header.type = PERF_RECORD_SWITCH; |
| 8779 | se->event_id.header.size = sizeof(se->event_id.header); |
| 8780 | } else { |
| 8781 | se->event_id.header.type = PERF_RECORD_SWITCH_CPU_WIDE; |
| 8782 | se->event_id.header.size = sizeof(se->event_id); |
| 8783 | se->event_id.next_prev_pid = |
| 8784 | perf_event_pid(event, se->next_prev); |
| 8785 | se->event_id.next_prev_tid = |
| 8786 | perf_event_tid(event, se->next_prev); |
| 8787 | } |
| 8788 | |
| 8789 | perf_event_header__init_id(&se->event_id.header, &sample, event); |
| 8790 | |
Peter Zijlstra | 267fb27 | 2020-10-30 15:50:32 +0100 | [diff] [blame] | 8791 | ret = perf_output_begin(&handle, &sample, event, se->event_id.header.size); |
Adrian Hunter | 45ac140 | 2015-07-21 12:44:02 +0300 | [diff] [blame] | 8792 | if (ret) |
| 8793 | return; |
| 8794 | |
| 8795 | if (event->ctx->task) |
| 8796 | perf_output_put(&handle, se->event_id.header); |
| 8797 | else |
| 8798 | perf_output_put(&handle, se->event_id); |
| 8799 | |
| 8800 | perf_event__output_id_sample(event, &handle, &sample); |
| 8801 | |
| 8802 | perf_output_end(&handle); |
| 8803 | } |
| 8804 | |
| 8805 | static void perf_event_switch(struct task_struct *task, |
| 8806 | struct task_struct *next_prev, bool sched_in) |
| 8807 | { |
| 8808 | struct perf_switch_event switch_event; |
| 8809 | |
| 8810 | /* N.B. caller checks nr_switch_events != 0 */ |
| 8811 | |
| 8812 | switch_event = (struct perf_switch_event){ |
| 8813 | .task = task, |
| 8814 | .next_prev = next_prev, |
| 8815 | .event_id = { |
| 8816 | .header = { |
| 8817 | /* .type */ |
| 8818 | .misc = sched_in ? 0 : PERF_RECORD_MISC_SWITCH_OUT, |
| 8819 | /* .size */ |
| 8820 | }, |
| 8821 | /* .next_prev_pid */ |
| 8822 | /* .next_prev_tid */ |
| 8823 | }, |
| 8824 | }; |
| 8825 | |
Peter Zijlstra | 3ba9f93 | 2021-06-11 10:28:13 +0200 | [diff] [blame] | 8826 | if (!sched_in && task->on_rq) { |
Alexey Budankov | 101592b | 2018-04-09 10:25:32 +0300 | [diff] [blame] | 8827 | switch_event.event_id.header.misc |= |
| 8828 | PERF_RECORD_MISC_SWITCH_OUT_PREEMPT; |
Peter Zijlstra | 3ba9f93 | 2021-06-11 10:28:13 +0200 | [diff] [blame] | 8829 | } |
Alexey Budankov | 101592b | 2018-04-09 10:25:32 +0300 | [diff] [blame] | 8830 | |
Peter Zijlstra | 3ba9f93 | 2021-06-11 10:28:13 +0200 | [diff] [blame] | 8831 | perf_iterate_sb(perf_event_switch_output, &switch_event, NULL); |
Adrian Hunter | 45ac140 | 2015-07-21 12:44:02 +0300 | [diff] [blame] | 8832 | } |
| 8833 | |
| 8834 | /* |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 8835 | * IRQ throttle logging |
| 8836 | */ |
| 8837 | |
| 8838 | static void perf_log_throttle(struct perf_event *event, int enable) |
| 8839 | { |
| 8840 | struct perf_output_handle handle; |
Arnaldo Carvalho de Melo | c980d10 | 2010-12-04 23:02:20 -0200 | [diff] [blame] | 8841 | struct perf_sample_data sample; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 8842 | int ret; |
| 8843 | |
| 8844 | struct { |
| 8845 | struct perf_event_header header; |
| 8846 | u64 time; |
| 8847 | u64 id; |
| 8848 | u64 stream_id; |
| 8849 | } throttle_event = { |
| 8850 | .header = { |
| 8851 | .type = PERF_RECORD_THROTTLE, |
| 8852 | .misc = 0, |
| 8853 | .size = sizeof(throttle_event), |
| 8854 | }, |
Peter Zijlstra | 34f4392 | 2015-02-20 14:05:38 +0100 | [diff] [blame] | 8855 | .time = perf_event_clock(event), |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 8856 | .id = primary_event_id(event), |
| 8857 | .stream_id = event->id, |
| 8858 | }; |
| 8859 | |
| 8860 | if (enable) |
| 8861 | throttle_event.header.type = PERF_RECORD_UNTHROTTLE; |
| 8862 | |
Arnaldo Carvalho de Melo | c980d10 | 2010-12-04 23:02:20 -0200 | [diff] [blame] | 8863 | perf_event_header__init_id(&throttle_event.header, &sample, event); |
| 8864 | |
Peter Zijlstra | 267fb27 | 2020-10-30 15:50:32 +0100 | [diff] [blame] | 8865 | ret = perf_output_begin(&handle, &sample, event, |
Peter Zijlstra | a7ac67e | 2011-06-27 16:47:16 +0200 | [diff] [blame] | 8866 | throttle_event.header.size); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 8867 | if (ret) |
| 8868 | return; |
| 8869 | |
| 8870 | perf_output_put(&handle, throttle_event); |
Arnaldo Carvalho de Melo | c980d10 | 2010-12-04 23:02:20 -0200 | [diff] [blame] | 8871 | perf_event__output_id_sample(event, &handle, &sample); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 8872 | perf_output_end(&handle); |
| 8873 | } |
| 8874 | |
Song Liu | 76193a9 | 2019-01-17 08:15:13 -0800 | [diff] [blame] | 8875 | /* |
| 8876 | * ksymbol register/unregister tracking |
| 8877 | */ |
| 8878 | |
| 8879 | struct perf_ksymbol_event { |
| 8880 | const char *name; |
| 8881 | int name_len; |
| 8882 | struct { |
| 8883 | struct perf_event_header header; |
| 8884 | u64 addr; |
| 8885 | u32 len; |
| 8886 | u16 ksym_type; |
| 8887 | u16 flags; |
| 8888 | } event_id; |
| 8889 | }; |
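/*
 * ABI view of the record built above (editor's sketch): userspace sees
 *
 *	struct ksymbol_record {
 *		struct perf_event_header header;  // PERF_RECORD_KSYMBOL
 *		__u64 addr;
 *		__u32 len;
 *		__u16 ksym_type;                  // e.g. PERF_RECORD_KSYMBOL_TYPE_BPF
 *		__u16 flags;                      // ..._FLAGS_UNREGISTER on removal
 *		char  name[];                     // NUL-terminated, u64-padded
 *	};
 *
 * followed by the sample_id trailer when sample_id_all is set.
 */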
| 8890 | |
| 8891 | static int perf_event_ksymbol_match(struct perf_event *event) |
| 8892 | { |
| 8893 | return event->attr.ksymbol; |
| 8894 | } |
| 8895 | |
| 8896 | static void perf_event_ksymbol_output(struct perf_event *event, void *data) |
| 8897 | { |
| 8898 | struct perf_ksymbol_event *ksymbol_event = data; |
| 8899 | struct perf_output_handle handle; |
| 8900 | struct perf_sample_data sample; |
| 8901 | int ret; |
| 8902 | |
| 8903 | if (!perf_event_ksymbol_match(event)) |
| 8904 | return; |
| 8905 | |
| 8906 | perf_event_header__init_id(&ksymbol_event->event_id.header, |
| 8907 | &sample, event); |
Peter Zijlstra | 267fb27 | 2020-10-30 15:50:32 +0100 | [diff] [blame] | 8908 | ret = perf_output_begin(&handle, &sample, event, |
Song Liu | 76193a9 | 2019-01-17 08:15:13 -0800 | [diff] [blame] | 8909 | ksymbol_event->event_id.header.size); |
| 8910 | if (ret) |
| 8911 | return; |
| 8912 | |
| 8913 | perf_output_put(&handle, ksymbol_event->event_id); |
| 8914 | __output_copy(&handle, ksymbol_event->name, ksymbol_event->name_len); |
| 8915 | perf_event__output_id_sample(event, &handle, &sample); |
| 8916 | |
| 8917 | perf_output_end(&handle); |
| 8918 | } |
| 8919 | |
| 8920 | void perf_event_ksymbol(u16 ksym_type, u64 addr, u32 len, bool unregister, |
| 8921 | const char *sym) |
| 8922 | { |
| 8923 | struct perf_ksymbol_event ksymbol_event; |
| 8924 | char name[KSYM_NAME_LEN]; |
| 8925 | u16 flags = 0; |
| 8926 | int name_len; |
| 8927 | |
| 8928 | if (!atomic_read(&nr_ksymbol_events)) |
| 8929 | return; |
| 8930 | |
| 8931 | if (ksym_type >= PERF_RECORD_KSYMBOL_TYPE_MAX || |
| 8932 | ksym_type == PERF_RECORD_KSYMBOL_TYPE_UNKNOWN) |
| 8933 | goto err; |
| 8934 | |
| 8935 | strlcpy(name, sym, KSYM_NAME_LEN); |
| 8936 | name_len = strlen(name) + 1; |
| 8937 | while (!IS_ALIGNED(name_len, sizeof(u64))) |
| 8938 | name[name_len++] = '\0'; |
| 8939 | BUILD_BUG_ON(KSYM_NAME_LEN % sizeof(u64)); |
| 8940 | |
| 8941 | if (unregister) |
| 8942 | flags |= PERF_RECORD_KSYMBOL_FLAGS_UNREGISTER; |
| 8943 | |
| 8944 | ksymbol_event = (struct perf_ksymbol_event){ |
| 8945 | .name = name, |
| 8946 | .name_len = name_len, |
| 8947 | .event_id = { |
| 8948 | .header = { |
| 8949 | .type = PERF_RECORD_KSYMBOL, |
| 8950 | .size = sizeof(ksymbol_event.event_id) + |
| 8951 | name_len, |
| 8952 | }, |
| 8953 | .addr = addr, |
| 8954 | .len = len, |
| 8955 | .ksym_type = ksym_type, |
| 8956 | .flags = flags, |
| 8957 | }, |
| 8958 | }; |
| 8959 | |
| 8960 | perf_iterate_sb(perf_event_ksymbol_output, &ksymbol_event, NULL); |
| 8961 | return; |
| 8962 | err: |
| 8963 | WARN_ONCE(1, "%s: Invalid KSYMBOL type 0x%x\n", __func__, ksym_type); |
| 8964 | } |
| 8965 | |
Song Liu | 6ee52e2 | 2019-01-17 08:15:15 -0800 | [diff] [blame] | 8966 | /* |
| 8967 | * bpf program load/unload tracking |
| 8968 | */ |
| 8969 | |
| 8970 | struct perf_bpf_event { |
| 8971 | struct bpf_prog *prog; |
| 8972 | struct { |
| 8973 | struct perf_event_header header; |
| 8974 | u16 type; |
| 8975 | u16 flags; |
| 8976 | u32 id; |
| 8977 | u8 tag[BPF_TAG_SIZE]; |
| 8978 | } event_id; |
| 8979 | }; |
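/*
 * Illustrative usage note (editor's sketch): with attr.bpf_event = 1 a
 * tool receives PERF_RECORD_BPF_EVENT carrying the program id and tag on
 * load and unload; the id can then be resolved, for instance with
 * libbpf's bpf_prog_get_fd_by_id().  Setting attr.ksymbol = 1 additionally
 * delivers one PERF_RECORD_KSYMBOL per JITed (sub)program, as emitted by
 * perf_event_bpf_emit_ksymbols() below.
 *
 *	struct perf_event_attr attr = {
 *		.type		= PERF_TYPE_SOFTWARE,
 *		.size		= sizeof(attr),
 *		.config		= PERF_COUNT_SW_DUMMY,
 *		.bpf_event	= 1,
 *		.ksymbol	= 1,
 *		.sample_id_all	= 1,
 *	};
 */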
| 8980 | |
| 8981 | static int perf_event_bpf_match(struct perf_event *event) |
| 8982 | { |
| 8983 | return event->attr.bpf_event; |
| 8984 | } |
| 8985 | |
| 8986 | static void perf_event_bpf_output(struct perf_event *event, void *data) |
| 8987 | { |
| 8988 | struct perf_bpf_event *bpf_event = data; |
| 8989 | struct perf_output_handle handle; |
| 8990 | struct perf_sample_data sample; |
| 8991 | int ret; |
| 8992 | |
| 8993 | if (!perf_event_bpf_match(event)) |
| 8994 | return; |
| 8995 | |
| 8996 | perf_event_header__init_id(&bpf_event->event_id.header, |
| 8997 | &sample, event); |
Peter Zijlstra | 267fb27 | 2020-10-30 15:50:32 +0100 | [diff] [blame] | 8998 | ret = perf_output_begin(&handle, &sample, event,
Song Liu | 6ee52e2 | 2019-01-17 08:15:15 -0800 | [diff] [blame] | 8999 | bpf_event->event_id.header.size); |
| 9000 | if (ret) |
| 9001 | return; |
| 9002 | |
| 9003 | perf_output_put(&handle, bpf_event->event_id); |
| 9004 | perf_event__output_id_sample(event, &handle, &sample); |
| 9005 | |
| 9006 | perf_output_end(&handle); |
| 9007 | } |
| 9008 | |
| 9009 | static void perf_event_bpf_emit_ksymbols(struct bpf_prog *prog, |
| 9010 | enum perf_bpf_event_type type) |
| 9011 | { |
| 9012 | bool unregister = type == PERF_BPF_EVENT_PROG_UNLOAD; |
Song Liu | 6ee52e2 | 2019-01-17 08:15:15 -0800 | [diff] [blame] | 9013 | int i; |
| 9014 | |
| 9015 | if (prog->aux->func_cnt == 0) { |
Song Liu | 6ee52e2 | 2019-01-17 08:15:15 -0800 | [diff] [blame] | 9016 | perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_BPF, |
| 9017 | (u64)(unsigned long)prog->bpf_func, |
Jiri Olsa | bfea9a8 | 2020-03-12 20:55:59 +0100 | [diff] [blame] | 9018 | prog->jited_len, unregister, |
| 9019 | prog->aux->ksym.name); |
Song Liu | 6ee52e2 | 2019-01-17 08:15:15 -0800 | [diff] [blame] | 9020 | } else { |
| 9021 | for (i = 0; i < prog->aux->func_cnt; i++) { |
| 9022 | struct bpf_prog *subprog = prog->aux->func[i]; |
| 9023 | |
Song Liu | 6ee52e2 | 2019-01-17 08:15:15 -0800 | [diff] [blame] | 9024 | perf_event_ksymbol( |
| 9025 | PERF_RECORD_KSYMBOL_TYPE_BPF, |
| 9026 | (u64)(unsigned long)subprog->bpf_func, |
Jiri Olsa | bfea9a8 | 2020-03-12 20:55:59 +0100 | [diff] [blame] | 9027 | subprog->jited_len, unregister, |
| 9028 | prog->aux->ksym.name); |
Song Liu | 6ee52e2 | 2019-01-17 08:15:15 -0800 | [diff] [blame] | 9029 | } |
| 9030 | } |
| 9031 | } |
| 9032 | |
| 9033 | void perf_event_bpf_event(struct bpf_prog *prog, |
| 9034 | enum perf_bpf_event_type type, |
| 9035 | u16 flags) |
| 9036 | { |
| 9037 | struct perf_bpf_event bpf_event; |
| 9038 | |
| 9039 | if (type <= PERF_BPF_EVENT_UNKNOWN || |
| 9040 | type >= PERF_BPF_EVENT_MAX) |
| 9041 | return; |
| 9042 | |
| 9043 | switch (type) { |
| 9044 | case PERF_BPF_EVENT_PROG_LOAD: |
| 9045 | case PERF_BPF_EVENT_PROG_UNLOAD: |
| 9046 | if (atomic_read(&nr_ksymbol_events)) |
| 9047 | perf_event_bpf_emit_ksymbols(prog, type); |
| 9048 | break; |
| 9049 | default: |
| 9050 | break; |
| 9051 | } |
| 9052 | |
| 9053 | if (!atomic_read(&nr_bpf_events)) |
| 9054 | return; |
| 9055 | |
| 9056 | bpf_event = (struct perf_bpf_event){ |
| 9057 | .prog = prog, |
| 9058 | .event_id = { |
| 9059 | .header = { |
| 9060 | .type = PERF_RECORD_BPF_EVENT, |
| 9061 | .size = sizeof(bpf_event.event_id), |
| 9062 | }, |
| 9063 | .type = type, |
| 9064 | .flags = flags, |
| 9065 | .id = prog->aux->id, |
| 9066 | }, |
| 9067 | }; |
| 9068 | |
| 9069 | BUILD_BUG_ON(BPF_TAG_SIZE % sizeof(u64)); |
| 9070 | |
| 9071 | memcpy(bpf_event.event_id.tag, prog->tag, BPF_TAG_SIZE); |
| 9072 | perf_iterate_sb(perf_event_bpf_output, &bpf_event, NULL); |
| 9073 | } |
| 9074 | |
Adrian Hunter | e17d43b | 2020-05-12 15:19:08 +0300 | [diff] [blame] | 9075 | struct perf_text_poke_event { |
| 9076 | const void *old_bytes; |
| 9077 | const void *new_bytes; |
| 9078 | size_t pad; |
| 9079 | u16 old_len; |
| 9080 | u16 new_len; |
| 9081 | |
| 9082 | struct { |
| 9083 | struct perf_event_header header; |
| 9084 | |
| 9085 | u64 addr; |
| 9086 | } event_id; |
| 9087 | }; |
| 9088 | |
| 9089 | static int perf_event_text_poke_match(struct perf_event *event) |
| 9090 | { |
| 9091 | return event->attr.text_poke; |
| 9092 | } |
| 9093 | |
| 9094 | static void perf_event_text_poke_output(struct perf_event *event, void *data) |
| 9095 | { |
| 9096 | struct perf_text_poke_event *text_poke_event = data; |
| 9097 | struct perf_output_handle handle; |
| 9098 | struct perf_sample_data sample; |
| 9099 | u64 padding = 0; |
| 9100 | int ret; |
| 9101 | |
| 9102 | if (!perf_event_text_poke_match(event)) |
| 9103 | return; |
| 9104 | |
| 9105 | perf_event_header__init_id(&text_poke_event->event_id.header, &sample, event); |
| 9106 | |
Peter Zijlstra | 267fb27 | 2020-10-30 15:50:32 +0100 | [diff] [blame] | 9107 | ret = perf_output_begin(&handle, &sample, event, |
| 9108 | text_poke_event->event_id.header.size); |
Adrian Hunter | e17d43b | 2020-05-12 15:19:08 +0300 | [diff] [blame] | 9109 | if (ret) |
| 9110 | return; |
| 9111 | |
| 9112 | perf_output_put(&handle, text_poke_event->event_id); |
| 9113 | perf_output_put(&handle, text_poke_event->old_len); |
| 9114 | perf_output_put(&handle, text_poke_event->new_len); |
| 9115 | |
| 9116 | __output_copy(&handle, text_poke_event->old_bytes, text_poke_event->old_len); |
| 9117 | __output_copy(&handle, text_poke_event->new_bytes, text_poke_event->new_len); |
| 9118 | |
| 9119 | if (text_poke_event->pad) |
| 9120 | __output_copy(&handle, &padding, text_poke_event->pad); |
| 9121 | |
| 9122 | perf_event__output_id_sample(event, &handle, &sample); |
| 9123 | |
| 9124 | perf_output_end(&handle); |
| 9125 | } |
| 9126 | |
| 9127 | void perf_event_text_poke(const void *addr, const void *old_bytes, |
| 9128 | size_t old_len, const void *new_bytes, size_t new_len) |
| 9129 | { |
| 9130 | struct perf_text_poke_event text_poke_event; |
| 9131 | size_t tot, pad; |
| 9132 | |
| 9133 | if (!atomic_read(&nr_text_poke_events)) |
| 9134 | return; |
| 9135 | |
| 9136 | tot = sizeof(text_poke_event.old_len) + old_len; |
| 9137 | tot += sizeof(text_poke_event.new_len) + new_len; |
| 9138 | pad = ALIGN(tot, sizeof(u64)) - tot; |
| 9139 | |
| 9140 | text_poke_event = (struct perf_text_poke_event){ |
| 9141 | .old_bytes = old_bytes, |
| 9142 | .new_bytes = new_bytes, |
| 9143 | .pad = pad, |
| 9144 | .old_len = old_len, |
| 9145 | .new_len = new_len, |
| 9146 | .event_id = { |
| 9147 | .header = { |
| 9148 | .type = PERF_RECORD_TEXT_POKE, |
| 9149 | .misc = PERF_RECORD_MISC_KERNEL, |
| 9150 | .size = sizeof(text_poke_event.event_id) + tot + pad, |
| 9151 | }, |
| 9152 | .addr = (unsigned long)addr, |
| 9153 | }, |
| 9154 | }; |
| 9155 | |
| 9156 | perf_iterate_sb(perf_event_text_poke_output, &text_poke_event, NULL); |
| 9157 | } |
| 9158 | |
Alexander Shishkin | 8d4e6c4 | 2017-03-30 18:39:56 +0300 | [diff] [blame] | 9159 | void perf_event_itrace_started(struct perf_event *event) |
| 9160 | { |
| 9161 | event->attach_state |= PERF_ATTACH_ITRACE; |
| 9162 | } |
| 9163 | |
Alexander Shishkin | ec0d772 | 2015-01-14 14:18:23 +0200 | [diff] [blame] | 9164 | static void perf_log_itrace_start(struct perf_event *event) |
| 9165 | { |
| 9166 | struct perf_output_handle handle; |
| 9167 | struct perf_sample_data sample; |
| 9168 | struct perf_aux_event { |
| 9169 | struct perf_event_header header; |
| 9170 | u32 pid; |
| 9171 | u32 tid; |
| 9172 | } rec; |
| 9173 | int ret; |
| 9174 | |
| 9175 | if (event->parent) |
| 9176 | event = event->parent; |
| 9177 | |
| 9178 | if (!(event->pmu->capabilities & PERF_PMU_CAP_ITRACE) || |
Alexander Shishkin | 8d4e6c4 | 2017-03-30 18:39:56 +0300 | [diff] [blame] | 9179 | event->attach_state & PERF_ATTACH_ITRACE) |
Alexander Shishkin | ec0d772 | 2015-01-14 14:18:23 +0200 | [diff] [blame] | 9180 | return; |
| 9181 | |
Alexander Shishkin | ec0d772 | 2015-01-14 14:18:23 +0200 | [diff] [blame] | 9182 | rec.header.type = PERF_RECORD_ITRACE_START; |
| 9183 | rec.header.misc = 0; |
| 9184 | rec.header.size = sizeof(rec); |
| 9185 | rec.pid = perf_event_pid(event, current); |
| 9186 | rec.tid = perf_event_tid(event, current); |
| 9187 | |
| 9188 | perf_event_header__init_id(&rec.header, &sample, event); |
Peter Zijlstra | 267fb27 | 2020-10-30 15:50:32 +0100 | [diff] [blame] | 9189 | ret = perf_output_begin(&handle, &sample, event, rec.header.size); |
Alexander Shishkin | ec0d772 | 2015-01-14 14:18:23 +0200 | [diff] [blame] | 9190 | |
| 9191 | if (ret) |
| 9192 | return; |
| 9193 | |
| 9194 | perf_output_put(&handle, rec); |
| 9195 | perf_event__output_id_sample(event, &handle, &sample); |
| 9196 | |
| 9197 | perf_output_end(&handle); |
| 9198 | } |
| 9199 | |
Adrian Hunter | 8b8ff8c | 2021-09-07 19:39:01 +0300 | [diff] [blame] | 9200 | void perf_report_aux_output_id(struct perf_event *event, u64 hw_id) |
| 9201 | { |
| 9202 | struct perf_output_handle handle; |
| 9203 | struct perf_sample_data sample; |
| 9204 | struct perf_aux_event { |
| 9205 | struct perf_event_header header; |
| 9206 | u64 hw_id; |
| 9207 | } rec; |
| 9208 | int ret; |
| 9209 | |
| 9210 | if (event->parent) |
| 9211 | event = event->parent; |
| 9212 | |
| 9213 | rec.header.type = PERF_RECORD_AUX_OUTPUT_HW_ID; |
| 9214 | rec.header.misc = 0; |
| 9215 | rec.header.size = sizeof(rec); |
| 9216 | rec.hw_id = hw_id; |
| 9217 | |
| 9218 | perf_event_header__init_id(&rec.header, &sample, event); |
| 9219 | ret = perf_output_begin(&handle, &sample, event, rec.header.size); |
| 9220 | |
| 9221 | if (ret) |
| 9222 | return; |
| 9223 | |
| 9224 | perf_output_put(&handle, rec); |
| 9225 | perf_event__output_id_sample(event, &handle, &sample); |
| 9226 | |
| 9227 | perf_output_end(&handle); |
| 9228 | } |
| 9229 | |
Jiri Olsa | 475113d | 2016-12-28 14:31:03 +0100 | [diff] [blame] | 9230 | static int |
| 9231 | __perf_event_account_interrupt(struct perf_event *event, int throttle) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 9232 | { |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 9233 | struct hw_perf_event *hwc = &event->hw; |
| 9234 | int ret = 0; |
Jiri Olsa | 475113d | 2016-12-28 14:31:03 +0100 | [diff] [blame] | 9235 | u64 seq; |
Peter Zijlstra | 9639882 | 2010-11-24 18:55:29 +0100 | [diff] [blame] | 9236 | |
Stephane Eranian | e050e3f | 2012-01-26 17:03:19 +0100 | [diff] [blame] | 9237 | seq = __this_cpu_read(perf_throttled_seq); |
| 9238 | if (seq != hwc->interrupts_seq) { |
| 9239 | hwc->interrupts_seq = seq; |
| 9240 | hwc->interrupts = 1; |
| 9241 | } else { |
| 9242 | hwc->interrupts++; |
| 9243 | if (unlikely(throttle |
| 9244 | && hwc->interrupts >= max_samples_per_tick)) { |
| 9245 | __this_cpu_inc(perf_throttled_count); |
Frederic Weisbecker | 555e0c1 | 2015-07-16 17:42:29 +0200 | [diff] [blame] | 9246 | tick_dep_set_cpu(smp_processor_id(), TICK_DEP_BIT_PERF_EVENTS); |
Peter Zijlstra | 163ec43 | 2011-02-16 11:22:34 +0100 | [diff] [blame] | 9247 | hwc->interrupts = MAX_INTERRUPTS; |
| 9248 | perf_log_throttle(event, 0); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 9249 | ret = 1; |
| 9250 | } |
Stephane Eranian | e050e3f | 2012-01-26 17:03:19 +0100 | [diff] [blame] | 9251 | } |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 9252 | |
| 9253 | if (event->attr.freq) { |
| 9254 | u64 now = perf_clock(); |
Peter Zijlstra | abd5071 | 2010-01-26 18:50:16 +0100 | [diff] [blame] | 9255 | s64 delta = now - hwc->freq_time_stamp; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 9256 | |
Peter Zijlstra | abd5071 | 2010-01-26 18:50:16 +0100 | [diff] [blame] | 9257 | hwc->freq_time_stamp = now; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 9258 | |
Peter Zijlstra | abd5071 | 2010-01-26 18:50:16 +0100 | [diff] [blame] | 9259 | if (delta > 0 && delta < 2*TICK_NSEC) |
Stephane Eranian | f39d47f | 2012-02-07 14:39:57 +0100 | [diff] [blame] | 9260 | perf_adjust_period(event, delta, hwc->last_period, true); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 9261 | } |
| 9262 | |
Jiri Olsa | 475113d | 2016-12-28 14:31:03 +0100 | [diff] [blame] | 9263 | return ret; |
| 9264 | } |
| 9265 | |
| 9266 | int perf_event_account_interrupt(struct perf_event *event) |
| 9267 | { |
| 9268 | return __perf_event_account_interrupt(event, 1); |
| 9269 | } |
| 9270 | |
| 9271 | /* |
| 9272 | * Generic event overflow handling, sampling. |
| 9273 | */ |
| 9274 | |
| 9275 | static int __perf_event_overflow(struct perf_event *event, |
| 9276 | int throttle, struct perf_sample_data *data, |
| 9277 | struct pt_regs *regs) |
| 9278 | { |
| 9279 | int events = atomic_read(&event->event_limit); |
| 9280 | int ret = 0; |
| 9281 | |
| 9282 | /* |
| 9283 | * Non-sampling counters might still use the PMI to fold short |
| 9284 | * hardware counters; ignore those.
| 9285 | */ |
| 9286 | if (unlikely(!is_sampling_event(event))) |
| 9287 | return 0; |
| 9288 | |
| 9289 | ret = __perf_event_account_interrupt(event, throttle); |
| 9290 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 9291 | /* |
| 9292 | * XXX event_limit might not quite work as expected on inherited |
| 9293 | * events |
| 9294 | */ |
| 9295 | |
| 9296 | event->pending_kill = POLL_IN; |
| 9297 | if (events && atomic_dec_and_test(&event->event_limit)) { |
| 9298 | ret = 1; |
| 9299 | event->pending_kill = POLL_HUP; |
Marco Elver | 97ba62b | 2021-04-08 12:36:01 +0200 | [diff] [blame] | 9300 | event->pending_addr = data->addr; |
Jiri Olsa | 5aab90c | 2016-10-26 11:48:24 +0200 | [diff] [blame] | 9301 | |
| 9302 | perf_event_disable_inatomic(event); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 9303 | } |
| 9304 | |
Alexei Starovoitov | aa6a5f3 | 2016-09-01 18:37:24 -0700 | [diff] [blame] | 9305 | READ_ONCE(event->overflow_handler)(event, data, regs); |
Peter Zijlstra | 453f19e | 2009-11-20 22:19:43 +0100 | [diff] [blame] | 9306 | |
Peter Zijlstra | fed66e2cd | 2015-06-11 10:32:01 +0200 | [diff] [blame] | 9307 | if (*perf_event_fasync(event) && event->pending_kill) { |
Peter Zijlstra | a8b0ca1 | 2011-06-27 14:41:57 +0200 | [diff] [blame] | 9308 | event->pending_wakeup = 1; |
| 9309 | irq_work_queue(&event->pending); |
Peter Zijlstra | f506b3d | 2011-05-26 17:02:53 +0200 | [diff] [blame] | 9310 | } |
| 9311 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 9312 | return ret; |
| 9313 | } |
| 9314 | |
Peter Zijlstra | a8b0ca1 | 2011-06-27 14:41:57 +0200 | [diff] [blame] | 9315 | int perf_event_overflow(struct perf_event *event, |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 9316 | struct perf_sample_data *data, |
| 9317 | struct pt_regs *regs) |
| 9318 | { |
Peter Zijlstra | a8b0ca1 | 2011-06-27 14:41:57 +0200 | [diff] [blame] | 9319 | return __perf_event_overflow(event, 1, data, regs); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 9320 | } |
| 9321 | |
| 9322 | /* |
| 9323 | * Generic software event infrastructure |
| 9324 | */ |
| 9325 | |
Peter Zijlstra | b28ab83 | 2010-09-06 14:48:15 +0200 | [diff] [blame] | 9326 | struct swevent_htable { |
| 9327 | struct swevent_hlist *swevent_hlist; |
| 9328 | struct mutex hlist_mutex; |
| 9329 | int hlist_refcount; |
| 9330 | |
| 9331 | /* Recursion avoidance in each context */
| 9332 | int recursion[PERF_NR_CONTEXTS]; |
| 9333 | }; |
| 9334 | |
| 9335 | static DEFINE_PER_CPU(struct swevent_htable, swevent_htable); |
| 9336 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 9337 | /* |
| 9338 | * We directly increment event->count and keep a second value in |
| 9339 | * event->hw.period_left to count intervals. This period value
| 9340 | * is kept in the range [-sample_period, 0] so that we can use the |
| 9341 | * sign as trigger. |
| 9342 | */ |
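| | /*
| |  * A worked example of the scheme above (illustrative numbers only,
| |  * assuming last_period == sample_period == 100): with period_left at
| |  * -10, an event of nr = 25 pushes period_left to +15, which is no
| |  * longer negative, so the overflow path runs.  perf_swevent_set_period()
| |  * then computes nr = (100 + 15) / 100 = 1 elapsed period and rewinds
| |  * period_left to 15 - 100 = -85, back into [-sample_period, 0].
| |  */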
| 9343 | |
Jiri Olsa | ab57384 | 2013-05-01 17:25:44 +0200 | [diff] [blame] | 9344 | u64 perf_swevent_set_period(struct perf_event *event) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 9345 | { |
| 9346 | struct hw_perf_event *hwc = &event->hw; |
| 9347 | u64 period = hwc->last_period; |
| 9348 | u64 nr, offset; |
| 9349 | s64 old, val; |
| 9350 | |
| 9351 | hwc->last_period = hwc->sample_period; |
| 9352 | |
| 9353 | again: |
Peter Zijlstra | e785059 | 2010-05-21 14:43:08 +0200 | [diff] [blame] | 9354 | old = val = local64_read(&hwc->period_left); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 9355 | if (val < 0) |
| 9356 | return 0; |
| 9357 | |
| 9358 | nr = div64_u64(period + val, period); |
| 9359 | offset = nr * period; |
| 9360 | val -= offset; |
Peter Zijlstra | e785059 | 2010-05-21 14:43:08 +0200 | [diff] [blame] | 9361 | if (local64_cmpxchg(&hwc->period_left, old, val) != old) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 9362 | goto again; |
| 9363 | |
| 9364 | return nr; |
| 9365 | } |
| 9366 | |
Peter Zijlstra | 0cff784 | 2009-11-20 22:19:44 +0100 | [diff] [blame] | 9367 | static void perf_swevent_overflow(struct perf_event *event, u64 overflow, |
Peter Zijlstra | a8b0ca1 | 2011-06-27 14:41:57 +0200 | [diff] [blame] | 9368 | struct perf_sample_data *data, |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 9369 | struct pt_regs *regs) |
| 9370 | { |
| 9371 | struct hw_perf_event *hwc = &event->hw; |
| 9372 | int throttle = 0; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 9373 | |
Peter Zijlstra | 0cff784 | 2009-11-20 22:19:44 +0100 | [diff] [blame] | 9374 | if (!overflow) |
| 9375 | overflow = perf_swevent_set_period(event); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 9376 | |
| 9377 | if (hwc->interrupts == MAX_INTERRUPTS) |
| 9378 | return; |
| 9379 | |
| 9380 | for (; overflow; overflow--) { |
Peter Zijlstra | a8b0ca1 | 2011-06-27 14:41:57 +0200 | [diff] [blame] | 9381 | if (__perf_event_overflow(event, throttle, |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 9382 | data, regs)) { |
| 9383 | /* |
| 9384 | * We inhibit the overflow from happening when |
| 9385 | * hwc->interrupts == MAX_INTERRUPTS. |
| 9386 | */ |
| 9387 | break; |
| 9388 | } |
| 9389 | throttle = 1; |
| 9390 | } |
| 9391 | } |
| 9392 | |
Peter Zijlstra | a4eaf7f | 2010-06-16 14:37:10 +0200 | [diff] [blame] | 9393 | static void perf_swevent_event(struct perf_event *event, u64 nr, |
Peter Zijlstra | a8b0ca1 | 2011-06-27 14:41:57 +0200 | [diff] [blame] | 9394 | struct perf_sample_data *data, |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 9395 | struct pt_regs *regs) |
| 9396 | { |
| 9397 | struct hw_perf_event *hwc = &event->hw; |
| 9398 | |
Peter Zijlstra | e785059 | 2010-05-21 14:43:08 +0200 | [diff] [blame] | 9399 | local64_add(nr, &event->count); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 9400 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 9401 | if (!regs) |
| 9402 | return; |
| 9403 | |
Franck Bui-Huu | 6c7e550 | 2010-11-23 16:21:43 +0100 | [diff] [blame] | 9404 | if (!is_sampling_event(event)) |
Peter Zijlstra | 0cff784 | 2009-11-20 22:19:44 +0100 | [diff] [blame] | 9405 | return; |
| 9406 | |
Andrew Vagin | 5d81e5c | 2011-11-07 15:54:12 +0300 | [diff] [blame] | 9407 | if ((event->attr.sample_type & PERF_SAMPLE_PERIOD) && !event->attr.freq) { |
| 9408 | data->period = nr; |
| 9409 | return perf_swevent_overflow(event, 1, data, regs); |
| 9410 | } else |
| 9411 | data->period = event->hw.last_period; |
| 9412 | |
Peter Zijlstra | 0cff784 | 2009-11-20 22:19:44 +0100 | [diff] [blame] | 9413 | if (nr == 1 && hwc->sample_period == 1 && !event->attr.freq) |
Peter Zijlstra | a8b0ca1 | 2011-06-27 14:41:57 +0200 | [diff] [blame] | 9414 | return perf_swevent_overflow(event, 1, data, regs); |
Peter Zijlstra | 0cff784 | 2009-11-20 22:19:44 +0100 | [diff] [blame] | 9415 | |
Peter Zijlstra | e785059 | 2010-05-21 14:43:08 +0200 | [diff] [blame] | 9416 | if (local64_add_negative(nr, &hwc->period_left)) |
Peter Zijlstra | 0cff784 | 2009-11-20 22:19:44 +0100 | [diff] [blame] | 9417 | return; |
| 9418 | |
Peter Zijlstra | a8b0ca1 | 2011-06-27 14:41:57 +0200 | [diff] [blame] | 9419 | perf_swevent_overflow(event, 0, data, regs); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 9420 | } |
| 9421 | |
Frederic Weisbecker | f5ffe02 | 2009-11-23 15:42:34 +0100 | [diff] [blame] | 9422 | static int perf_exclude_event(struct perf_event *event, |
| 9423 | struct pt_regs *regs) |
| 9424 | { |
Peter Zijlstra | a4eaf7f | 2010-06-16 14:37:10 +0200 | [diff] [blame] | 9425 | if (event->hw.state & PERF_HES_STOPPED) |
Frederic Weisbecker | 91b2f48 | 2011-03-07 21:27:08 +0100 | [diff] [blame] | 9426 | return 1; |
Peter Zijlstra | a4eaf7f | 2010-06-16 14:37:10 +0200 | [diff] [blame] | 9427 | |
Frederic Weisbecker | f5ffe02 | 2009-11-23 15:42:34 +0100 | [diff] [blame] | 9428 | if (regs) { |
| 9429 | if (event->attr.exclude_user && user_mode(regs)) |
| 9430 | return 1; |
| 9431 | |
| 9432 | if (event->attr.exclude_kernel && !user_mode(regs)) |
| 9433 | return 1; |
| 9434 | } |
| 9435 | |
| 9436 | return 0; |
| 9437 | } |
| 9438 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 9439 | static int perf_swevent_match(struct perf_event *event, |
| 9440 | enum perf_type_id type, |
Li Zefan | 6fb2915 | 2009-10-15 11:21:42 +0800 | [diff] [blame] | 9441 | u32 event_id, |
| 9442 | struct perf_sample_data *data, |
| 9443 | struct pt_regs *regs) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 9444 | { |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 9445 | if (event->attr.type != type) |
| 9446 | return 0; |
Frederic Weisbecker | f5ffe02 | 2009-11-23 15:42:34 +0100 | [diff] [blame] | 9447 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 9448 | if (event->attr.config != event_id) |
| 9449 | return 0; |
| 9450 | |
Frederic Weisbecker | f5ffe02 | 2009-11-23 15:42:34 +0100 | [diff] [blame] | 9451 | if (perf_exclude_event(event, regs)) |
| 9452 | return 0; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 9453 | |
| 9454 | return 1; |
| 9455 | } |
| 9456 | |
Frederic Weisbecker | 76e1d90 | 2010-04-05 15:35:57 +0200 | [diff] [blame] | 9457 | static inline u64 swevent_hash(u64 type, u32 event_id) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 9458 | { |
Frederic Weisbecker | 76e1d90 | 2010-04-05 15:35:57 +0200 | [diff] [blame] | 9459 | u64 val = event_id | (type << 32); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 9460 | |
Frederic Weisbecker | 76e1d90 | 2010-04-05 15:35:57 +0200 | [diff] [blame] | 9461 | return hash_64(val, SWEVENT_HLIST_BITS); |
| 9462 | } |
| 9463 | |
Frederic Weisbecker | 49f135e | 2010-05-20 10:17:46 +0200 | [diff] [blame] | 9464 | static inline struct hlist_head * |
| 9465 | __find_swevent_head(struct swevent_hlist *hlist, u64 type, u32 event_id) |
Frederic Weisbecker | 76e1d90 | 2010-04-05 15:35:57 +0200 | [diff] [blame] | 9466 | { |
Frederic Weisbecker | 49f135e | 2010-05-20 10:17:46 +0200 | [diff] [blame] | 9467 | u64 hash = swevent_hash(type, event_id); |
Frederic Weisbecker | 76e1d90 | 2010-04-05 15:35:57 +0200 | [diff] [blame] | 9468 | |
Frederic Weisbecker | 49f135e | 2010-05-20 10:17:46 +0200 | [diff] [blame] | 9469 | return &hlist->heads[hash]; |
| 9470 | } |
| 9471 | |
| 9472 | /* For the read side: events when they trigger */ |
| 9473 | static inline struct hlist_head * |
Peter Zijlstra | b28ab83 | 2010-09-06 14:48:15 +0200 | [diff] [blame] | 9474 | find_swevent_head_rcu(struct swevent_htable *swhash, u64 type, u32 event_id) |
Frederic Weisbecker | 49f135e | 2010-05-20 10:17:46 +0200 | [diff] [blame] | 9475 | { |
| 9476 | struct swevent_hlist *hlist; |
Frederic Weisbecker | 76e1d90 | 2010-04-05 15:35:57 +0200 | [diff] [blame] | 9477 | |
Peter Zijlstra | b28ab83 | 2010-09-06 14:48:15 +0200 | [diff] [blame] | 9478 | hlist = rcu_dereference(swhash->swevent_hlist); |
Frederic Weisbecker | 76e1d90 | 2010-04-05 15:35:57 +0200 | [diff] [blame] | 9479 | if (!hlist) |
| 9480 | return NULL; |
| 9481 | |
Frederic Weisbecker | 49f135e | 2010-05-20 10:17:46 +0200 | [diff] [blame] | 9482 | return __find_swevent_head(hlist, type, event_id); |
| 9483 | } |
| 9484 | |
| 9485 | /* For the event head insertion and removal in the hlist */ |
| 9486 | static inline struct hlist_head * |
Peter Zijlstra | b28ab83 | 2010-09-06 14:48:15 +0200 | [diff] [blame] | 9487 | find_swevent_head(struct swevent_htable *swhash, struct perf_event *event) |
Frederic Weisbecker | 49f135e | 2010-05-20 10:17:46 +0200 | [diff] [blame] | 9488 | { |
| 9489 | struct swevent_hlist *hlist; |
| 9490 | u32 event_id = event->attr.config; |
| 9491 | u64 type = event->attr.type; |
| 9492 | |
| 9493 | /* |
| 9494 | * Event scheduling is always serialized against hlist allocation |
| 9495 | * and release, which makes the protected version suitable here.
| 9496 | * The context lock guarantees that. |
| 9497 | */ |
Peter Zijlstra | b28ab83 | 2010-09-06 14:48:15 +0200 | [diff] [blame] | 9498 | hlist = rcu_dereference_protected(swhash->swevent_hlist, |
Frederic Weisbecker | 49f135e | 2010-05-20 10:17:46 +0200 | [diff] [blame] | 9499 | lockdep_is_held(&event->ctx->lock)); |
| 9500 | if (!hlist) |
| 9501 | return NULL; |
| 9502 | |
| 9503 | return __find_swevent_head(hlist, type, event_id); |
Frederic Weisbecker | 76e1d90 | 2010-04-05 15:35:57 +0200 | [diff] [blame] | 9504 | } |
| 9505 | |
| 9506 | static void do_perf_sw_event(enum perf_type_id type, u32 event_id, |
Peter Zijlstra | a8b0ca1 | 2011-06-27 14:41:57 +0200 | [diff] [blame] | 9507 | u64 nr, |
Frederic Weisbecker | 76e1d90 | 2010-04-05 15:35:57 +0200 | [diff] [blame] | 9508 | struct perf_sample_data *data, |
| 9509 | struct pt_regs *regs) |
| 9510 | { |
Christoph Lameter | 4a32fea | 2014-08-17 12:30:27 -0500 | [diff] [blame] | 9511 | struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable); |
Frederic Weisbecker | 76e1d90 | 2010-04-05 15:35:57 +0200 | [diff] [blame] | 9512 | struct perf_event *event; |
Frederic Weisbecker | 76e1d90 | 2010-04-05 15:35:57 +0200 | [diff] [blame] | 9513 | struct hlist_head *head; |
| 9514 | |
Frederic Weisbecker | 76e1d90 | 2010-04-05 15:35:57 +0200 | [diff] [blame] | 9515 | rcu_read_lock(); |
Peter Zijlstra | b28ab83 | 2010-09-06 14:48:15 +0200 | [diff] [blame] | 9516 | head = find_swevent_head_rcu(swhash, type, event_id); |
Frederic Weisbecker | 76e1d90 | 2010-04-05 15:35:57 +0200 | [diff] [blame] | 9517 | if (!head) |
| 9518 | goto end; |
| 9519 | |
Sasha Levin | b67bfe0 | 2013-02-27 17:06:00 -0800 | [diff] [blame] | 9520 | hlist_for_each_entry_rcu(event, head, hlist_entry) { |
Li Zefan | 6fb2915 | 2009-10-15 11:21:42 +0800 | [diff] [blame] | 9521 | if (perf_swevent_match(event, type, event_id, data, regs)) |
Peter Zijlstra | a8b0ca1 | 2011-06-27 14:41:57 +0200 | [diff] [blame] | 9522 | perf_swevent_event(event, nr, data, regs); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 9523 | } |
Frederic Weisbecker | 76e1d90 | 2010-04-05 15:35:57 +0200 | [diff] [blame] | 9524 | end: |
| 9525 | rcu_read_unlock(); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 9526 | } |
| 9527 | |
Peter Zijlstra (Intel) | 86038c5 | 2014-12-16 12:47:34 +0100 | [diff] [blame] | 9528 | DEFINE_PER_CPU(struct pt_regs, __perf_regs[4]); |
| 9529 | |
Peter Zijlstra | 4ed7c92 | 2009-11-23 11:37:29 +0100 | [diff] [blame] | 9530 | int perf_swevent_get_recursion_context(void) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 9531 | { |
Christoph Lameter | 4a32fea | 2014-08-17 12:30:27 -0500 | [diff] [blame] | 9532 | struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable); |
Frederic Weisbecker | ce71b9d | 2009-11-22 05:26:55 +0100 | [diff] [blame] | 9533 | |
Peter Zijlstra | b28ab83 | 2010-09-06 14:48:15 +0200 | [diff] [blame] | 9534 | return get_recursion_context(swhash->recursion); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 9535 | } |
Ingo Molnar | 645e8cc | 2009-11-22 12:20:19 +0100 | [diff] [blame] | 9536 | EXPORT_SYMBOL_GPL(perf_swevent_get_recursion_context); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 9537 | |
Alexei Starovoitov | 98b5c2c | 2016-04-06 18:43:25 -0700 | [diff] [blame] | 9538 | void perf_swevent_put_recursion_context(int rctx) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 9539 | { |
Christoph Lameter | 4a32fea | 2014-08-17 12:30:27 -0500 | [diff] [blame] | 9540 | struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable); |
Frederic Weisbecker | 927c7a9 | 2010-07-01 16:20:36 +0200 | [diff] [blame] | 9541 | |
Peter Zijlstra | b28ab83 | 2010-09-06 14:48:15 +0200 | [diff] [blame] | 9542 | put_recursion_context(swhash->recursion, rctx); |
Frederic Weisbecker | ce71b9d | 2009-11-22 05:26:55 +0100 | [diff] [blame] | 9543 | } |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 9544 | |
Peter Zijlstra (Intel) | 86038c5 | 2014-12-16 12:47:34 +0100 | [diff] [blame] | 9545 | void ___perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 9546 | { |
Ingo Molnar | a4234bf | 2009-11-23 10:57:59 +0100 | [diff] [blame] | 9547 | struct perf_sample_data data; |
Peter Zijlstra (Intel) | 86038c5 | 2014-12-16 12:47:34 +0100 | [diff] [blame] | 9548 | |
| 9549 | if (WARN_ON_ONCE(!regs)) |
| 9550 | return; |
| 9551 | |
| 9552 | perf_sample_data_init(&data, addr, 0); |
| 9553 | do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, &data, regs); |
| 9554 | } |
| 9555 | |
| 9556 | void __perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr) |
| 9557 | { |
Peter Zijlstra | 4ed7c92 | 2009-11-23 11:37:29 +0100 | [diff] [blame] | 9558 | int rctx; |
| 9559 | |
Peter Zijlstra | 1c024eca | 2010-05-19 14:02:22 +0200 | [diff] [blame] | 9560 | preempt_disable_notrace(); |
Peter Zijlstra | 4ed7c92 | 2009-11-23 11:37:29 +0100 | [diff] [blame] | 9561 | rctx = perf_swevent_get_recursion_context(); |
Peter Zijlstra (Intel) | 86038c5 | 2014-12-16 12:47:34 +0100 | [diff] [blame] | 9562 | if (unlikely(rctx < 0)) |
| 9563 | goto fail; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 9564 | |
Peter Zijlstra (Intel) | 86038c5 | 2014-12-16 12:47:34 +0100 | [diff] [blame] | 9565 | ___perf_sw_event(event_id, nr, regs, addr); |
Peter Zijlstra | 4ed7c92 | 2009-11-23 11:37:29 +0100 | [diff] [blame] | 9566 | |
| 9567 | perf_swevent_put_recursion_context(rctx); |
Peter Zijlstra (Intel) | 86038c5 | 2014-12-16 12:47:34 +0100 | [diff] [blame] | 9568 | fail: |
Peter Zijlstra | 1c024eca | 2010-05-19 14:02:22 +0200 | [diff] [blame] | 9569 | preempt_enable_notrace(); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 9570 | } |
| 9571 | |
| 9572 | static void perf_swevent_read(struct perf_event *event) |
| 9573 | { |
| 9574 | } |
| 9575 | |
Peter Zijlstra | a4eaf7f | 2010-06-16 14:37:10 +0200 | [diff] [blame] | 9576 | static int perf_swevent_add(struct perf_event *event, int flags) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 9577 | { |
Christoph Lameter | 4a32fea | 2014-08-17 12:30:27 -0500 | [diff] [blame] | 9578 | struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 9579 | struct hw_perf_event *hwc = &event->hw; |
Frederic Weisbecker | 76e1d90 | 2010-04-05 15:35:57 +0200 | [diff] [blame] | 9580 | struct hlist_head *head; |
| 9581 | |
Franck Bui-Huu | 6c7e550 | 2010-11-23 16:21:43 +0100 | [diff] [blame] | 9582 | if (is_sampling_event(event)) { |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 9583 | hwc->last_period = hwc->sample_period; |
| 9584 | perf_swevent_set_period(event); |
| 9585 | } |
Frederic Weisbecker | 76e1d90 | 2010-04-05 15:35:57 +0200 | [diff] [blame] | 9586 | |
Peter Zijlstra | a4eaf7f | 2010-06-16 14:37:10 +0200 | [diff] [blame] | 9587 | hwc->state = !(flags & PERF_EF_START); |
| 9588 | |
Peter Zijlstra | b28ab83 | 2010-09-06 14:48:15 +0200 | [diff] [blame] | 9589 | head = find_swevent_head(swhash, event); |
Peter Zijlstra | 12ca6ad | 2015-12-15 13:49:05 +0100 | [diff] [blame] | 9590 | if (WARN_ON_ONCE(!head)) |
Frederic Weisbecker | 76e1d90 | 2010-04-05 15:35:57 +0200 | [diff] [blame] | 9591 | return -EINVAL; |
| 9592 | |
| 9593 | hlist_add_head_rcu(&event->hlist_entry, head); |
Shaohua Li | 6a694a6 | 2015-02-05 15:55:32 -0800 | [diff] [blame] | 9594 | perf_event_update_userpage(event); |
Frederic Weisbecker | 76e1d90 | 2010-04-05 15:35:57 +0200 | [diff] [blame] | 9595 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 9596 | return 0; |
| 9597 | } |
| 9598 | |
Peter Zijlstra | a4eaf7f | 2010-06-16 14:37:10 +0200 | [diff] [blame] | 9599 | static void perf_swevent_del(struct perf_event *event, int flags) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 9600 | { |
Frederic Weisbecker | 76e1d90 | 2010-04-05 15:35:57 +0200 | [diff] [blame] | 9601 | hlist_del_rcu(&event->hlist_entry); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 9602 | } |
| 9603 | |
Peter Zijlstra | a4eaf7f | 2010-06-16 14:37:10 +0200 | [diff] [blame] | 9604 | static void perf_swevent_start(struct perf_event *event, int flags) |
Peter Zijlstra | c6df8d5 | 2010-06-03 11:21:20 +0200 | [diff] [blame] | 9605 | { |
Peter Zijlstra | a4eaf7f | 2010-06-16 14:37:10 +0200 | [diff] [blame] | 9606 | event->hw.state = 0; |
Peter Zijlstra | c6df8d5 | 2010-06-03 11:21:20 +0200 | [diff] [blame] | 9607 | } |
| 9608 | |
Peter Zijlstra | a4eaf7f | 2010-06-16 14:37:10 +0200 | [diff] [blame] | 9609 | static void perf_swevent_stop(struct perf_event *event, int flags) |
Peter Zijlstra | c6df8d5 | 2010-06-03 11:21:20 +0200 | [diff] [blame] | 9610 | { |
Peter Zijlstra | a4eaf7f | 2010-06-16 14:37:10 +0200 | [diff] [blame] | 9611 | event->hw.state = PERF_HES_STOPPED; |
Peter Zijlstra | c6df8d5 | 2010-06-03 11:21:20 +0200 | [diff] [blame] | 9612 | } |
| 9613 | |
Frederic Weisbecker | 49f135e | 2010-05-20 10:17:46 +0200 | [diff] [blame] | 9614 | /* Deref the hlist from the update side */ |
| 9615 | static inline struct swevent_hlist * |
Peter Zijlstra | b28ab83 | 2010-09-06 14:48:15 +0200 | [diff] [blame] | 9616 | swevent_hlist_deref(struct swevent_htable *swhash) |
Frederic Weisbecker | 49f135e | 2010-05-20 10:17:46 +0200 | [diff] [blame] | 9617 | { |
Peter Zijlstra | b28ab83 | 2010-09-06 14:48:15 +0200 | [diff] [blame] | 9618 | return rcu_dereference_protected(swhash->swevent_hlist, |
| 9619 | lockdep_is_held(&swhash->hlist_mutex)); |
Frederic Weisbecker | 49f135e | 2010-05-20 10:17:46 +0200 | [diff] [blame] | 9620 | } |
| 9621 | |
Peter Zijlstra | b28ab83 | 2010-09-06 14:48:15 +0200 | [diff] [blame] | 9622 | static void swevent_hlist_release(struct swevent_htable *swhash) |
Frederic Weisbecker | 76e1d90 | 2010-04-05 15:35:57 +0200 | [diff] [blame] | 9623 | { |
Peter Zijlstra | b28ab83 | 2010-09-06 14:48:15 +0200 | [diff] [blame] | 9624 | struct swevent_hlist *hlist = swevent_hlist_deref(swhash); |
Frederic Weisbecker | 76e1d90 | 2010-04-05 15:35:57 +0200 | [diff] [blame] | 9625 | |
Frederic Weisbecker | 49f135e | 2010-05-20 10:17:46 +0200 | [diff] [blame] | 9626 | if (!hlist) |
Frederic Weisbecker | 76e1d90 | 2010-04-05 15:35:57 +0200 | [diff] [blame] | 9627 | return; |
| 9628 | |
Andreea-Cristina Bernat | 70691d4 | 2014-08-22 16:26:05 +0300 | [diff] [blame] | 9629 | RCU_INIT_POINTER(swhash->swevent_hlist, NULL); |
Lai Jiangshan | fa4bbc4 | 2011-03-18 12:08:29 +0800 | [diff] [blame] | 9630 | kfree_rcu(hlist, rcu_head); |
Frederic Weisbecker | 76e1d90 | 2010-04-05 15:35:57 +0200 | [diff] [blame] | 9631 | } |
| 9632 | |
Thomas Gleixner | 3b364d7 | 2016-02-09 20:11:40 +0000 | [diff] [blame] | 9633 | static void swevent_hlist_put_cpu(int cpu) |
Frederic Weisbecker | 76e1d90 | 2010-04-05 15:35:57 +0200 | [diff] [blame] | 9634 | { |
Peter Zijlstra | b28ab83 | 2010-09-06 14:48:15 +0200 | [diff] [blame] | 9635 | struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu); |
Frederic Weisbecker | 76e1d90 | 2010-04-05 15:35:57 +0200 | [diff] [blame] | 9636 | |
Peter Zijlstra | b28ab83 | 2010-09-06 14:48:15 +0200 | [diff] [blame] | 9637 | mutex_lock(&swhash->hlist_mutex); |
Frederic Weisbecker | 76e1d90 | 2010-04-05 15:35:57 +0200 | [diff] [blame] | 9638 | |
Peter Zijlstra | b28ab83 | 2010-09-06 14:48:15 +0200 | [diff] [blame] | 9639 | if (!--swhash->hlist_refcount) |
| 9640 | swevent_hlist_release(swhash); |
Frederic Weisbecker | 76e1d90 | 2010-04-05 15:35:57 +0200 | [diff] [blame] | 9641 | |
Peter Zijlstra | b28ab83 | 2010-09-06 14:48:15 +0200 | [diff] [blame] | 9642 | mutex_unlock(&swhash->hlist_mutex); |
Frederic Weisbecker | 76e1d90 | 2010-04-05 15:35:57 +0200 | [diff] [blame] | 9643 | } |
| 9644 | |
Thomas Gleixner | 3b364d7 | 2016-02-09 20:11:40 +0000 | [diff] [blame] | 9645 | static void swevent_hlist_put(void) |
Frederic Weisbecker | 76e1d90 | 2010-04-05 15:35:57 +0200 | [diff] [blame] | 9646 | { |
| 9647 | int cpu; |
| 9648 | |
Frederic Weisbecker | 76e1d90 | 2010-04-05 15:35:57 +0200 | [diff] [blame] | 9649 | for_each_possible_cpu(cpu) |
Thomas Gleixner | 3b364d7 | 2016-02-09 20:11:40 +0000 | [diff] [blame] | 9650 | swevent_hlist_put_cpu(cpu); |
Frederic Weisbecker | 76e1d90 | 2010-04-05 15:35:57 +0200 | [diff] [blame] | 9651 | } |
| 9652 | |
Thomas Gleixner | 3b364d7 | 2016-02-09 20:11:40 +0000 | [diff] [blame] | 9653 | static int swevent_hlist_get_cpu(int cpu) |
Frederic Weisbecker | 76e1d90 | 2010-04-05 15:35:57 +0200 | [diff] [blame] | 9654 | { |
Peter Zijlstra | b28ab83 | 2010-09-06 14:48:15 +0200 | [diff] [blame] | 9655 | struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu); |
Frederic Weisbecker | 76e1d90 | 2010-04-05 15:35:57 +0200 | [diff] [blame] | 9656 | int err = 0; |
| 9657 | |
Peter Zijlstra | b28ab83 | 2010-09-06 14:48:15 +0200 | [diff] [blame] | 9658 | mutex_lock(&swhash->hlist_mutex); |
Thomas Gleixner | a63fbed | 2017-05-24 10:15:34 +0200 | [diff] [blame] | 9659 | if (!swevent_hlist_deref(swhash) && |
| 9660 | cpumask_test_cpu(cpu, perf_online_mask)) { |
Frederic Weisbecker | 76e1d90 | 2010-04-05 15:35:57 +0200 | [diff] [blame] | 9661 | struct swevent_hlist *hlist; |
| 9662 | |
| 9663 | hlist = kzalloc(sizeof(*hlist), GFP_KERNEL); |
| 9664 | if (!hlist) { |
| 9665 | err = -ENOMEM; |
| 9666 | goto exit; |
| 9667 | } |
Peter Zijlstra | b28ab83 | 2010-09-06 14:48:15 +0200 | [diff] [blame] | 9668 | rcu_assign_pointer(swhash->swevent_hlist, hlist); |
Frederic Weisbecker | 76e1d90 | 2010-04-05 15:35:57 +0200 | [diff] [blame] | 9669 | } |
Peter Zijlstra | b28ab83 | 2010-09-06 14:48:15 +0200 | [diff] [blame] | 9670 | swhash->hlist_refcount++; |
Peter Zijlstra | 9ed6060 | 2010-06-11 17:36:35 +0200 | [diff] [blame] | 9671 | exit: |
Peter Zijlstra | b28ab83 | 2010-09-06 14:48:15 +0200 | [diff] [blame] | 9672 | mutex_unlock(&swhash->hlist_mutex); |
Frederic Weisbecker | 76e1d90 | 2010-04-05 15:35:57 +0200 | [diff] [blame] | 9673 | |
| 9674 | return err; |
| 9675 | } |
| 9676 | |
Thomas Gleixner | 3b364d7 | 2016-02-09 20:11:40 +0000 | [diff] [blame] | 9677 | static int swevent_hlist_get(void) |
Frederic Weisbecker | 76e1d90 | 2010-04-05 15:35:57 +0200 | [diff] [blame] | 9678 | { |
Thomas Gleixner | 3b364d7 | 2016-02-09 20:11:40 +0000 | [diff] [blame] | 9679 | int err, cpu, failed_cpu; |
Frederic Weisbecker | 76e1d90 | 2010-04-05 15:35:57 +0200 | [diff] [blame] | 9680 | |
Thomas Gleixner | a63fbed | 2017-05-24 10:15:34 +0200 | [diff] [blame] | 9681 | mutex_lock(&pmus_lock); |
Frederic Weisbecker | 76e1d90 | 2010-04-05 15:35:57 +0200 | [diff] [blame] | 9682 | for_each_possible_cpu(cpu) { |
Thomas Gleixner | 3b364d7 | 2016-02-09 20:11:40 +0000 | [diff] [blame] | 9683 | err = swevent_hlist_get_cpu(cpu); |
Frederic Weisbecker | 76e1d90 | 2010-04-05 15:35:57 +0200 | [diff] [blame] | 9684 | if (err) { |
| 9685 | failed_cpu = cpu; |
| 9686 | goto fail; |
| 9687 | } |
| 9688 | } |
Thomas Gleixner | a63fbed | 2017-05-24 10:15:34 +0200 | [diff] [blame] | 9689 | mutex_unlock(&pmus_lock); |
Frederic Weisbecker | 76e1d90 | 2010-04-05 15:35:57 +0200 | [diff] [blame] | 9690 | return 0; |
Peter Zijlstra | 9ed6060 | 2010-06-11 17:36:35 +0200 | [diff] [blame] | 9691 | fail: |
Frederic Weisbecker | 76e1d90 | 2010-04-05 15:35:57 +0200 | [diff] [blame] | 9692 | for_each_possible_cpu(cpu) { |
| 9693 | if (cpu == failed_cpu) |
| 9694 | break; |
Thomas Gleixner | 3b364d7 | 2016-02-09 20:11:40 +0000 | [diff] [blame] | 9695 | swevent_hlist_put_cpu(cpu); |
Frederic Weisbecker | 76e1d90 | 2010-04-05 15:35:57 +0200 | [diff] [blame] | 9696 | } |
Thomas Gleixner | a63fbed | 2017-05-24 10:15:34 +0200 | [diff] [blame] | 9697 | mutex_unlock(&pmus_lock); |
Frederic Weisbecker | 76e1d90 | 2010-04-05 15:35:57 +0200 | [diff] [blame] | 9698 | return err; |
| 9699 | } |
| 9700 | |
Ingo Molnar | c5905af | 2012-02-24 08:31:31 +0100 | [diff] [blame] | 9701 | struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX]; |
Frederic Weisbecker | 95476b6 | 2010-04-14 23:42:18 +0200 | [diff] [blame] | 9702 | |
Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 9703 | static void sw_perf_event_destroy(struct perf_event *event) |
| 9704 | { |
| 9705 | u64 event_id = event->attr.config; |
| 9706 | |
| 9707 | WARN_ON(event->parent); |
| 9708 | |
Ingo Molnar | c5905af | 2012-02-24 08:31:31 +0100 | [diff] [blame] | 9709 | static_key_slow_dec(&perf_swevent_enabled[event_id]); |
Thomas Gleixner | 3b364d7 | 2016-02-09 20:11:40 +0000 | [diff] [blame] | 9710 | swevent_hlist_put(); |
Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 9711 | } |
| 9712 | |
| 9713 | static int perf_swevent_init(struct perf_event *event) |
| 9714 | { |
Tommi Rantala | 8176cce | 2013-04-13 22:49:14 +0300 | [diff] [blame] | 9715 | u64 event_id = event->attr.config; |
Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 9716 | |
| 9717 | if (event->attr.type != PERF_TYPE_SOFTWARE) |
| 9718 | return -ENOENT; |
| 9719 | |
Stephane Eranian | 2481c5f | 2012-02-09 23:20:59 +0100 | [diff] [blame] | 9720 | /* |
| 9721 | * no branch sampling for software events |
| 9722 | */ |
| 9723 | if (has_branch_stack(event)) |
| 9724 | return -EOPNOTSUPP; |
| 9725 | |
Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 9726 | switch (event_id) { |
| 9727 | case PERF_COUNT_SW_CPU_CLOCK: |
| 9728 | case PERF_COUNT_SW_TASK_CLOCK: |
| 9729 | return -ENOENT; |
| 9730 | |
| 9731 | default: |
| 9732 | break; |
| 9733 | } |
| 9734 | |
Dan Carpenter | ce67783 | 2010-10-24 21:50:42 +0200 | [diff] [blame] | 9735 | if (event_id >= PERF_COUNT_SW_MAX) |
Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 9736 | return -ENOENT; |
| 9737 | |
| 9738 | if (!event->parent) { |
| 9739 | int err; |
| 9740 | |
Thomas Gleixner | 3b364d7 | 2016-02-09 20:11:40 +0000 | [diff] [blame] | 9741 | err = swevent_hlist_get(); |
Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 9742 | if (err) |
| 9743 | return err; |
| 9744 | |
Ingo Molnar | c5905af | 2012-02-24 08:31:31 +0100 | [diff] [blame] | 9745 | static_key_slow_inc(&perf_swevent_enabled[event_id]); |
Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 9746 | event->destroy = sw_perf_event_destroy; |
| 9747 | } |
| 9748 | |
| 9749 | return 0; |
| 9750 | } |
| 9751 | |
| 9752 | static struct pmu perf_swevent = { |
Peter Zijlstra | 89a1e18 | 2010-09-07 17:34:50 +0200 | [diff] [blame] | 9753 | .task_ctx_nr = perf_sw_context, |
| 9754 | |
Peter Zijlstra | 34f4392 | 2015-02-20 14:05:38 +0100 | [diff] [blame] | 9755 | .capabilities = PERF_PMU_CAP_NO_NMI, |
| 9756 | |
Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 9757 | .event_init = perf_swevent_init, |
Peter Zijlstra | a4eaf7f | 2010-06-16 14:37:10 +0200 | [diff] [blame] | 9758 | .add = perf_swevent_add, |
| 9759 | .del = perf_swevent_del, |
| 9760 | .start = perf_swevent_start, |
| 9761 | .stop = perf_swevent_stop, |
Peter Zijlstra | 1c024eca | 2010-05-19 14:02:22 +0200 | [diff] [blame] | 9762 | .read = perf_swevent_read, |
Peter Zijlstra | 1c024eca | 2010-05-19 14:02:22 +0200 | [diff] [blame] | 9763 | }; |
Frederic Weisbecker | 95476b6 | 2010-04-14 23:42:18 +0200 | [diff] [blame] | 9764 | |
Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 9765 | #ifdef CONFIG_EVENT_TRACING |
| 9766 | |
Peter Zijlstra | 1c024eca | 2010-05-19 14:02:22 +0200 | [diff] [blame] | 9767 | static int perf_tp_filter_match(struct perf_event *event, |
Frederic Weisbecker | 95476b6 | 2010-04-14 23:42:18 +0200 | [diff] [blame] | 9768 | struct perf_sample_data *data) |
| 9769 | { |
Daniel Borkmann | 7e3f977 | 2016-07-14 18:08:03 +0200 | [diff] [blame] | 9770 | void *record = data->raw->frag.data; |
Frederic Weisbecker | 95476b6 | 2010-04-14 23:42:18 +0200 | [diff] [blame] | 9771 | |
Peter Zijlstra | b71b437 | 2015-11-02 10:50:51 +0100 | [diff] [blame] | 9772 | /* only top level events have filters set */ |
| 9773 | if (event->parent) |
| 9774 | event = event->parent; |
| 9775 | |
Frederic Weisbecker | 95476b6 | 2010-04-14 23:42:18 +0200 | [diff] [blame] | 9776 | if (likely(!event->filter) || filter_match_preds(event->filter, record)) |
| 9777 | return 1; |
| 9778 | return 0; |
| 9779 | } |
| 9780 | |
Peter Zijlstra | 1c024eca | 2010-05-19 14:02:22 +0200 | [diff] [blame] | 9781 | static int perf_tp_event_match(struct perf_event *event, |
| 9782 | struct perf_sample_data *data, |
| 9783 | struct pt_regs *regs) |
| 9784 | { |
Frederic Weisbecker | a0f7d0f | 2011-03-07 21:27:09 +0100 | [diff] [blame] | 9785 | if (event->hw.state & PERF_HES_STOPPED) |
| 9786 | return 0; |
Peter Zijlstra | 580d607 | 2010-05-20 20:54:31 +0200 | [diff] [blame] | 9787 | /* |
Song Liu | 9fd2e48 | 2019-05-07 09:15:45 -0700 | [diff] [blame] | 9788 | * If exclude_kernel, only trace user-space tracepoints (uprobes) |
Peter Zijlstra | 580d607 | 2010-05-20 20:54:31 +0200 | [diff] [blame] | 9789 | */ |
Song Liu | 9fd2e48 | 2019-05-07 09:15:45 -0700 | [diff] [blame] | 9790 | if (event->attr.exclude_kernel && !user_mode(regs)) |
Peter Zijlstra | 1c024eca | 2010-05-19 14:02:22 +0200 | [diff] [blame] | 9791 | return 0; |
| 9792 | |
| 9793 | if (!perf_tp_filter_match(event, data)) |
| 9794 | return 0; |
| 9795 | |
| 9796 | return 1; |
| 9797 | } |
| 9798 | |
Alexei Starovoitov | 85b67bc | 2016-04-18 20:11:50 -0700 | [diff] [blame] | 9799 | void perf_trace_run_bpf_submit(void *raw_data, int size, int rctx, |
| 9800 | struct trace_event_call *call, u64 count, |
| 9801 | struct pt_regs *regs, struct hlist_head *head, |
| 9802 | struct task_struct *task) |
| 9803 | { |
Yonghong Song | e87c6bc | 2017-10-23 23:53:08 -0700 | [diff] [blame] | 9804 | if (bpf_prog_array_valid(call)) { |
Alexei Starovoitov | 85b67bc | 2016-04-18 20:11:50 -0700 | [diff] [blame] | 9805 | *(struct pt_regs **)raw_data = regs; |
Yonghong Song | e87c6bc | 2017-10-23 23:53:08 -0700 | [diff] [blame] | 9806 | if (!trace_call_bpf(call, raw_data) || hlist_empty(head)) { |
Alexei Starovoitov | 85b67bc | 2016-04-18 20:11:50 -0700 | [diff] [blame] | 9807 | perf_swevent_put_recursion_context(rctx); |
| 9808 | return; |
| 9809 | } |
| 9810 | } |
| 9811 | perf_tp_event(call->event.type, count, raw_data, size, regs, head, |
Peter Zijlstra | 8fd0fbb | 2017-10-11 09:45:29 +0200 | [diff] [blame] | 9812 | rctx, task); |
Alexei Starovoitov | 85b67bc | 2016-04-18 20:11:50 -0700 | [diff] [blame] | 9813 | } |
| 9814 | EXPORT_SYMBOL_GPL(perf_trace_run_bpf_submit); |
| 9815 | |
Alexei Starovoitov | 1e1dcd9 | 2016-04-06 18:43:24 -0700 | [diff] [blame] | 9816 | void perf_tp_event(u16 event_type, u64 count, void *record, int entry_size, |
Andrew Vagin | e6dab5f | 2012-07-11 18:14:58 +0400 | [diff] [blame] | 9817 | struct pt_regs *regs, struct hlist_head *head, int rctx, |
Peter Zijlstra | 8fd0fbb | 2017-10-11 09:45:29 +0200 | [diff] [blame] | 9818 | struct task_struct *task) |
Peter Zijlstra | 1c024eca | 2010-05-19 14:02:22 +0200 | [diff] [blame] | 9819 | { |
| 9820 | struct perf_sample_data data; |
Peter Zijlstra | 8fd0fbb | 2017-10-11 09:45:29 +0200 | [diff] [blame] | 9821 | struct perf_event *event; |
Peter Zijlstra | 1c024eca | 2010-05-19 14:02:22 +0200 | [diff] [blame] | 9822 | |
| 9823 | struct perf_raw_record raw = { |
Daniel Borkmann | 7e3f977 | 2016-07-14 18:08:03 +0200 | [diff] [blame] | 9824 | .frag = { |
| 9825 | .size = entry_size, |
| 9826 | .data = record, |
| 9827 | }, |
Peter Zijlstra | 1c024eca | 2010-05-19 14:02:22 +0200 | [diff] [blame] | 9828 | }; |
| 9829 | |
Alexei Starovoitov | 1e1dcd9 | 2016-04-06 18:43:24 -0700 | [diff] [blame] | 9830 | perf_sample_data_init(&data, 0, 0); |
Peter Zijlstra | 1c024eca | 2010-05-19 14:02:22 +0200 | [diff] [blame] | 9831 | data.raw = &raw; |
| 9832 | |
Alexei Starovoitov | 1e1dcd9 | 2016-04-06 18:43:24 -0700 | [diff] [blame] | 9833 | perf_trace_buf_update(record, event_type); |
| 9834 | |
Peter Zijlstra | 8fd0fbb | 2017-10-11 09:45:29 +0200 | [diff] [blame] | 9835 | hlist_for_each_entry_rcu(event, head, hlist_entry) { |
Peter Zijlstra | 1c024eca | 2010-05-19 14:02:22 +0200 | [diff] [blame] | 9836 | if (perf_tp_event_match(event, &data, regs)) |
Peter Zijlstra | a8b0ca1 | 2011-06-27 14:41:57 +0200 | [diff] [blame] | 9837 | perf_swevent_event(event, count, &data, regs); |
Peter Zijlstra | 1c024eca | 2010-05-19 14:02:22 +0200 | [diff] [blame] | 9838 | } |
Peter Zijlstra | ecc55f8 | 2010-05-21 15:11:34 +0200 | [diff] [blame] | 9839 | |
Andrew Vagin | e6dab5f | 2012-07-11 18:14:58 +0400 | [diff] [blame] | 9840 | /* |
| 9841 | * If we were given a target task, also iterate its context and
| 9842 | * deliver this event there too. |
| 9843 | */ |
| 9844 | if (task && task != current) { |
| 9845 | struct perf_event_context *ctx; |
| 9846 | struct trace_entry *entry = record; |
| 9847 | |
| 9848 | rcu_read_lock(); |
| 9849 | ctx = rcu_dereference(task->perf_event_ctxp[perf_sw_context]); |
| 9850 | if (!ctx) |
| 9851 | goto unlock; |
| 9852 | |
| 9853 | list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { |
Jiri Olsa | cd6fb677 | 2018-09-23 18:13:43 +0200 | [diff] [blame] | 9854 | if (event->cpu != smp_processor_id()) |
| 9855 | continue; |
Andrew Vagin | e6dab5f | 2012-07-11 18:14:58 +0400 | [diff] [blame] | 9856 | if (event->attr.type != PERF_TYPE_TRACEPOINT) |
| 9857 | continue; |
| 9858 | if (event->attr.config != entry->type) |
| 9859 | continue; |
Marco Elver | 73743c3 | 2021-11-09 13:22:32 +0100 | [diff] [blame] | 9860 | /* Cannot deliver a synchronous signal to another task. */
| 9861 | if (event->attr.sigtrap) |
| 9862 | continue; |
Andrew Vagin | e6dab5f | 2012-07-11 18:14:58 +0400 | [diff] [blame] | 9863 | if (perf_tp_event_match(event, &data, regs)) |
| 9864 | perf_swevent_event(event, count, &data, regs); |
| 9865 | } |
| 9866 | unlock: |
| 9867 | rcu_read_unlock(); |
| 9868 | } |
| 9869 | |
Peter Zijlstra | ecc55f8 | 2010-05-21 15:11:34 +0200 | [diff] [blame] | 9870 | perf_swevent_put_recursion_context(rctx); |
Peter Zijlstra | 1c024eca | 2010-05-19 14:02:22 +0200 | [diff] [blame] | 9871 | } |
| 9872 | EXPORT_SYMBOL_GPL(perf_tp_event); |
| 9873 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 9874 | static void tp_perf_event_destroy(struct perf_event *event) |
| 9875 | { |
Peter Zijlstra | 1c024eca | 2010-05-19 14:02:22 +0200 | [diff] [blame] | 9876 | perf_trace_destroy(event); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 9877 | } |
| 9878 | |
Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 9879 | static int perf_tp_event_init(struct perf_event *event) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 9880 | { |
Frederic Weisbecker | 76e1d90 | 2010-04-05 15:35:57 +0200 | [diff] [blame] | 9881 | int err; |
| 9882 | |
Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 9883 | if (event->attr.type != PERF_TYPE_TRACEPOINT) |
| 9884 | return -ENOENT; |
| 9885 | |
Stephane Eranian | 2481c5f | 2012-02-09 23:20:59 +0100 | [diff] [blame] | 9886 | /* |
| 9887 | * no branch sampling for tracepoint events |
| 9888 | */ |
| 9889 | if (has_branch_stack(event)) |
| 9890 | return -EOPNOTSUPP; |
| 9891 | |
Peter Zijlstra | 1c024eca | 2010-05-19 14:02:22 +0200 | [diff] [blame] | 9892 | err = perf_trace_init(event); |
| 9893 | if (err) |
Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 9894 | return err; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 9895 | |
| 9896 | event->destroy = tp_perf_event_destroy; |
| 9897 | |
Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 9898 | return 0; |
| 9899 | } |
| 9900 | |
| 9901 | static struct pmu perf_tracepoint = { |
Peter Zijlstra | 89a1e18 | 2010-09-07 17:34:50 +0200 | [diff] [blame] | 9902 | .task_ctx_nr = perf_sw_context, |
| 9903 | |
Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 9904 | .event_init = perf_tp_event_init, |
Peter Zijlstra | a4eaf7f | 2010-06-16 14:37:10 +0200 | [diff] [blame] | 9905 | .add = perf_trace_add, |
| 9906 | .del = perf_trace_del, |
| 9907 | .start = perf_swevent_start, |
| 9908 | .stop = perf_swevent_stop, |
Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 9909 | .read = perf_swevent_read, |
Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 9910 | }; |
| 9911 | |
Song Liu | 33ea4b2 | 2017-12-06 14:45:16 -0800 | [diff] [blame] | 9912 | #if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS) |
Song Liu | e12f03d | 2017-12-06 14:45:15 -0800 | [diff] [blame] | 9913 | /* |
| 9914 | * Flags in config, used by the dynamic kprobe and uprobe PMUs.
| 9915 | * The flags should match the following PMU_FORMAT_ATTR().
| 9916 | * |
| 9917 | * PERF_PROBE_CONFIG_IS_RETPROBE if set, create kretprobe/uretprobe |
| 9918 | * if not set, create kprobe/uprobe |
Song Liu | a6ca88b | 2018-10-01 22:36:36 -0700 | [diff] [blame] | 9919 | * |
| 9920 | * The following values specify a reference counter (or semaphore, in the
| 9921 | * terminology of tools like dtrace, systemtap, etc.) for Userspace Statically
| 9922 | * Defined Tracepoints (USDT). Currently, we use the upper 32 bits of config for the offset.
| 9923 | * |
| 9924 | * PERF_UPROBE_REF_CTR_OFFSET_BITS # of bits in config used for the offset
| 9925 | * PERF_UPROBE_REF_CTR_OFFSET_SHIFT # of bits to shift left |
Song Liu | e12f03d | 2017-12-06 14:45:15 -0800 | [diff] [blame] | 9926 | */ |
| 9927 | enum perf_probe_config { |
| 9928 | PERF_PROBE_CONFIG_IS_RETPROBE = 1U << 0, /* [k,u]retprobe */ |
Song Liu | a6ca88b | 2018-10-01 22:36:36 -0700 | [diff] [blame] | 9929 | PERF_UPROBE_REF_CTR_OFFSET_BITS = 32, |
| 9930 | PERF_UPROBE_REF_CTR_OFFSET_SHIFT = 64 - PERF_UPROBE_REF_CTR_OFFSET_BITS, |
Song Liu | e12f03d | 2017-12-06 14:45:15 -0800 | [diff] [blame] | 9931 | }; |
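| | /*
| |  * Illustrative encoding (a sketch, not taken from this file): to open a
| |  * uretprobe with a USDT reference counter at offset 0x1c, userspace
| |  * would set attr.type to the dynamic "uprobe" PMU type from sysfs and
| |  * build attr.config roughly as
| |  *
| |  *   config  = PERF_PROBE_CONFIG_IS_RETPROBE;
| |  *   config |= (u64)0x1c << PERF_UPROBE_REF_CTR_OFFSET_SHIFT;
| |  *
| |  * matching the "retprobe" (config:0) and "ref_ctr_offset" (config:32-63)
| |  * format attributes below.
| |  */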
| 9932 | |
| 9933 | PMU_FORMAT_ATTR(retprobe, "config:0"); |
Song Liu | a6ca88b | 2018-10-01 22:36:36 -0700 | [diff] [blame] | 9934 | #endif |
Song Liu | e12f03d | 2017-12-06 14:45:15 -0800 | [diff] [blame] | 9935 | |
Song Liu | a6ca88b | 2018-10-01 22:36:36 -0700 | [diff] [blame] | 9936 | #ifdef CONFIG_KPROBE_EVENTS |
| 9937 | static struct attribute *kprobe_attrs[] = { |
Song Liu | e12f03d | 2017-12-06 14:45:15 -0800 | [diff] [blame] | 9938 | &format_attr_retprobe.attr, |
| 9939 | NULL, |
| 9940 | }; |
| 9941 | |
Song Liu | a6ca88b | 2018-10-01 22:36:36 -0700 | [diff] [blame] | 9942 | static struct attribute_group kprobe_format_group = { |
Song Liu | e12f03d | 2017-12-06 14:45:15 -0800 | [diff] [blame] | 9943 | .name = "format", |
Song Liu | a6ca88b | 2018-10-01 22:36:36 -0700 | [diff] [blame] | 9944 | .attrs = kprobe_attrs, |
Song Liu | e12f03d | 2017-12-06 14:45:15 -0800 | [diff] [blame] | 9945 | }; |
| 9946 | |
Song Liu | a6ca88b | 2018-10-01 22:36:36 -0700 | [diff] [blame] | 9947 | static const struct attribute_group *kprobe_attr_groups[] = { |
| 9948 | &kprobe_format_group, |
Song Liu | e12f03d | 2017-12-06 14:45:15 -0800 | [diff] [blame] | 9949 | NULL, |
| 9950 | }; |
| 9951 | |
| 9952 | static int perf_kprobe_event_init(struct perf_event *event); |
| 9953 | static struct pmu perf_kprobe = { |
| 9954 | .task_ctx_nr = perf_sw_context, |
| 9955 | .event_init = perf_kprobe_event_init, |
| 9956 | .add = perf_trace_add, |
| 9957 | .del = perf_trace_del, |
| 9958 | .start = perf_swevent_start, |
| 9959 | .stop = perf_swevent_stop, |
| 9960 | .read = perf_swevent_read, |
Song Liu | a6ca88b | 2018-10-01 22:36:36 -0700 | [diff] [blame] | 9961 | .attr_groups = kprobe_attr_groups, |
Song Liu | e12f03d | 2017-12-06 14:45:15 -0800 | [diff] [blame] | 9962 | }; |
| 9963 | |
| 9964 | static int perf_kprobe_event_init(struct perf_event *event) |
| 9965 | { |
| 9966 | int err; |
| 9967 | bool is_retprobe; |
| 9968 | |
| 9969 | if (event->attr.type != perf_kprobe.type) |
| 9970 | return -ENOENT; |
Song Liu | 32e6e96 | 2018-04-11 18:02:37 +0000 | [diff] [blame] | 9971 | |
Alexey Budankov | c9e0924 | 2020-04-02 11:47:01 +0300 | [diff] [blame] | 9972 | if (!perfmon_capable()) |
Song Liu | 32e6e96 | 2018-04-11 18:02:37 +0000 | [diff] [blame] | 9973 | return -EACCES; |
| 9974 | |
Song Liu | e12f03d | 2017-12-06 14:45:15 -0800 | [diff] [blame] | 9975 | /* |
| 9976 | * no branch sampling for probe events |
| 9977 | */ |
| 9978 | if (has_branch_stack(event)) |
| 9979 | return -EOPNOTSUPP; |
| 9980 | |
| 9981 | is_retprobe = event->attr.config & PERF_PROBE_CONFIG_IS_RETPROBE; |
| 9982 | err = perf_kprobe_init(event, is_retprobe); |
| 9983 | if (err) |
| 9984 | return err; |
| 9985 | |
| 9986 | event->destroy = perf_kprobe_destroy; |
| 9987 | |
| 9988 | return 0; |
| 9989 | } |
| 9990 | #endif /* CONFIG_KPROBE_EVENTS */ |
| 9991 | |
Song Liu | 33ea4b2 | 2017-12-06 14:45:16 -0800 | [diff] [blame] | 9992 | #ifdef CONFIG_UPROBE_EVENTS |
Song Liu | a6ca88b | 2018-10-01 22:36:36 -0700 | [diff] [blame] | 9993 | PMU_FORMAT_ATTR(ref_ctr_offset, "config:32-63"); |
| 9994 | |
| 9995 | static struct attribute *uprobe_attrs[] = { |
| 9996 | &format_attr_retprobe.attr, |
| 9997 | &format_attr_ref_ctr_offset.attr, |
| 9998 | NULL, |
| 9999 | }; |
| 10000 | |
| 10001 | static struct attribute_group uprobe_format_group = { |
| 10002 | .name = "format", |
| 10003 | .attrs = uprobe_attrs, |
| 10004 | }; |
| 10005 | |
| 10006 | static const struct attribute_group *uprobe_attr_groups[] = { |
| 10007 | &uprobe_format_group, |
| 10008 | NULL, |
| 10009 | }; |
| 10010 | |
Song Liu | 33ea4b2 | 2017-12-06 14:45:16 -0800 | [diff] [blame] | 10011 | static int perf_uprobe_event_init(struct perf_event *event); |
| 10012 | static struct pmu perf_uprobe = { |
| 10013 | .task_ctx_nr = perf_sw_context, |
| 10014 | .event_init = perf_uprobe_event_init, |
| 10015 | .add = perf_trace_add, |
| 10016 | .del = perf_trace_del, |
| 10017 | .start = perf_swevent_start, |
| 10018 | .stop = perf_swevent_stop, |
| 10019 | .read = perf_swevent_read, |
Song Liu | a6ca88b | 2018-10-01 22:36:36 -0700 | [diff] [blame] | 10020 | .attr_groups = uprobe_attr_groups, |
Song Liu | 33ea4b2 | 2017-12-06 14:45:16 -0800 | [diff] [blame] | 10021 | }; |
| 10022 | |
| 10023 | static int perf_uprobe_event_init(struct perf_event *event) |
| 10024 | { |
| 10025 | int err; |
Song Liu | a6ca88b | 2018-10-01 22:36:36 -0700 | [diff] [blame] | 10026 | unsigned long ref_ctr_offset; |
Song Liu | 33ea4b2 | 2017-12-06 14:45:16 -0800 | [diff] [blame] | 10027 | bool is_retprobe; |
| 10028 | |
| 10029 | if (event->attr.type != perf_uprobe.type) |
| 10030 | return -ENOENT; |
Song Liu | 32e6e96 | 2018-04-11 18:02:37 +0000 | [diff] [blame] | 10031 | |
Alexey Budankov | c9e0924 | 2020-04-02 11:47:01 +0300 | [diff] [blame] | 10032 | if (!perfmon_capable()) |
Song Liu | 32e6e96 | 2018-04-11 18:02:37 +0000 | [diff] [blame] | 10033 | return -EACCES; |
| 10034 | |
Song Liu | 33ea4b2 | 2017-12-06 14:45:16 -0800 | [diff] [blame] | 10035 | /* |
| 10036 | * no branch sampling for probe events |
| 10037 | */ |
| 10038 | if (has_branch_stack(event)) |
| 10039 | return -EOPNOTSUPP; |
| 10040 | |
| 10041 | is_retprobe = event->attr.config & PERF_PROBE_CONFIG_IS_RETPROBE; |
Song Liu | a6ca88b | 2018-10-01 22:36:36 -0700 | [diff] [blame] | 10042 | ref_ctr_offset = event->attr.config >> PERF_UPROBE_REF_CTR_OFFSET_SHIFT; |
| 10043 | err = perf_uprobe_init(event, ref_ctr_offset, is_retprobe); |
Song Liu | 33ea4b2 | 2017-12-06 14:45:16 -0800 | [diff] [blame] | 10044 | if (err) |
| 10045 | return err; |
| 10046 | |
| 10047 | event->destroy = perf_uprobe_destroy; |
| 10048 | |
| 10049 | return 0; |
| 10050 | } |
| 10051 | #endif /* CONFIG_UPROBE_EVENTS */ |
| 10052 | |
Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 10053 | static inline void perf_tp_register(void) |
| 10054 | { |
Peter Zijlstra | 2e80a82 | 2010-11-17 23:17:36 +0100 | [diff] [blame] | 10055 | perf_pmu_register(&perf_tracepoint, "tracepoint", PERF_TYPE_TRACEPOINT); |
Song Liu | e12f03d | 2017-12-06 14:45:15 -0800 | [diff] [blame] | 10056 | #ifdef CONFIG_KPROBE_EVENTS |
| 10057 | perf_pmu_register(&perf_kprobe, "kprobe", -1); |
| 10058 | #endif |
Song Liu | 33ea4b2 | 2017-12-06 14:45:16 -0800 | [diff] [blame] | 10059 | #ifdef CONFIG_UPROBE_EVENTS |
| 10060 | perf_pmu_register(&perf_uprobe, "uprobe", -1); |
| 10061 | #endif |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 10062 | } |
Li Zefan | 6fb2915 | 2009-10-15 11:21:42 +0800 | [diff] [blame] | 10063 | |
Li Zefan | 6fb2915 | 2009-10-15 11:21:42 +0800 | [diff] [blame] | 10064 | static void perf_event_free_filter(struct perf_event *event) |
| 10065 | { |
| 10066 | ftrace_profile_free_filter(event); |
| 10067 | } |
| 10068 | |
Alexei Starovoitov | aa6a5f3 | 2016-09-01 18:37:24 -0700 | [diff] [blame] | 10069 | #ifdef CONFIG_BPF_SYSCALL |
| 10070 | static void bpf_overflow_handler(struct perf_event *event, |
| 10071 | struct perf_sample_data *data, |
| 10072 | struct pt_regs *regs) |
| 10073 | { |
| 10074 | struct bpf_perf_event_data_kern ctx = { |
| 10075 | .data = data, |
Yonghong Song | 7d9285e | 2017-10-05 09:19:19 -0700 | [diff] [blame] | 10076 | .event = event, |
Alexei Starovoitov | aa6a5f3 | 2016-09-01 18:37:24 -0700 | [diff] [blame] | 10077 | }; |
Yonghong Song | 594286b | 2021-08-19 08:52:09 -0700 | [diff] [blame] | 10078 | struct bpf_prog *prog; |
Alexei Starovoitov | aa6a5f3 | 2016-09-01 18:37:24 -0700 | [diff] [blame] | 10079 | int ret = 0; |
| 10080 | |
Hendrik Brueckner | c895f6f | 2017-12-04 10:56:44 +0100 | [diff] [blame] | 10081 | ctx.regs = perf_arch_bpf_user_pt_regs(regs); |
Alexei Starovoitov | aa6a5f3 | 2016-09-01 18:37:24 -0700 | [diff] [blame] | 10082 | if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) |
| 10083 | goto out; |
| 10084 | rcu_read_lock(); |
Yonghong Song | 594286b | 2021-08-19 08:52:09 -0700 | [diff] [blame] | 10085 | prog = READ_ONCE(event->prog); |
| 10086 | if (prog) |
| 10087 | ret = bpf_prog_run(prog, &ctx); |
Alexei Starovoitov | aa6a5f3 | 2016-09-01 18:37:24 -0700 | [diff] [blame] | 10088 | rcu_read_unlock(); |
| 10089 | out: |
| 10090 | __this_cpu_dec(bpf_prog_active); |
Alexei Starovoitov | aa6a5f3 | 2016-09-01 18:37:24 -0700 | [diff] [blame] | 10091 | if (!ret) |
| 10092 | return; |
| 10093 | |
| 10094 | event->orig_overflow_handler(event, data, regs); |
| 10095 | } |
| 10096 | |
Andrii Nakryiko | 82e6b1e | 2021-08-15 00:05:58 -0700 | [diff] [blame] | 10097 | static int perf_event_set_bpf_handler(struct perf_event *event, |
| 10098 | struct bpf_prog *prog, |
| 10099 | u64 bpf_cookie) |
Alexei Starovoitov | aa6a5f3 | 2016-09-01 18:37:24 -0700 | [diff] [blame] | 10100 | { |
Alexei Starovoitov | aa6a5f3 | 2016-09-01 18:37:24 -0700 | [diff] [blame] | 10101 | if (event->overflow_handler_context) |
| 10102 | /* hw breakpoint or kernel counter */ |
| 10103 | return -EINVAL; |
| 10104 | |
| 10105 | if (event->prog) |
| 10106 | return -EEXIST; |
| 10107 | |
Andrii Nakryiko | 652c1b1 | 2021-08-15 00:05:56 -0700 | [diff] [blame] | 10108 | if (prog->type != BPF_PROG_TYPE_PERF_EVENT) |
| 10109 | return -EINVAL; |
Alexei Starovoitov | aa6a5f3 | 2016-09-01 18:37:24 -0700 | [diff] [blame] | 10110 | |
Song Liu | 5d99cb2c | 2020-07-23 11:06:45 -0700 | [diff] [blame] | 10111 | if (event->attr.precise_ip && |
| 10112 | prog->call_get_stack && |
| 10113 | (!(event->attr.sample_type & __PERF_SAMPLE_CALLCHAIN_EARLY) || |
| 10114 | event->attr.exclude_callchain_kernel || |
| 10115 | event->attr.exclude_callchain_user)) { |
| 10116 | /* |
| 10117 | * On perf_event with precise_ip, calling bpf_get_stack() |
| 10118 | * may trigger unwinder warnings and occasional crashes. |
| 10119 | * bpf_get_[stack|stackid] works around this issue by using |
| 10120 | * callchain attached to perf_sample_data. If the |
| 10121 | * perf_event does not have a full (kernel and user) callchain
| 10122 | * attached to perf_sample_data, do not allow attaching a BPF
| 10123 | * program that calls bpf_get_[stack|stackid].
| 10124 | */ |
Song Liu | 5d99cb2c | 2020-07-23 11:06:45 -0700 | [diff] [blame] | 10125 | return -EPROTO; |
| 10126 | } |
| 10127 | |
Alexei Starovoitov | aa6a5f3 | 2016-09-01 18:37:24 -0700 | [diff] [blame] | 10128 | event->prog = prog; |
Andrii Nakryiko | 82e6b1e | 2021-08-15 00:05:58 -0700 | [diff] [blame] | 10129 | event->bpf_cookie = bpf_cookie; |
Alexei Starovoitov | aa6a5f3 | 2016-09-01 18:37:24 -0700 | [diff] [blame] | 10130 | event->orig_overflow_handler = READ_ONCE(event->overflow_handler); |
| 10131 | WRITE_ONCE(event->overflow_handler, bpf_overflow_handler); |
| 10132 | return 0; |
| 10133 | } |
| 10134 | |
| 10135 | static void perf_event_free_bpf_handler(struct perf_event *event) |
| 10136 | { |
| 10137 | struct bpf_prog *prog = event->prog; |
| 10138 | |
| 10139 | if (!prog) |
| 10140 | return; |
| 10141 | |
| 10142 | WRITE_ONCE(event->overflow_handler, event->orig_overflow_handler); |
| 10143 | event->prog = NULL; |
| 10144 | bpf_prog_put(prog); |
| 10145 | } |
| 10146 | #else |
Andrii Nakryiko | 82e6b1e | 2021-08-15 00:05:58 -0700 | [diff] [blame] | 10147 | static int perf_event_set_bpf_handler(struct perf_event *event, |
| 10148 | struct bpf_prog *prog, |
| 10149 | u64 bpf_cookie) |
Alexei Starovoitov | aa6a5f3 | 2016-09-01 18:37:24 -0700 | [diff] [blame] | 10150 | { |
| 10151 | return -EOPNOTSUPP; |
| 10152 | } |
| 10153 | static void perf_event_free_bpf_handler(struct perf_event *event) |
| 10154 | { |
| 10155 | } |
| 10156 | #endif |
| 10157 | |
Song Liu | e12f03d | 2017-12-06 14:45:15 -0800 | [diff] [blame] | 10158 | /* |
| 10159 | * returns true if the event is a tracepoint, or a kprobe/uprobe created
| 10160 | * with perf_event_open() |
| 10161 | */ |
| 10162 | static inline bool perf_event_is_tracing(struct perf_event *event) |
| 10163 | { |
| 10164 | if (event->pmu == &perf_tracepoint) |
| 10165 | return true; |
| 10166 | #ifdef CONFIG_KPROBE_EVENTS |
| 10167 | if (event->pmu == &perf_kprobe) |
| 10168 | return true; |
| 10169 | #endif |
Song Liu | 33ea4b2 | 2017-12-06 14:45:16 -0800 | [diff] [blame] | 10170 | #ifdef CONFIG_UPROBE_EVENTS |
| 10171 | if (event->pmu == &perf_uprobe) |
| 10172 | return true; |
| 10173 | #endif |
Song Liu | e12f03d | 2017-12-06 14:45:15 -0800 | [diff] [blame] | 10174 | return false; |
| 10175 | } |
| 10176 | |
Andrii Nakryiko | 82e6b1e | 2021-08-15 00:05:58 -0700 | [diff] [blame] | 10177 | int perf_event_set_bpf_prog(struct perf_event *event, struct bpf_prog *prog, |
| 10178 | u64 bpf_cookie) |
Alexei Starovoitov | 2541517 | 2015-03-25 12:49:20 -0700 | [diff] [blame] | 10179 | { |
Yonghong Song | cf5f5ce | 2017-08-04 16:00:09 -0700 | [diff] [blame] | 10180 | bool is_kprobe, is_tracepoint, is_syscall_tp; |
Alexei Starovoitov | 2541517 | 2015-03-25 12:49:20 -0700 | [diff] [blame] | 10181 | |
Song Liu | e12f03d | 2017-12-06 14:45:15 -0800 | [diff] [blame] | 10182 | if (!perf_event_is_tracing(event)) |
Andrii Nakryiko | 82e6b1e | 2021-08-15 00:05:58 -0700 | [diff] [blame] | 10183 | return perf_event_set_bpf_handler(event, prog, bpf_cookie); |
Alexei Starovoitov | 2541517 | 2015-03-25 12:49:20 -0700 | [diff] [blame] | 10184 | |
Alexei Starovoitov | 98b5c2c | 2016-04-06 18:43:25 -0700 | [diff] [blame] | 10185 | is_kprobe = event->tp_event->flags & TRACE_EVENT_FL_UKPROBE; |
| 10186 | is_tracepoint = event->tp_event->flags & TRACE_EVENT_FL_TRACEPOINT; |
Yonghong Song | cf5f5ce | 2017-08-04 16:00:09 -0700 | [diff] [blame] | 10187 | is_syscall_tp = is_syscall_trace_event(event->tp_event); |
| 10188 | if (!is_kprobe && !is_tracepoint && !is_syscall_tp) |
Alexei Starovoitov | 98b5c2c | 2016-04-06 18:43:25 -0700 | [diff] [blame] | 10189 | /* bpf programs can only be attached to u/kprobe or tracepoint */ |
Alexei Starovoitov | 2541517 | 2015-03-25 12:49:20 -0700 | [diff] [blame] | 10190 | return -EINVAL; |
| 10191 | |
Alexei Starovoitov | 98b5c2c | 2016-04-06 18:43:25 -0700 | [diff] [blame] | 10192 | if ((is_kprobe && prog->type != BPF_PROG_TYPE_KPROBE) || |
Yonghong Song | cf5f5ce | 2017-08-04 16:00:09 -0700 | [diff] [blame] | 10193 | (is_tracepoint && prog->type != BPF_PROG_TYPE_TRACEPOINT) || |
Andrii Nakryiko | 652c1b1 | 2021-08-15 00:05:56 -0700 | [diff] [blame] | 10194 | (is_syscall_tp && prog->type != BPF_PROG_TYPE_TRACEPOINT)) |
Alexei Starovoitov | 2541517 | 2015-03-25 12:49:20 -0700 | [diff] [blame] | 10195 | return -EINVAL; |
Alexei Starovoitov | 2541517 | 2015-03-25 12:49:20 -0700 | [diff] [blame] | 10196 | |
Josef Bacik | 9802d86 | 2017-12-11 11:36:48 -0500 | [diff] [blame] | 10197 | /* Kprobe override only works for kprobes, not uprobes. */ |
| 10198 | if (prog->kprobe_override && |
Andrii Nakryiko | 652c1b1 | 2021-08-15 00:05:56 -0700 | [diff] [blame] | 10199 | !(event->tp_event->flags & TRACE_EVENT_FL_KPROBE)) |
Josef Bacik | 9802d86 | 2017-12-11 11:36:48 -0500 | [diff] [blame] | 10200 | return -EINVAL; |
Josef Bacik | 9802d86 | 2017-12-11 11:36:48 -0500 | [diff] [blame] | 10201 | |
Yonghong Song | cf5f5ce | 2017-08-04 16:00:09 -0700 | [diff] [blame] | 10202 | if (is_tracepoint || is_syscall_tp) { |
Alexei Starovoitov | 32bbe00 | 2016-04-06 18:43:28 -0700 | [diff] [blame] | 10203 | int off = trace_event_get_offsets(event->tp_event); |
| 10204 | |
Andrii Nakryiko | 652c1b1 | 2021-08-15 00:05:56 -0700 | [diff] [blame] | 10205 | if (prog->aux->max_ctx_offset > off) |
Alexei Starovoitov | 32bbe00 | 2016-04-06 18:43:28 -0700 | [diff] [blame] | 10206 | return -EACCES; |
Alexei Starovoitov | 32bbe00 | 2016-04-06 18:43:28 -0700 | [diff] [blame] | 10207 | } |
Alexei Starovoitov | 2541517 | 2015-03-25 12:49:20 -0700 | [diff] [blame] | 10208 | |
Andrii Nakryiko | 82e6b1e | 2021-08-15 00:05:58 -0700 | [diff] [blame] | 10209 | return perf_event_attach_bpf_prog(event, prog, bpf_cookie); |
Alexei Starovoitov | 2541517 | 2015-03-25 12:49:20 -0700 | [diff] [blame] | 10210 | } |
| 10211 | |
Andrii Nakryiko | b89fbfb | 2021-08-15 00:05:57 -0700 | [diff] [blame] | 10212 | void perf_event_free_bpf_prog(struct perf_event *event) |
Alexei Starovoitov | 2541517 | 2015-03-25 12:49:20 -0700 | [diff] [blame] | 10213 | { |
Song Liu | e12f03d | 2017-12-06 14:45:15 -0800 | [diff] [blame] | 10214 | if (!perf_event_is_tracing(event)) { |
Yonghong Song | 0b4c684 | 2017-10-23 23:53:07 -0700 | [diff] [blame] | 10215 | perf_event_free_bpf_handler(event); |
Alexei Starovoitov | 2541517 | 2015-03-25 12:49:20 -0700 | [diff] [blame] | 10216 | return; |
Alexei Starovoitov | 2541517 | 2015-03-25 12:49:20 -0700 | [diff] [blame] | 10217 | } |
Yonghong Song | e87c6bc | 2017-10-23 23:53:08 -0700 | [diff] [blame] | 10218 | perf_event_detach_bpf_prog(event); |
Alexei Starovoitov | 2541517 | 2015-03-25 12:49:20 -0700 | [diff] [blame] | 10219 | } |
| 10220 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 10221 | #else |
Li Zefan | 6fb2915 | 2009-10-15 11:21:42 +0800 | [diff] [blame] | 10222 | |
Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 10223 | static inline void perf_tp_register(void) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 10224 | { |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 10225 | } |
Li Zefan | 6fb2915 | 2009-10-15 11:21:42 +0800 | [diff] [blame] | 10226 | |
Li Zefan | 6fb2915 | 2009-10-15 11:21:42 +0800 | [diff] [blame] | 10227 | static void perf_event_free_filter(struct perf_event *event) |
| 10228 | { |
| 10229 | } |
| 10230 | |
Andrii Nakryiko | 82e6b1e | 2021-08-15 00:05:58 -0700 | [diff] [blame] | 10231 | int perf_event_set_bpf_prog(struct perf_event *event, struct bpf_prog *prog, |
| 10232 | u64 bpf_cookie) |
Alexei Starovoitov | 2541517 | 2015-03-25 12:49:20 -0700 | [diff] [blame] | 10233 | { |
| 10234 | return -ENOENT; |
| 10235 | } |
| 10236 | |
Andrii Nakryiko | b89fbfb | 2021-08-15 00:05:57 -0700 | [diff] [blame] | 10237 | void perf_event_free_bpf_prog(struct perf_event *event) |
Alexei Starovoitov | 2541517 | 2015-03-25 12:49:20 -0700 | [diff] [blame] | 10238 | { |
| 10239 | } |
Li Zefan | 07b139c | 2009-12-21 14:27:35 +0800 | [diff] [blame] | 10240 | #endif /* CONFIG_EVENT_TRACING */ |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 10241 | |
Frederic Weisbecker | 24f1e32c | 2009-09-09 19:22:48 +0200 | [diff] [blame] | 10242 | #ifdef CONFIG_HAVE_HW_BREAKPOINT |
Frederic Weisbecker | f5ffe02 | 2009-11-23 15:42:34 +0100 | [diff] [blame] | 10243 | void perf_bp_event(struct perf_event *bp, void *data) |
Frederic Weisbecker | 24f1e32c | 2009-09-09 19:22:48 +0200 | [diff] [blame] | 10244 | { |
Frederic Weisbecker | f5ffe02 | 2009-11-23 15:42:34 +0100 | [diff] [blame] | 10245 | struct perf_sample_data sample; |
| 10246 | struct pt_regs *regs = data; |
| 10247 | |
Robert Richter | fd0d000 | 2012-04-02 20:19:08 +0200 | [diff] [blame] | 10248 | perf_sample_data_init(&sample, bp->attr.bp_addr, 0); |
Frederic Weisbecker | f5ffe02 | 2009-11-23 15:42:34 +0100 | [diff] [blame] | 10249 | |
Peter Zijlstra | a4eaf7f | 2010-06-16 14:37:10 +0200 | [diff] [blame] | 10250 | if (!bp->hw.state && !perf_exclude_event(bp, regs)) |
Peter Zijlstra | a8b0ca1 | 2011-06-27 14:41:57 +0200 | [diff] [blame] | 10251 | perf_swevent_event(bp, 1, &sample, regs); |
Frederic Weisbecker | 24f1e32c | 2009-09-09 19:22:48 +0200 | [diff] [blame] | 10252 | } |
| 10253 | #endif |
| 10254 | |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 10255 | /* |
| 10256 | * Allocate a new address filter |
| 10257 | */ |
| 10258 | static struct perf_addr_filter * |
| 10259 | perf_addr_filter_new(struct perf_event *event, struct list_head *filters) |
| 10260 | { |
| 10261 | int node = cpu_to_node(event->cpu == -1 ? 0 : event->cpu); |
| 10262 | struct perf_addr_filter *filter; |
| 10263 | |
| 10264 | filter = kzalloc_node(sizeof(*filter), GFP_KERNEL, node); |
| 10265 | if (!filter) |
| 10266 | return NULL; |
| 10267 | |
| 10268 | INIT_LIST_HEAD(&filter->entry); |
| 10269 | list_add_tail(&filter->entry, filters); |
| 10270 | |
| 10271 | return filter; |
| 10272 | } |
| 10273 | |
| 10274 | static void free_filters_list(struct list_head *filters) |
| 10275 | { |
| 10276 | struct perf_addr_filter *filter, *iter; |
| 10277 | |
| 10278 | list_for_each_entry_safe(filter, iter, filters, entry) { |
Song Liu | 9511bce | 2018-04-17 23:29:07 -0700 | [diff] [blame] | 10279 | path_put(&filter->path); |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 10280 | list_del(&filter->entry); |
| 10281 | kfree(filter); |
| 10282 | } |
| 10283 | } |
| 10284 | |
| 10285 | /* |
| 10286 | * Free existing address filters and optionally install new ones |
| 10287 | */ |
| 10288 | static void perf_addr_filters_splice(struct perf_event *event, |
| 10289 | struct list_head *head) |
| 10290 | { |
| 10291 | unsigned long flags; |
| 10292 | LIST_HEAD(list); |
| 10293 | |
| 10294 | if (!has_addr_filter(event)) |
| 10295 | return; |
| 10296 | |
| 10297 | /* don't bother with children, they don't have their own filters */ |
| 10298 | if (event->parent) |
| 10299 | return; |
| 10300 | |
| 10301 | raw_spin_lock_irqsave(&event->addr_filters.lock, flags); |
| 10302 | |
| 10303 | list_splice_init(&event->addr_filters.list, &list); |
| 10304 | if (head) |
| 10305 | list_splice(head, &event->addr_filters.list); |
| 10306 | |
| 10307 | raw_spin_unlock_irqrestore(&event->addr_filters.lock, flags); |
| 10308 | |
| 10309 | free_filters_list(&list); |
| 10310 | } |
| 10311 | |
| 10312 | /* |
| 10313 | * Scan through mm's vmas and see if one of them matches the |
| 10314 | * @filter; if so, adjust filter's address range. |
Michel Lespinasse | c1e8d7c | 2020-06-08 21:33:54 -0700 | [diff] [blame] | 10315 | * Called with mm::mmap_lock down for reading. |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 10316 | */ |
Alexander Shishkin | c60f83b | 2019-02-15 13:56:55 +0200 | [diff] [blame] | 10317 | static void perf_addr_filter_apply(struct perf_addr_filter *filter, |
| 10318 | struct mm_struct *mm, |
| 10319 | struct perf_addr_filter_range *fr) |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 10320 | { |
| 10321 | struct vm_area_struct *vma; |
| 10322 | |
| 10323 | for (vma = mm->mmap; vma; vma = vma->vm_next) { |
Alexander Shishkin | c60f83b | 2019-02-15 13:56:55 +0200 | [diff] [blame] | 10324 | if (!vma->vm_file) |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 10325 | continue; |
| 10326 | |
Alexander Shishkin | c60f83b | 2019-02-15 13:56:55 +0200 | [diff] [blame] | 10327 | if (perf_addr_filter_vma_adjust(filter, vma, fr)) |
| 10328 | return; |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 10329 | } |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 10330 | } |
| 10331 | |
| 10332 | /* |
| 10333 | * Update event's address range filters based on the |
| 10334 | * task's existing mappings, if any. |
| 10335 | */ |
| 10336 | static void perf_event_addr_filters_apply(struct perf_event *event) |
| 10337 | { |
| 10338 | struct perf_addr_filters_head *ifh = perf_event_addr_filters(event); |
| 10339 | struct task_struct *task = READ_ONCE(event->ctx->task); |
| 10340 | struct perf_addr_filter *filter; |
| 10341 | struct mm_struct *mm = NULL; |
| 10342 | unsigned int count = 0; |
| 10343 | unsigned long flags; |
| 10344 | |
| 10345 | /* |
| 10346 | * We may observe TASK_TOMBSTONE, which means that the event tear-down |
| 10347 | * will stop on the parent's child_mutex that our caller is also holding |
| 10348 | */ |
| 10349 | if (task == TASK_TOMBSTONE) |
| 10350 | return; |
| 10351 | |
Alexander Shishkin | 52a44f8 | 2019-03-29 11:12:12 +0200 | [diff] [blame] | 10352 | if (ifh->nr_file_filters) { |
Baptiste Lepers | b89a05b | 2021-09-06 11:53:10 +1000 | [diff] [blame] | 10353 | mm = get_task_mm(task); |
Alexander Shishkin | 52a44f8 | 2019-03-29 11:12:12 +0200 | [diff] [blame] | 10354 | if (!mm) |
| 10355 | goto restart; |
Alexander Shishkin | 6ce77bf | 2017-01-26 11:40:57 +0200 | [diff] [blame] | 10356 | |
Michel Lespinasse | d8ed45c | 2020-06-08 21:33:25 -0700 | [diff] [blame] | 10357 | mmap_read_lock(mm); |
Alexander Shishkin | 52a44f8 | 2019-03-29 11:12:12 +0200 | [diff] [blame] | 10358 | } |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 10359 | |
| 10360 | raw_spin_lock_irqsave(&ifh->lock, flags); |
| 10361 | list_for_each_entry(filter, &ifh->list, entry) { |
Alexander Shishkin | 52a44f8 | 2019-03-29 11:12:12 +0200 | [diff] [blame] | 10362 | if (filter->path.dentry) { |
| 10363 | /* |
| 10364 | * Adjust base offset if the filter is associated with a
| 10365 | * binary that needs to be mapped: |
| 10366 | */ |
| 10367 | event->addr_filter_ranges[count].start = 0; |
| 10368 | event->addr_filter_ranges[count].size = 0; |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 10369 | |
Alexander Shishkin | c60f83b | 2019-02-15 13:56:55 +0200 | [diff] [blame] | 10370 | perf_addr_filter_apply(filter, mm, &event->addr_filter_ranges[count]); |
Alexander Shishkin | 52a44f8 | 2019-03-29 11:12:12 +0200 | [diff] [blame] | 10371 | } else { |
| 10372 | event->addr_filter_ranges[count].start = filter->offset; |
| 10373 | event->addr_filter_ranges[count].size = filter->size; |
| 10374 | } |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 10375 | |
| 10376 | count++; |
| 10377 | } |
| 10378 | |
| 10379 | event->addr_filters_gen++; |
| 10380 | raw_spin_unlock_irqrestore(&ifh->lock, flags); |
| 10381 | |
Alexander Shishkin | 52a44f8 | 2019-03-29 11:12:12 +0200 | [diff] [blame] | 10382 | if (ifh->nr_file_filters) { |
Michel Lespinasse | d8ed45c | 2020-06-08 21:33:25 -0700 | [diff] [blame] | 10383 | mmap_read_unlock(mm); |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 10384 | |
Alexander Shishkin | 52a44f8 | 2019-03-29 11:12:12 +0200 | [diff] [blame] | 10385 | mmput(mm); |
| 10386 | } |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 10387 | |
| 10388 | restart: |
Alexander Shishkin | 767ae08 | 2016-09-06 16:23:49 +0300 | [diff] [blame] | 10389 | perf_event_stop(event, 1); |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 10390 | } |
| 10391 | |
| 10392 | /* |
| 10393 | * Address range filtering: limiting the data to certain |
| 10394 | * instruction address ranges. Filters are ioctl()ed to us from |
| 10395 | * userspace as ASCII strings.
| 10396 | * |
| 10397 | * Filter string format: |
| 10398 | * |
| 10399 | * ACTION RANGE_SPEC |
| 10400 | * where ACTION is one of the |
| 10401 | * * "filter": limit the trace to this region |
| 10402 | * * "start": start tracing from this address |
| 10403 | * * "stop": stop tracing at this address/region; |
| 10404 | * RANGE_SPEC is |
| 10405 | * * for kernel addresses: <start address>[/<size>] |
| 10406 | * * for object files: <start address>[/<size>]@</path/to/object/file> |
| 10407 | * |
Alexander Shishkin | 6ed70cf | 2018-03-29 15:06:48 +0300 | [diff] [blame] | 10408 | * if <size> is not specified or is zero, the range is treated as a single |
| 10409 | * address; not valid for ACTION=="filter". |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 10410 | */ |
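/*
 * Illustrative filter strings matching the grammar above (addresses and
 * paths are hypothetical):
 *
 *	"filter 0x1000/0x2000@/usr/lib/libfoo.so"	- trace only that range of the object
 *	"start 0xffffffff81000000"			- start tracing at a kernel address
 *	"stop 0x2000@/usr/lib/libfoo.so"		- stop tracing at that object address
 */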
| 10411 | enum { |
Alexander Shishkin | e96271f | 2016-11-18 13:38:43 +0200 | [diff] [blame] | 10412 | IF_ACT_NONE = -1, |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 10413 | IF_ACT_FILTER, |
| 10414 | IF_ACT_START, |
| 10415 | IF_ACT_STOP, |
| 10416 | IF_SRC_FILE, |
| 10417 | IF_SRC_KERNEL, |
| 10418 | IF_SRC_FILEADDR, |
| 10419 | IF_SRC_KERNELADDR, |
| 10420 | }; |
| 10421 | |
| 10422 | enum { |
| 10423 | IF_STATE_ACTION = 0, |
| 10424 | IF_STATE_SOURCE, |
| 10425 | IF_STATE_END, |
| 10426 | }; |
| 10427 | |
| 10428 | static const match_table_t if_tokens = { |
| 10429 | { IF_ACT_FILTER, "filter" }, |
| 10430 | { IF_ACT_START, "start" }, |
| 10431 | { IF_ACT_STOP, "stop" }, |
| 10432 | { IF_SRC_FILE, "%u/%u@%s" }, |
| 10433 | { IF_SRC_KERNEL, "%u/%u" }, |
| 10434 | { IF_SRC_FILEADDR, "%u@%s" }, |
| 10435 | { IF_SRC_KERNELADDR, "%u" }, |
Alexander Shishkin | e96271f | 2016-11-18 13:38:43 +0200 | [diff] [blame] | 10436 | { IF_ACT_NONE, NULL }, |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 10437 | }; |
| 10438 | |
| 10439 | /* |
| 10440 | * Address filter string parser |
| 10441 | */ |
| 10442 | static int |
| 10443 | perf_event_parse_addr_filter(struct perf_event *event, char *fstr, |
| 10444 | struct list_head *filters) |
| 10445 | { |
| 10446 | struct perf_addr_filter *filter = NULL; |
| 10447 | char *start, *orig, *filename = NULL; |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 10448 | substring_t args[MAX_OPT_ARGS]; |
| 10449 | int state = IF_STATE_ACTION, token; |
| 10450 | unsigned int kernel = 0; |
| 10451 | int ret = -EINVAL; |
| 10452 | |
| 10453 | orig = fstr = kstrdup(fstr, GFP_KERNEL); |
| 10454 | if (!fstr) |
| 10455 | return -ENOMEM; |
| 10456 | |
| 10457 | while ((start = strsep(&fstr, " ,\n")) != NULL) { |
Alexander Shishkin | 6ed70cf | 2018-03-29 15:06:48 +0300 | [diff] [blame] | 10458 | static const enum perf_addr_filter_action_t actions[] = { |
| 10459 | [IF_ACT_FILTER] = PERF_ADDR_FILTER_ACTION_FILTER, |
| 10460 | [IF_ACT_START] = PERF_ADDR_FILTER_ACTION_START, |
| 10461 | [IF_ACT_STOP] = PERF_ADDR_FILTER_ACTION_STOP, |
| 10462 | }; |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 10463 | ret = -EINVAL; |
| 10464 | |
| 10465 | if (!*start) |
| 10466 | continue; |
| 10467 | |
| 10468 | /* filter definition begins */ |
| 10469 | if (state == IF_STATE_ACTION) { |
| 10470 | filter = perf_addr_filter_new(event, filters); |
| 10471 | if (!filter) |
| 10472 | goto fail; |
| 10473 | } |
| 10474 | |
| 10475 | token = match_token(start, if_tokens, args); |
| 10476 | switch (token) { |
| 10477 | case IF_ACT_FILTER: |
| 10478 | case IF_ACT_START: |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 10479 | case IF_ACT_STOP: |
| 10480 | if (state != IF_STATE_ACTION) |
| 10481 | goto fail; |
| 10482 | |
Alexander Shishkin | 6ed70cf | 2018-03-29 15:06:48 +0300 | [diff] [blame] | 10483 | filter->action = actions[token]; |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 10484 | state = IF_STATE_SOURCE; |
| 10485 | break; |
| 10486 | |
| 10487 | case IF_SRC_KERNELADDR: |
| 10488 | case IF_SRC_KERNEL: |
| 10489 | kernel = 1; |
Gustavo A. R. Silva | df561f66 | 2020-08-23 17:36:59 -0500 | [diff] [blame] | 10490 | fallthrough; |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 10491 | |
| 10492 | case IF_SRC_FILEADDR: |
| 10493 | case IF_SRC_FILE: |
| 10494 | if (state != IF_STATE_SOURCE) |
| 10495 | goto fail; |
| 10496 | |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 10497 | *args[0].to = 0; |
| 10498 | ret = kstrtoul(args[0].from, 0, &filter->offset); |
| 10499 | if (ret) |
| 10500 | goto fail; |
| 10501 | |
Alexander Shishkin | 6ed70cf | 2018-03-29 15:06:48 +0300 | [diff] [blame] | 10502 | if (token == IF_SRC_KERNEL || token == IF_SRC_FILE) { |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 10503 | *args[1].to = 0; |
| 10504 | ret = kstrtoul(args[1].from, 0, &filter->size); |
| 10505 | if (ret) |
| 10506 | goto fail; |
| 10507 | } |
| 10508 | |
Mathieu Poirier | 4059ffd | 2016-07-18 10:43:05 -0600 | [diff] [blame] | 10509 | if (token == IF_SRC_FILE || token == IF_SRC_FILEADDR) { |
Alexander Shishkin | 6ed70cf | 2018-03-29 15:06:48 +0300 | [diff] [blame] | 10510 | int fpos = token == IF_SRC_FILE ? 2 : 1; |
Mathieu Poirier | 4059ffd | 2016-07-18 10:43:05 -0600 | [diff] [blame] | 10511 | |
kiyin(尹亮) | 7bdb157 | 2020-11-04 08:23:22 +0300 | [diff] [blame] | 10512 | kfree(filename); |
Mathieu Poirier | 4059ffd | 2016-07-18 10:43:05 -0600 | [diff] [blame] | 10513 | filename = match_strdup(&args[fpos]); |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 10514 | if (!filename) { |
| 10515 | ret = -ENOMEM; |
| 10516 | goto fail; |
| 10517 | } |
| 10518 | } |
| 10519 | |
| 10520 | state = IF_STATE_END; |
| 10521 | break; |
| 10522 | |
| 10523 | default: |
| 10524 | goto fail; |
| 10525 | } |
| 10526 | |
| 10527 | /* |
| 10528 | * Filter definition is fully parsed, validate and install it. |
| 10529 | * Make sure that it doesn't contradict itself or the event's |
| 10530 | * attribute. |
| 10531 | */ |
| 10532 | if (state == IF_STATE_END) { |
Alexander Shishkin | 9ccbfbb | 2017-01-26 11:40:56 +0200 | [diff] [blame] | 10533 | ret = -EINVAL; |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 10534 | if (kernel && event->attr.exclude_kernel) |
| 10535 | goto fail; |
| 10536 | |
Alexander Shishkin | 6ed70cf | 2018-03-29 15:06:48 +0300 | [diff] [blame] | 10537 | /* |
| 10538 | * ACTION "filter" must have a non-zero length region |
| 10539 | * specified. |
| 10540 | */ |
| 10541 | if (filter->action == PERF_ADDR_FILTER_ACTION_FILTER && |
| 10542 | !filter->size) |
| 10543 | goto fail; |
| 10544 | |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 10545 | if (!kernel) { |
| 10546 | if (!filename) |
| 10547 | goto fail; |
| 10548 | |
Alexander Shishkin | 6ce77bf | 2017-01-26 11:40:57 +0200 | [diff] [blame] | 10549 | /* |
| 10550 | * For now, we only support file-based filters |
| 10551 | * in per-task events; doing so for CPU-wide |
| 10552 | * events requires additional context switching |
| 10553 | * trickery, since the same object code will be
| 10554 | * mapped at different virtual addresses in |
| 10555 | * different processes. |
| 10556 | */ |
| 10557 | ret = -EOPNOTSUPP; |
| 10558 | if (!event->ctx->task) |
kiyin(尹亮) | 7bdb157 | 2020-11-04 08:23:22 +0300 | [diff] [blame] | 10559 | goto fail; |
Alexander Shishkin | 6ce77bf | 2017-01-26 11:40:57 +0200 | [diff] [blame] | 10560 | |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 10561 | /* look up the path and grab its inode */ |
Song Liu | 9511bce | 2018-04-17 23:29:07 -0700 | [diff] [blame] | 10562 | ret = kern_path(filename, LOOKUP_FOLLOW, |
| 10563 | &filter->path); |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 10564 | if (ret) |
kiyin(尹亮) | 7bdb157 | 2020-11-04 08:23:22 +0300 | [diff] [blame] | 10565 | goto fail; |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 10566 | |
| 10567 | ret = -EINVAL; |
Song Liu | 9511bce | 2018-04-17 23:29:07 -0700 | [diff] [blame] | 10568 | if (!filter->path.dentry || |
| 10569 | !S_ISREG(d_inode(filter->path.dentry) |
| 10570 | ->i_mode)) |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 10571 | goto fail; |
Alexander Shishkin | 6ce77bf | 2017-01-26 11:40:57 +0200 | [diff] [blame] | 10572 | |
| 10573 | event->addr_filters.nr_file_filters++; |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 10574 | } |
| 10575 | |
| 10576 | /* ready to consume more filters */ |
| 10577 | state = IF_STATE_ACTION; |
| 10578 | filter = NULL; |
| 10579 | } |
| 10580 | } |
| 10581 | |
| 10582 | if (state != IF_STATE_ACTION) |
| 10583 | goto fail; |
| 10584 | |
kiyin(尹亮) | 7bdb157 | 2020-11-04 08:23:22 +0300 | [diff] [blame] | 10585 | kfree(filename); |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 10586 | kfree(orig); |
| 10587 | |
| 10588 | return 0; |
| 10589 | |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 10590 | fail: |
kiyin(尹亮) | 7bdb157 | 2020-11-04 08:23:22 +0300 | [diff] [blame] | 10591 | kfree(filename); |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 10592 | free_filters_list(filters); |
| 10593 | kfree(orig); |
| 10594 | |
| 10595 | return ret; |
| 10596 | } |
| 10597 | |
| 10598 | static int |
| 10599 | perf_event_set_addr_filter(struct perf_event *event, char *filter_str) |
| 10600 | { |
| 10601 | LIST_HEAD(filters); |
| 10602 | int ret; |
| 10603 | |
| 10604 | /* |
| 10605 | * Since this is called in perf_ioctl() path, we're already holding |
| 10606 | * ctx::mutex. |
| 10607 | */ |
| 10608 | lockdep_assert_held(&event->ctx->mutex); |
| 10609 | |
| 10610 | if (WARN_ON_ONCE(event->parent)) |
| 10611 | return -EINVAL; |
| 10612 | |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 10613 | ret = perf_event_parse_addr_filter(event, filter_str, &filters); |
| 10614 | if (ret) |
Alexander Shishkin | 6ce77bf | 2017-01-26 11:40:57 +0200 | [diff] [blame] | 10615 | goto fail_clear_files; |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 10616 | |
| 10617 | ret = event->pmu->addr_filters_validate(&filters); |
Alexander Shishkin | 6ce77bf | 2017-01-26 11:40:57 +0200 | [diff] [blame] | 10618 | if (ret) |
| 10619 | goto fail_free_filters; |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 10620 | |
| 10621 | /* remove existing filters, if any */ |
| 10622 | perf_addr_filters_splice(event, &filters); |
| 10623 | |
| 10624 | /* install new filters */ |
| 10625 | perf_event_for_each_child(event, perf_event_addr_filters_apply); |
| 10626 | |
| 10627 | return ret; |
Alexander Shishkin | 6ce77bf | 2017-01-26 11:40:57 +0200 | [diff] [blame] | 10628 | |
| 10629 | fail_free_filters: |
| 10630 | free_filters_list(&filters); |
| 10631 | |
| 10632 | fail_clear_files: |
| 10633 | event->addr_filters.nr_file_filters = 0; |
| 10634 | |
| 10635 | return ret; |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 10636 | } |
| 10637 | |
Alexander Shishkin | c796bbb | 2016-04-27 18:44:42 +0300 | [diff] [blame] | 10638 | static int perf_event_set_filter(struct perf_event *event, void __user *arg) |
| 10639 | { |
Alexander Shishkin | c796bbb | 2016-04-27 18:44:42 +0300 | [diff] [blame] | 10640 | int ret = -EINVAL; |
Song Liu | e12f03d | 2017-12-06 14:45:15 -0800 | [diff] [blame] | 10641 | char *filter_str; |
Alexander Shishkin | c796bbb | 2016-04-27 18:44:42 +0300 | [diff] [blame] | 10642 | |
| 10643 | filter_str = strndup_user(arg, PAGE_SIZE); |
| 10644 | if (IS_ERR(filter_str)) |
| 10645 | return PTR_ERR(filter_str); |
| 10646 | |
Song Liu | e12f03d | 2017-12-06 14:45:15 -0800 | [diff] [blame] | 10647 | #ifdef CONFIG_EVENT_TRACING |
| 10648 | if (perf_event_is_tracing(event)) { |
| 10649 | struct perf_event_context *ctx = event->ctx; |
| 10650 | |
| 10651 | /* |
| 10652 | * Beware, here be dragons!! |
| 10653 | * |
| 10654 | * the tracepoint muck will deadlock against ctx->mutex, but |
| 10655 | * the tracepoint stuff does not actually need it. So |
| 10656 | * temporarily drop ctx->mutex. As per perf_event_ctx_lock() we |
| 10657 | * already have a reference on ctx. |
| 10658 | * |
| 10659 | * This can result in event getting moved to a different ctx, |
| 10660 | * but that does not affect the tracepoint state. |
| 10661 | */ |
| 10662 | mutex_unlock(&ctx->mutex); |
| 10663 | ret = ftrace_profile_set_filter(event, event->attr.config, filter_str); |
| 10664 | mutex_lock(&ctx->mutex); |
| 10665 | } else |
| 10666 | #endif |
| 10667 | if (has_addr_filter(event)) |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 10668 | ret = perf_event_set_addr_filter(event, filter_str); |
Alexander Shishkin | c796bbb | 2016-04-27 18:44:42 +0300 | [diff] [blame] | 10669 | |
| 10670 | kfree(filter_str); |
| 10671 | return ret; |
| 10672 | } |
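/*
 * Illustrative entry point (sketch, values hypothetical): userspace reaches
 * perf_event_set_filter() above via
 *
 *	ioctl(perf_fd, PERF_EVENT_IOC_SET_FILTER,
 *	      "filter 0x1000/0x400@/usr/lib/libfoo.so");
 *
 * with tracing events handed to ftrace_profile_set_filter() and
 * address-filter capable events handed to perf_event_set_addr_filter().
 */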
| 10673 | |
Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 10674 | /* |
| 10675 | * hrtimer based swevent callback |
| 10676 | */ |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 10677 | |
Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 10678 | static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 10679 | { |
Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 10680 | enum hrtimer_restart ret = HRTIMER_RESTART; |
| 10681 | struct perf_sample_data data; |
| 10682 | struct pt_regs *regs; |
| 10683 | struct perf_event *event; |
| 10684 | u64 period; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 10685 | |
Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 10686 | event = container_of(hrtimer, struct perf_event, hw.hrtimer); |
Peter Zijlstra | ba3dd36 | 2011-02-15 12:41:46 +0100 | [diff] [blame] | 10687 | |
| 10688 | if (event->state != PERF_EVENT_STATE_ACTIVE) |
| 10689 | return HRTIMER_NORESTART; |
| 10690 | |
Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 10691 | event->pmu->read(event); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 10692 | |
Robert Richter | fd0d000 | 2012-04-02 20:19:08 +0200 | [diff] [blame] | 10693 | perf_sample_data_init(&data, 0, event->hw.last_period); |
Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 10694 | regs = get_irq_regs(); |
| 10695 | |
| 10696 | if (regs && !perf_exclude_event(event, regs)) { |
Paul E. McKenney | 77aeeeb | 2011-11-10 16:02:52 -0800 | [diff] [blame] | 10697 | if (!(event->attr.exclude_idle && is_idle_task(current))) |
Robert Richter | 33b07b8 | 2012-04-05 18:24:43 +0200 | [diff] [blame] | 10698 | if (__perf_event_overflow(event, 1, &data, regs)) |
Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 10699 | ret = HRTIMER_NORESTART; |
| 10700 | } |
| 10701 | |
| 10702 | period = max_t(u64, 10000, event->hw.sample_period); |
| 10703 | hrtimer_forward_now(hrtimer, ns_to_ktime(period)); |
| 10704 | |
| 10705 | return ret; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 10706 | } |
| 10707 | |
Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 10708 | static void perf_swevent_start_hrtimer(struct perf_event *event) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 10709 | { |
Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 10710 | struct hw_perf_event *hwc = &event->hw; |
Franck Bui-Huu | 5d508e8 | 2010-11-23 16:21:45 +0100 | [diff] [blame] | 10711 | s64 period; |
| 10712 | |
| 10713 | if (!is_sampling_event(event)) |
| 10714 | return; |
Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 10715 | |
Franck Bui-Huu | 5d508e8 | 2010-11-23 16:21:45 +0100 | [diff] [blame] | 10716 | period = local64_read(&hwc->period_left); |
| 10717 | if (period) { |
| 10718 | if (period < 0) |
| 10719 | period = 10000; |
Peter Zijlstra | fa407f3 | 2010-06-24 12:35:12 +0200 | [diff] [blame] | 10720 | |
Franck Bui-Huu | 5d508e8 | 2010-11-23 16:21:45 +0100 | [diff] [blame] | 10721 | local64_set(&hwc->period_left, 0); |
| 10722 | } else { |
| 10723 | period = max_t(u64, 10000, hwc->sample_period); |
| 10724 | } |
Thomas Gleixner | 3497d20 | 2015-04-14 21:09:03 +0000 | [diff] [blame] | 10725 | hrtimer_start(&hwc->hrtimer, ns_to_ktime(period), |
Sebastian Andrzej Siewior | 30f9028 | 2019-07-26 20:30:53 +0200 | [diff] [blame] | 10726 | HRTIMER_MODE_REL_PINNED_HARD); |
Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 10727 | } |
| 10728 | |
| 10729 | static void perf_swevent_cancel_hrtimer(struct perf_event *event) |
| 10730 | { |
| 10731 | struct hw_perf_event *hwc = &event->hw; |
| 10732 | |
Franck Bui-Huu | 6c7e550 | 2010-11-23 16:21:43 +0100 | [diff] [blame] | 10733 | if (is_sampling_event(event)) { |
Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 10734 | ktime_t remaining = hrtimer_get_remaining(&hwc->hrtimer); |
Peter Zijlstra | fa407f3 | 2010-06-24 12:35:12 +0200 | [diff] [blame] | 10735 | local64_set(&hwc->period_left, ktime_to_ns(remaining)); |
Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 10736 | |
| 10737 | hrtimer_cancel(&hwc->hrtimer); |
| 10738 | } |
| 10739 | } |
| 10740 | |
Peter Zijlstra | ba3dd36 | 2011-02-15 12:41:46 +0100 | [diff] [blame] | 10741 | static void perf_swevent_init_hrtimer(struct perf_event *event) |
| 10742 | { |
| 10743 | struct hw_perf_event *hwc = &event->hw; |
| 10744 | |
| 10745 | if (!is_sampling_event(event)) |
| 10746 | return; |
| 10747 | |
Sebastian Andrzej Siewior | 30f9028 | 2019-07-26 20:30:53 +0200 | [diff] [blame] | 10748 | hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD); |
Peter Zijlstra | ba3dd36 | 2011-02-15 12:41:46 +0100 | [diff] [blame] | 10749 | hwc->hrtimer.function = perf_swevent_hrtimer; |
| 10750 | |
| 10751 | /* |
| 10752 | * Since hrtimers have a fixed rate, we can do a static freq->period |
| 10753 | * mapping and avoid the whole period adjust feedback stuff. |
| 10754 | */ |
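/*
 * Worked example (illustrative): sample_freq = 1000 Hz maps to a fixed
 * sample_period of NSEC_PER_SEC / 1000 = 1,000,000 ns, i.e. one hrtimer
 * expiry per millisecond.
 */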
| 10755 | if (event->attr.freq) { |
| 10756 | long freq = event->attr.sample_freq; |
| 10757 | |
| 10758 | event->attr.sample_period = NSEC_PER_SEC / freq; |
| 10759 | hwc->sample_period = event->attr.sample_period; |
| 10760 | local64_set(&hwc->period_left, hwc->sample_period); |
Namhyung Kim | 778141e | 2013-03-18 11:41:46 +0900 | [diff] [blame] | 10761 | hwc->last_period = hwc->sample_period; |
Peter Zijlstra | ba3dd36 | 2011-02-15 12:41:46 +0100 | [diff] [blame] | 10762 | event->attr.freq = 0; |
| 10763 | } |
| 10764 | } |
| 10765 | |
Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 10766 | /* |
| 10767 | * Software event: cpu wall time clock |
| 10768 | */ |
| 10769 | |
| 10770 | static void cpu_clock_event_update(struct perf_event *event) |
| 10771 | { |
Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 10772 | s64 prev; |
| 10773 | u64 now; |
| 10774 | |
Peter Zijlstra | a4eaf7f | 2010-06-16 14:37:10 +0200 | [diff] [blame] | 10775 | now = local_clock(); |
Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 10776 | prev = local64_xchg(&event->hw.prev_count, now); |
| 10777 | local64_add(now - prev, &event->count); |
| 10778 | } |
| 10779 | |
Peter Zijlstra | a4eaf7f | 2010-06-16 14:37:10 +0200 | [diff] [blame] | 10780 | static void cpu_clock_event_start(struct perf_event *event, int flags) |
Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 10781 | { |
Peter Zijlstra | a4eaf7f | 2010-06-16 14:37:10 +0200 | [diff] [blame] | 10782 | local64_set(&event->hw.prev_count, local_clock()); |
Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 10783 | perf_swevent_start_hrtimer(event); |
Peter Zijlstra | a4eaf7f | 2010-06-16 14:37:10 +0200 | [diff] [blame] | 10784 | } |
| 10785 | |
| 10786 | static void cpu_clock_event_stop(struct perf_event *event, int flags) |
| 10787 | { |
| 10788 | perf_swevent_cancel_hrtimer(event); |
| 10789 | cpu_clock_event_update(event); |
| 10790 | } |
| 10791 | |
| 10792 | static int cpu_clock_event_add(struct perf_event *event, int flags) |
| 10793 | { |
| 10794 | if (flags & PERF_EF_START) |
| 10795 | cpu_clock_event_start(event, flags); |
Shaohua Li | 6a694a6 | 2015-02-05 15:55:32 -0800 | [diff] [blame] | 10796 | perf_event_update_userpage(event); |
Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 10797 | |
| 10798 | return 0; |
| 10799 | } |
| 10800 | |
Peter Zijlstra | a4eaf7f | 2010-06-16 14:37:10 +0200 | [diff] [blame] | 10801 | static void cpu_clock_event_del(struct perf_event *event, int flags) |
Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 10802 | { |
Peter Zijlstra | a4eaf7f | 2010-06-16 14:37:10 +0200 | [diff] [blame] | 10803 | cpu_clock_event_stop(event, flags); |
Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 10804 | } |
| 10805 | |
| 10806 | static void cpu_clock_event_read(struct perf_event *event) |
| 10807 | { |
| 10808 | cpu_clock_event_update(event); |
| 10809 | } |
| 10810 | |
| 10811 | static int cpu_clock_event_init(struct perf_event *event) |
| 10812 | { |
| 10813 | if (event->attr.type != PERF_TYPE_SOFTWARE) |
| 10814 | return -ENOENT; |
| 10815 | |
| 10816 | if (event->attr.config != PERF_COUNT_SW_CPU_CLOCK) |
| 10817 | return -ENOENT; |
| 10818 | |
Stephane Eranian | 2481c5f | 2012-02-09 23:20:59 +0100 | [diff] [blame] | 10819 | /* |
| 10820 | * no branch sampling for software events |
| 10821 | */ |
| 10822 | if (has_branch_stack(event)) |
| 10823 | return -EOPNOTSUPP; |
| 10824 | |
Peter Zijlstra | ba3dd36 | 2011-02-15 12:41:46 +0100 | [diff] [blame] | 10825 | perf_swevent_init_hrtimer(event); |
| 10826 | |
Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 10827 | return 0; |
| 10828 | } |
| 10829 | |
| 10830 | static struct pmu perf_cpu_clock = { |
Peter Zijlstra | 89a1e18 | 2010-09-07 17:34:50 +0200 | [diff] [blame] | 10831 | .task_ctx_nr = perf_sw_context, |
| 10832 | |
Peter Zijlstra | 34f4392 | 2015-02-20 14:05:38 +0100 | [diff] [blame] | 10833 | .capabilities = PERF_PMU_CAP_NO_NMI, |
| 10834 | |
Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 10835 | .event_init = cpu_clock_event_init, |
Peter Zijlstra | a4eaf7f | 2010-06-16 14:37:10 +0200 | [diff] [blame] | 10836 | .add = cpu_clock_event_add, |
| 10837 | .del = cpu_clock_event_del, |
| 10838 | .start = cpu_clock_event_start, |
| 10839 | .stop = cpu_clock_event_stop, |
Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 10840 | .read = cpu_clock_event_read, |
| 10841 | }; |
| 10842 | |
| 10843 | /* |
| 10844 | * Software event: task time clock |
| 10845 | */ |
| 10846 | |
| 10847 | static void task_clock_event_update(struct perf_event *event, u64 now) |
| 10848 | { |
| 10849 | u64 prev; |
| 10850 | s64 delta; |
| 10851 | |
| 10852 | prev = local64_xchg(&event->hw.prev_count, now); |
| 10853 | delta = now - prev; |
| 10854 | local64_add(delta, &event->count); |
| 10855 | } |
| 10856 | |
Peter Zijlstra | a4eaf7f | 2010-06-16 14:37:10 +0200 | [diff] [blame] | 10857 | static void task_clock_event_start(struct perf_event *event, int flags) |
Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 10858 | { |
Peter Zijlstra | a4eaf7f | 2010-06-16 14:37:10 +0200 | [diff] [blame] | 10859 | local64_set(&event->hw.prev_count, event->ctx->time); |
Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 10860 | perf_swevent_start_hrtimer(event); |
Peter Zijlstra | a4eaf7f | 2010-06-16 14:37:10 +0200 | [diff] [blame] | 10861 | } |
| 10862 | |
| 10863 | static void task_clock_event_stop(struct perf_event *event, int flags) |
| 10864 | { |
| 10865 | perf_swevent_cancel_hrtimer(event); |
| 10866 | task_clock_event_update(event, event->ctx->time); |
| 10867 | } |
| 10868 | |
| 10869 | static int task_clock_event_add(struct perf_event *event, int flags) |
| 10870 | { |
| 10871 | if (flags & PERF_EF_START) |
| 10872 | task_clock_event_start(event, flags); |
Shaohua Li | 6a694a6 | 2015-02-05 15:55:32 -0800 | [diff] [blame] | 10873 | perf_event_update_userpage(event); |
Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 10874 | |
| 10875 | return 0; |
| 10876 | } |
| 10877 | |
Peter Zijlstra | a4eaf7f | 2010-06-16 14:37:10 +0200 | [diff] [blame] | 10878 | static void task_clock_event_del(struct perf_event *event, int flags) |
Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 10879 | { |
Peter Zijlstra | a4eaf7f | 2010-06-16 14:37:10 +0200 | [diff] [blame] | 10880 | task_clock_event_stop(event, PERF_EF_UPDATE); |
Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 10881 | } |
| 10882 | |
| 10883 | static void task_clock_event_read(struct perf_event *event) |
| 10884 | { |
Peter Zijlstra | 768a06e | 2011-02-22 16:52:24 +0100 | [diff] [blame] | 10885 | u64 now = perf_clock(); |
| 10886 | u64 delta = now - event->ctx->timestamp; |
| 10887 | u64 time = event->ctx->time + delta; |
Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 10888 | |
| 10889 | task_clock_event_update(event, time); |
| 10890 | } |
| 10891 | |
| 10892 | static int task_clock_event_init(struct perf_event *event) |
| 10893 | { |
| 10894 | if (event->attr.type != PERF_TYPE_SOFTWARE) |
| 10895 | return -ENOENT; |
| 10896 | |
| 10897 | if (event->attr.config != PERF_COUNT_SW_TASK_CLOCK) |
| 10898 | return -ENOENT; |
| 10899 | |
Stephane Eranian | 2481c5f | 2012-02-09 23:20:59 +0100 | [diff] [blame] | 10900 | /* |
| 10901 | * no branch sampling for software events |
| 10902 | */ |
| 10903 | if (has_branch_stack(event)) |
| 10904 | return -EOPNOTSUPP; |
| 10905 | |
Peter Zijlstra | ba3dd36 | 2011-02-15 12:41:46 +0100 | [diff] [blame] | 10906 | perf_swevent_init_hrtimer(event); |
| 10907 | |
Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 10908 | return 0; |
| 10909 | } |
| 10910 | |
| 10911 | static struct pmu perf_task_clock = { |
Peter Zijlstra | 89a1e18 | 2010-09-07 17:34:50 +0200 | [diff] [blame] | 10912 | .task_ctx_nr = perf_sw_context, |
| 10913 | |
Peter Zijlstra | 34f4392 | 2015-02-20 14:05:38 +0100 | [diff] [blame] | 10914 | .capabilities = PERF_PMU_CAP_NO_NMI, |
| 10915 | |
Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 10916 | .event_init = task_clock_event_init, |
Peter Zijlstra | a4eaf7f | 2010-06-16 14:37:10 +0200 | [diff] [blame] | 10917 | .add = task_clock_event_add, |
| 10918 | .del = task_clock_event_del, |
| 10919 | .start = task_clock_event_start, |
| 10920 | .stop = task_clock_event_stop, |
Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 10921 | .read = task_clock_event_read, |
| 10922 | }; |
| 10923 | |
Peter Zijlstra | ad5133b | 2010-06-15 12:22:39 +0200 | [diff] [blame] | 10924 | static void perf_pmu_nop_void(struct pmu *pmu) |
| 10925 | { |
| 10926 | } |
| 10927 | |
Sukadev Bhattiprolu | fbbe070 | 2015-09-03 20:07:45 -0700 | [diff] [blame] | 10928 | static void perf_pmu_nop_txn(struct pmu *pmu, unsigned int flags) |
| 10929 | { |
| 10930 | } |
| 10931 | |
Peter Zijlstra | ad5133b | 2010-06-15 12:22:39 +0200 | [diff] [blame] | 10932 | static int perf_pmu_nop_int(struct pmu *pmu) |
| 10933 | { |
| 10934 | return 0; |
| 10935 | } |
| 10936 | |
Jiri Olsa | 81ec3f3 | 2019-02-04 13:35:32 +0100 | [diff] [blame] | 10937 | static int perf_event_nop_int(struct perf_event *event, u64 value) |
| 10938 | { |
| 10939 | return 0; |
| 10940 | } |
| 10941 | |
Geliang Tang | 18ab2cd | 2015-09-27 23:25:50 +0800 | [diff] [blame] | 10942 | static DEFINE_PER_CPU(unsigned int, nop_txn_flags); |
Sukadev Bhattiprolu | fbbe070 | 2015-09-03 20:07:45 -0700 | [diff] [blame] | 10943 | |
| 10944 | static void perf_pmu_start_txn(struct pmu *pmu, unsigned int flags) |
Peter Zijlstra | ad5133b | 2010-06-15 12:22:39 +0200 | [diff] [blame] | 10945 | { |
Sukadev Bhattiprolu | fbbe070 | 2015-09-03 20:07:45 -0700 | [diff] [blame] | 10946 | __this_cpu_write(nop_txn_flags, flags); |
| 10947 | |
| 10948 | if (flags & ~PERF_PMU_TXN_ADD) |
| 10949 | return; |
| 10950 | |
Peter Zijlstra | ad5133b | 2010-06-15 12:22:39 +0200 | [diff] [blame] | 10951 | perf_pmu_disable(pmu); |
| 10952 | } |
| 10953 | |
| 10954 | static int perf_pmu_commit_txn(struct pmu *pmu) |
| 10955 | { |
Sukadev Bhattiprolu | fbbe070 | 2015-09-03 20:07:45 -0700 | [diff] [blame] | 10956 | unsigned int flags = __this_cpu_read(nop_txn_flags); |
| 10957 | |
| 10958 | __this_cpu_write(nop_txn_flags, 0); |
| 10959 | |
| 10960 | if (flags & ~PERF_PMU_TXN_ADD) |
| 10961 | return 0; |
| 10962 | |
Peter Zijlstra | ad5133b | 2010-06-15 12:22:39 +0200 | [diff] [blame] | 10963 | perf_pmu_enable(pmu); |
| 10964 | return 0; |
| 10965 | } |
| 10966 | |
| 10967 | static void perf_pmu_cancel_txn(struct pmu *pmu) |
| 10968 | { |
Sukadev Bhattiprolu | fbbe070 | 2015-09-03 20:07:45 -0700 | [diff] [blame] | 10969 | unsigned int flags = __this_cpu_read(nop_txn_flags); |
| 10970 | |
| 10971 | __this_cpu_write(nop_txn_flags, 0); |
| 10972 | |
| 10973 | if (flags & ~PERF_PMU_TXN_ADD) |
| 10974 | return; |
| 10975 | |
Peter Zijlstra | ad5133b | 2010-06-15 12:22:39 +0200 | [diff] [blame] | 10976 | perf_pmu_enable(pmu); |
| 10977 | } |
| 10978 | |
Peter Zijlstra | 35edc2a | 2011-11-20 20:36:02 +0100 | [diff] [blame] | 10979 | static int perf_event_idx_default(struct perf_event *event) |
| 10980 | { |
Peter Zijlstra | c719f56 | 2014-10-21 11:10:21 +0200 | [diff] [blame] | 10981 | return 0; |
Peter Zijlstra | 35edc2a | 2011-11-20 20:36:02 +0100 | [diff] [blame] | 10982 | } |
| 10983 | |
Peter Zijlstra | 8dc85d547 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 10984 | /* |
| 10985 | * Ensures all contexts with the same task_ctx_nr have the same |
| 10986 | * pmu_cpu_context too. |
| 10987 | */ |
Mark Rutland | 9e31704 | 2014-02-10 17:44:18 +0000 | [diff] [blame] | 10988 | static struct perf_cpu_context __percpu *find_pmu_context(int ctxn) |
Peter Zijlstra | 8dc85d547 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 10989 | { |
| 10990 | struct pmu *pmu; |
| 10991 | |
| 10992 | if (ctxn < 0) |
| 10993 | return NULL; |
| 10994 | |
| 10995 | list_for_each_entry(pmu, &pmus, entry) { |
| 10996 | if (pmu->task_ctx_nr == ctxn) |
| 10997 | return pmu->pmu_cpu_context; |
| 10998 | } |
| 10999 | |
| 11000 | return NULL; |
| 11001 | } |
| 11002 | |
Peter Zijlstra | 5167695 | 2010-12-07 14:18:20 +0100 | [diff] [blame] | 11003 | static void free_pmu_context(struct pmu *pmu) |
| 11004 | { |
Will Deacon | df0062b | 2017-10-03 15:20:50 +0100 | [diff] [blame] | 11005 | /* |
| 11006 | * Static contexts such as perf_sw_context have a global lifetime |
| 11007 | * and may be shared between different PMUs. Avoid freeing them |
| 11008 | * when a single PMU is going away. |
| 11009 | */ |
| 11010 | if (pmu->task_ctx_nr > perf_invalid_context) |
| 11011 | return; |
| 11012 | |
Peter Zijlstra | 5167695 | 2010-12-07 14:18:20 +0100 | [diff] [blame] | 11013 | free_percpu(pmu->pmu_cpu_context); |
Peter Zijlstra | 8dc85d547 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 11014 | } |
Alexander Shishkin | 6e855cd | 2016-04-27 18:44:48 +0300 | [diff] [blame] | 11015 | |
| 11016 | /* |
| 11017 | * Let userspace know that this PMU supports address range filtering: |
| 11018 | */ |
| 11019 | static ssize_t nr_addr_filters_show(struct device *dev, |
| 11020 | struct device_attribute *attr, |
| 11021 | char *page) |
| 11022 | { |
| 11023 | struct pmu *pmu = dev_get_drvdata(dev); |
| 11024 | |
| 11025 | return snprintf(page, PAGE_SIZE - 1, "%d\n", pmu->nr_addr_filters); |
| 11026 | } |
| 11027 | DEVICE_ATTR_RO(nr_addr_filters); |
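/*
 * With the pmu_bus registration below, this typically appears as
 * /sys/bus/event_source/devices/<pmu>/nr_addr_filters (path shown for
 * illustration).
 */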
| 11028 | |
Peter Zijlstra | 2e80a82 | 2010-11-17 23:17:36 +0100 | [diff] [blame] | 11029 | static struct idr pmu_idr; |
Peter Zijlstra | 8dc85d547 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 11030 | |
Peter Zijlstra | abe4340 | 2010-11-17 23:17:37 +0100 | [diff] [blame] | 11031 | static ssize_t |
| 11032 | type_show(struct device *dev, struct device_attribute *attr, char *page) |
| 11033 | { |
| 11034 | struct pmu *pmu = dev_get_drvdata(dev); |
| 11035 | |
| 11036 | return snprintf(page, PAGE_SIZE-1, "%d\n", pmu->type); |
| 11037 | } |
Greg Kroah-Hartman | 90826ca | 2013-08-23 14:24:40 -0700 | [diff] [blame] | 11038 | static DEVICE_ATTR_RO(type); |
Peter Zijlstra | abe4340 | 2010-11-17 23:17:37 +0100 | [diff] [blame] | 11039 | |
Stephane Eranian | 62b8563 | 2013-04-03 14:21:34 +0200 | [diff] [blame] | 11040 | static ssize_t |
| 11041 | perf_event_mux_interval_ms_show(struct device *dev, |
| 11042 | struct device_attribute *attr, |
| 11043 | char *page) |
| 11044 | { |
| 11045 | struct pmu *pmu = dev_get_drvdata(dev); |
| 11046 | |
| 11047 | return snprintf(page, PAGE_SIZE-1, "%d\n", pmu->hrtimer_interval_ms); |
| 11048 | } |
| 11049 | |
Peter Zijlstra | 272325c | 2015-04-15 11:41:58 +0200 | [diff] [blame] | 11050 | static DEFINE_MUTEX(mux_interval_mutex); |
| 11051 | |
Stephane Eranian | 62b8563 | 2013-04-03 14:21:34 +0200 | [diff] [blame] | 11052 | static ssize_t |
| 11053 | perf_event_mux_interval_ms_store(struct device *dev, |
| 11054 | struct device_attribute *attr, |
| 11055 | const char *buf, size_t count) |
| 11056 | { |
| 11057 | struct pmu *pmu = dev_get_drvdata(dev); |
| 11058 | int timer, cpu, ret; |
| 11059 | |
| 11060 | ret = kstrtoint(buf, 0, &timer); |
| 11061 | if (ret) |
| 11062 | return ret; |
| 11063 | |
| 11064 | if (timer < 1) |
| 11065 | return -EINVAL; |
| 11066 | |
| 11067 | /* same value, nothing to do */
| 11068 | if (timer == pmu->hrtimer_interval_ms) |
| 11069 | return count; |
| 11070 | |
Peter Zijlstra | 272325c | 2015-04-15 11:41:58 +0200 | [diff] [blame] | 11071 | mutex_lock(&mux_interval_mutex); |
Stephane Eranian | 62b8563 | 2013-04-03 14:21:34 +0200 | [diff] [blame] | 11072 | pmu->hrtimer_interval_ms = timer; |
| 11073 | |
| 11074 | /* update all cpuctx for this PMU */ |
Thomas Gleixner | a63fbed | 2017-05-24 10:15:34 +0200 | [diff] [blame] | 11075 | cpus_read_lock(); |
Peter Zijlstra | 272325c | 2015-04-15 11:41:58 +0200 | [diff] [blame] | 11076 | for_each_online_cpu(cpu) { |
Stephane Eranian | 62b8563 | 2013-04-03 14:21:34 +0200 | [diff] [blame] | 11077 | struct perf_cpu_context *cpuctx; |
| 11078 | cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu); |
| 11079 | cpuctx->hrtimer_interval = ns_to_ktime(NSEC_PER_MSEC * timer); |
| 11080 | |
Peter Zijlstra | 272325c | 2015-04-15 11:41:58 +0200 | [diff] [blame] | 11081 | cpu_function_call(cpu, |
| 11082 | (remote_function_f)perf_mux_hrtimer_restart, cpuctx); |
Stephane Eranian | 62b8563 | 2013-04-03 14:21:34 +0200 | [diff] [blame] | 11083 | } |
Thomas Gleixner | a63fbed | 2017-05-24 10:15:34 +0200 | [diff] [blame] | 11084 | cpus_read_unlock(); |
Peter Zijlstra | 272325c | 2015-04-15 11:41:58 +0200 | [diff] [blame] | 11085 | mutex_unlock(&mux_interval_mutex); |
Stephane Eranian | 62b8563 | 2013-04-03 14:21:34 +0200 | [diff] [blame] | 11086 | |
| 11087 | return count; |
| 11088 | } |
Greg Kroah-Hartman | 90826ca | 2013-08-23 14:24:40 -0700 | [diff] [blame] | 11089 | static DEVICE_ATTR_RW(perf_event_mux_interval_ms); |
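/*
 * Illustrative usage (PMU name hypothetical):
 *
 *	echo 2 > /sys/bus/event_source/devices/cpu/perf_event_mux_interval_ms
 *
 * lowers the multiplexing interval for that PMU to 2ms and rearms the
 * per-CPU hrtimers via the store handler above.
 */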
Stephane Eranian | 62b8563 | 2013-04-03 14:21:34 +0200 | [diff] [blame] | 11090 | |
Greg Kroah-Hartman | 90826ca | 2013-08-23 14:24:40 -0700 | [diff] [blame] | 11091 | static struct attribute *pmu_dev_attrs[] = { |
| 11092 | &dev_attr_type.attr, |
| 11093 | &dev_attr_perf_event_mux_interval_ms.attr, |
| 11094 | NULL, |
Peter Zijlstra | abe4340 | 2010-11-17 23:17:37 +0100 | [diff] [blame] | 11095 | }; |
Greg Kroah-Hartman | 90826ca | 2013-08-23 14:24:40 -0700 | [diff] [blame] | 11096 | ATTRIBUTE_GROUPS(pmu_dev); |
Peter Zijlstra | abe4340 | 2010-11-17 23:17:37 +0100 | [diff] [blame] | 11097 | |
| 11098 | static int pmu_bus_running; |
| 11099 | static struct bus_type pmu_bus = { |
| 11100 | .name = "event_source", |
Greg Kroah-Hartman | 90826ca | 2013-08-23 14:24:40 -0700 | [diff] [blame] | 11101 | .dev_groups = pmu_dev_groups, |
Peter Zijlstra | abe4340 | 2010-11-17 23:17:37 +0100 | [diff] [blame] | 11102 | }; |
| 11103 | |
| 11104 | static void pmu_dev_release(struct device *dev) |
| 11105 | { |
| 11106 | kfree(dev); |
| 11107 | } |
| 11108 | |
| 11109 | static int pmu_dev_alloc(struct pmu *pmu) |
| 11110 | { |
| 11111 | int ret = -ENOMEM; |
| 11112 | |
| 11113 | pmu->dev = kzalloc(sizeof(struct device), GFP_KERNEL); |
| 11114 | if (!pmu->dev) |
| 11115 | goto out; |
| 11116 | |
Peter Zijlstra | 0c9d42e | 2011-11-20 23:30:47 +0100 | [diff] [blame] | 11117 | pmu->dev->groups = pmu->attr_groups; |
Peter Zijlstra | abe4340 | 2010-11-17 23:17:37 +0100 | [diff] [blame] | 11118 | device_initialize(pmu->dev); |
| 11119 | ret = dev_set_name(pmu->dev, "%s", pmu->name); |
| 11120 | if (ret) |
| 11121 | goto free_dev; |
| 11122 | |
| 11123 | dev_set_drvdata(pmu->dev, pmu); |
| 11124 | pmu->dev->bus = &pmu_bus; |
| 11125 | pmu->dev->release = pmu_dev_release; |
| 11126 | ret = device_add(pmu->dev); |
| 11127 | if (ret) |
| 11128 | goto free_dev; |
| 11129 | |
Alexander Shishkin | 6e855cd | 2016-04-27 18:44:48 +0300 | [diff] [blame] | 11130 | /* For PMUs with address filters, throw in an extra attribute: */ |
| 11131 | if (pmu->nr_addr_filters) |
| 11132 | ret = device_create_file(pmu->dev, &dev_attr_nr_addr_filters); |
| 11133 | |
| 11134 | if (ret) |
| 11135 | goto del_dev; |
| 11136 | |
Jiri Olsa | f3a3a82 | 2019-05-12 17:55:11 +0200 | [diff] [blame] | 11137 | if (pmu->attr_update) |
| 11138 | ret = sysfs_update_groups(&pmu->dev->kobj, pmu->attr_update); |
| 11139 | |
| 11140 | if (ret) |
| 11141 | goto del_dev; |
| 11142 | |
Peter Zijlstra | abe4340 | 2010-11-17 23:17:37 +0100 | [diff] [blame] | 11143 | out: |
| 11144 | return ret; |
| 11145 | |
Alexander Shishkin | 6e855cd | 2016-04-27 18:44:48 +0300 | [diff] [blame] | 11146 | del_dev: |
| 11147 | device_del(pmu->dev); |
| 11148 | |
Peter Zijlstra | abe4340 | 2010-11-17 23:17:37 +0100 | [diff] [blame] | 11149 | free_dev: |
| 11150 | put_device(pmu->dev); |
| 11151 | goto out; |
| 11152 | } |
| 11153 | |
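/*
 * Hedged example (not upstream code): pmu_dev_alloc() above hangs
 * pmu->attr_groups (and later pmu->attr_update) off the new device, so a
 * driver only has to describe its sysfs layout.  All "my_*" names are
 * hypothetical; PMU_FORMAT_ATTR() is the usual way drivers publish a
 * "format" directory under /sys/bus/event_source/devices/<pmu>/.
 *
 *	PMU_FORMAT_ATTR(event, "config:0-7");
 *
 *	static struct attribute *my_format_attrs[] = {
 *		&format_attr_event.attr,
 *		NULL,
 *	};
 *
 *	static const struct attribute_group my_format_group = {
 *		.name	= "format",
 *		.attrs	= my_format_attrs,
 *	};
 *
 *	static const struct attribute_group *my_attr_groups[] = {
 *		&my_format_group,
 *		NULL,
 *	};
 *
 *	// set before registration: my_pmu.attr_groups = my_attr_groups;
 */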
Peter Zijlstra | 547e9fd | 2011-01-19 12:51:39 +0100 | [diff] [blame] | 11154 | static struct lock_class_key cpuctx_mutex; |
Peter Zijlstra | facc430 | 2011-04-09 21:17:42 +0200 | [diff] [blame] | 11155 | static struct lock_class_key cpuctx_lock; |
Peter Zijlstra | 547e9fd | 2011-01-19 12:51:39 +0100 | [diff] [blame] | 11156 | |
Mischa Jonker | 03d8e80 | 2013-06-04 11:45:48 +0200 | [diff] [blame] | 11157 | int perf_pmu_register(struct pmu *pmu, const char *name, int type) |
Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 11158 | { |
Peter Zijlstra | 66d258c | 2019-10-17 20:31:03 +0200 | [diff] [blame] | 11159 | int cpu, ret, max = PERF_TYPE_MAX; |
Peter Zijlstra | 33696fc | 2010-06-14 08:49:00 +0200 | [diff] [blame] | 11160 | |
Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 11161 | mutex_lock(&pmus_lock); |
Peter Zijlstra | 33696fc | 2010-06-14 08:49:00 +0200 | [diff] [blame] | 11162 | ret = -ENOMEM; |
| 11163 | pmu->pmu_disable_count = alloc_percpu(int); |
| 11164 | if (!pmu->pmu_disable_count) |
| 11165 | goto unlock; |
Peter Zijlstra | ad5133b | 2010-06-15 12:22:39 +0200 | [diff] [blame] | 11166 | |
Peter Zijlstra | 2e80a82 | 2010-11-17 23:17:36 +0100 | [diff] [blame] | 11167 | pmu->type = -1; |
| 11168 | if (!name) |
| 11169 | goto skip_type; |
| 11170 | pmu->name = name; |
| 11171 | |
Peter Zijlstra | 66d258c | 2019-10-17 20:31:03 +0200 | [diff] [blame] | 11172 | if (type != PERF_TYPE_SOFTWARE) { |
| 11173 | if (type >= 0) |
| 11174 | max = type; |
| 11175 | |
| 11176 | ret = idr_alloc(&pmu_idr, pmu, max, 0, GFP_KERNEL); |
| 11177 | if (ret < 0) |
Peter Zijlstra | 2e80a82 | 2010-11-17 23:17:36 +0100 | [diff] [blame] | 11178 | goto free_pdc; |
Peter Zijlstra | 66d258c | 2019-10-17 20:31:03 +0200 | [diff] [blame] | 11179 | |
| 11180 | WARN_ON(type >= 0 && ret != type); |
| 11181 | |
| 11182 | type = ret; |
Peter Zijlstra | 2e80a82 | 2010-11-17 23:17:36 +0100 | [diff] [blame] | 11183 | } |
| 11184 | pmu->type = type; |
| 11185 | |
Peter Zijlstra | abe4340 | 2010-11-17 23:17:37 +0100 | [diff] [blame] | 11186 | if (pmu_bus_running) { |
| 11187 | ret = pmu_dev_alloc(pmu); |
| 11188 | if (ret) |
| 11189 | goto free_idr; |
| 11190 | } |
| 11191 | |
Peter Zijlstra | 2e80a82 | 2010-11-17 23:17:36 +0100 | [diff] [blame] | 11192 | skip_type: |
Peter Zijlstra | 2665784 | 2016-03-22 22:09:18 +0100 | [diff] [blame] | 11193 | if (pmu->task_ctx_nr == perf_hw_context) { |
| 11194 | static int hw_context_taken = 0; |
| 11195 | |
Mark Rutland | 5101ef2 | 2016-04-26 11:33:46 +0100 | [diff] [blame] | 11196 | /* |
| 11197 | * Other than systems with heterogeneous CPUs, it never makes |
| 11198 | * sense for two PMUs to share perf_hw_context. PMUs which are |
| 11199 | * uncore must use perf_invalid_context. |
| 11200 | */ |
| 11201 | if (WARN_ON_ONCE(hw_context_taken && |
| 11202 | !(pmu->capabilities & PERF_PMU_CAP_HETEROGENEOUS_CPUS))) |
Peter Zijlstra | 2665784 | 2016-03-22 22:09:18 +0100 | [diff] [blame] | 11203 | pmu->task_ctx_nr = perf_invalid_context; |
| 11204 | |
| 11205 | hw_context_taken = 1; |
| 11206 | } |
| 11207 | |
Peter Zijlstra | 8dc85d547 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 11208 | pmu->pmu_cpu_context = find_pmu_context(pmu->task_ctx_nr); |
| 11209 | if (pmu->pmu_cpu_context) |
| 11210 | goto got_cpu_context; |
| 11211 | |
Wei Yongjun | c481420 | 2013-04-12 11:05:54 +0800 | [diff] [blame] | 11212 | ret = -ENOMEM; |
Peter Zijlstra | 108b02c | 2010-09-06 14:32:03 +0200 | [diff] [blame] | 11213 | pmu->pmu_cpu_context = alloc_percpu(struct perf_cpu_context); |
| 11214 | if (!pmu->pmu_cpu_context) |
Peter Zijlstra | abe4340 | 2010-11-17 23:17:37 +0100 | [diff] [blame] | 11215 | goto free_dev; |
Peter Zijlstra | 108b02c | 2010-09-06 14:32:03 +0200 | [diff] [blame] | 11216 | |
| 11217 | for_each_possible_cpu(cpu) { |
| 11218 | struct perf_cpu_context *cpuctx; |
| 11219 | |
| 11220 | cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu); |
Peter Zijlstra | eb18447 | 2010-09-07 15:55:13 +0200 | [diff] [blame] | 11221 | __perf_event_init_context(&cpuctx->ctx); |
Peter Zijlstra | 547e9fd | 2011-01-19 12:51:39 +0100 | [diff] [blame] | 11222 | lockdep_set_class(&cpuctx->ctx.mutex, &cpuctx_mutex); |
Peter Zijlstra | facc430 | 2011-04-09 21:17:42 +0200 | [diff] [blame] | 11223 | lockdep_set_class(&cpuctx->ctx.lock, &cpuctx_lock); |
Peter Zijlstra | 108b02c | 2010-09-06 14:32:03 +0200 | [diff] [blame] | 11224 | cpuctx->ctx.pmu = pmu; |
Thomas Gleixner | a63fbed | 2017-05-24 10:15:34 +0200 | [diff] [blame] | 11225 | cpuctx->online = cpumask_test_cpu(cpu, perf_online_mask); |
Stephane Eranian | 9e63020 | 2013-04-03 14:21:33 +0200 | [diff] [blame] | 11226 | |
Peter Zijlstra | 272325c | 2015-04-15 11:41:58 +0200 | [diff] [blame] | 11227 | __perf_mux_hrtimer_init(cpuctx, cpu); |
Ian Rogers | 836196be | 2020-02-13 23:51:31 -0800 | [diff] [blame] | 11228 | |
| 11229 | cpuctx->heap_size = ARRAY_SIZE(cpuctx->heap_default); |
| 11230 | cpuctx->heap = cpuctx->heap_default; |
Peter Zijlstra | 108b02c | 2010-09-06 14:32:03 +0200 | [diff] [blame] | 11231 | } |
| 11232 | |
Peter Zijlstra | 8dc85d547 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 11233 | got_cpu_context: |
Peter Zijlstra | ad5133b | 2010-06-15 12:22:39 +0200 | [diff] [blame] | 11234 | if (!pmu->start_txn) { |
| 11235 | if (pmu->pmu_enable) { |
| 11236 | /* |
| 11237 | * If we have pmu_enable/pmu_disable calls, install |
| 11238 | * transaction stubs that use that to try and batch |
| 11239 | * hardware accesses. |
| 11240 | */ |
| 11241 | pmu->start_txn = perf_pmu_start_txn; |
| 11242 | pmu->commit_txn = perf_pmu_commit_txn; |
| 11243 | pmu->cancel_txn = perf_pmu_cancel_txn; |
| 11244 | } else { |
Sukadev Bhattiprolu | fbbe070 | 2015-09-03 20:07:45 -0700 | [diff] [blame] | 11245 | pmu->start_txn = perf_pmu_nop_txn; |
Peter Zijlstra | ad5133b | 2010-06-15 12:22:39 +0200 | [diff] [blame] | 11246 | pmu->commit_txn = perf_pmu_nop_int; |
| 11247 | pmu->cancel_txn = perf_pmu_nop_void; |
| 11248 | } |
| 11249 | } |
| 11250 | |
| 11251 | if (!pmu->pmu_enable) { |
| 11252 | pmu->pmu_enable = perf_pmu_nop_void; |
| 11253 | pmu->pmu_disable = perf_pmu_nop_void; |
| 11254 | } |
| 11255 | |
Jiri Olsa | 81ec3f3 | 2019-02-04 13:35:32 +0100 | [diff] [blame] | 11256 | if (!pmu->check_period) |
| 11257 | pmu->check_period = perf_event_nop_int; |
| 11258 | |
Peter Zijlstra | 35edc2a | 2011-11-20 20:36:02 +0100 | [diff] [blame] | 11259 | if (!pmu->event_idx) |
| 11260 | pmu->event_idx = perf_event_idx_default; |
| 11261 | |
Liang, Kan | d44f821 | 2019-10-22 11:13:09 +0200 | [diff] [blame] | 11262 | /* |
| 11263 | * Ensure the TYPE_SOFTWARE PMUs are at the head of the list, |
| 11264 | * since these cannot be in the IDR. This way the linear search |
 | 11265 | 	 * is fast, assuming a valid software event is provided. |
| 11266 | */ |
| 11267 | if (type == PERF_TYPE_SOFTWARE || !name) |
| 11268 | list_add_rcu(&pmu->entry, &pmus); |
| 11269 | else |
| 11270 | list_add_tail_rcu(&pmu->entry, &pmus); |
| 11271 | |
Alexander Shishkin | bed5b25 | 2015-01-30 12:31:06 +0200 | [diff] [blame] | 11272 | atomic_set(&pmu->exclusive_cnt, 0); |
Peter Zijlstra | 33696fc | 2010-06-14 08:49:00 +0200 | [diff] [blame] | 11273 | ret = 0; |
| 11274 | unlock: |
Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 11275 | mutex_unlock(&pmus_lock); |
| 11276 | |
Peter Zijlstra | 33696fc | 2010-06-14 08:49:00 +0200 | [diff] [blame] | 11277 | return ret; |
Peter Zijlstra | 108b02c | 2010-09-06 14:32:03 +0200 | [diff] [blame] | 11278 | |
Peter Zijlstra | abe4340 | 2010-11-17 23:17:37 +0100 | [diff] [blame] | 11279 | free_dev: |
| 11280 | device_del(pmu->dev); |
| 11281 | put_device(pmu->dev); |
| 11282 | |
Peter Zijlstra | 2e80a82 | 2010-11-17 23:17:36 +0100 | [diff] [blame] | 11283 | free_idr: |
Peter Zijlstra | 66d258c | 2019-10-17 20:31:03 +0200 | [diff] [blame] | 11284 | if (pmu->type != PERF_TYPE_SOFTWARE) |
Peter Zijlstra | 2e80a82 | 2010-11-17 23:17:36 +0100 | [diff] [blame] | 11285 | idr_remove(&pmu_idr, pmu->type); |
| 11286 | |
Peter Zijlstra | 108b02c | 2010-09-06 14:32:03 +0200 | [diff] [blame] | 11287 | free_pdc: |
| 11288 | free_percpu(pmu->pmu_disable_count); |
| 11289 | goto unlock; |
Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 11290 | } |
Yan, Zheng | c464c76 | 2014-03-18 16:56:41 +0800 | [diff] [blame] | 11291 | EXPORT_SYMBOL_GPL(perf_pmu_register); |
Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 11292 | |
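/*
 * Hedged example (not upstream code): a minimal caller of
 * perf_pmu_register().  A negative type requests a dynamically allocated id
 * from the IDR path above.  The callbacks other than event_init
 * (my_add/my_del/my_start/my_stop/my_read) are hypothetical stubs a real
 * driver would provide.
 *
 *	static int my_event_init(struct perf_event *event)
 *	{
 *		if (event->attr.type != event->pmu->type)
 *			return -ENOENT;	// not ours: the core keeps searching
 *		return 0;
 *	}
 *
 *	static struct pmu my_pmu = {
 *		.task_ctx_nr	= perf_invalid_context,	// uncore-style PMU
 *		.event_init	= my_event_init,
 *		.add		= my_add,
 *		.del		= my_del,
 *		.start		= my_start,
 *		.stop		= my_stop,
 *		.read		= my_read,
 *	};
 *
 *	// ret = perf_pmu_register(&my_pmu, "my_pmu", -1);
 *	// ...
 *	// perf_pmu_unregister(&my_pmu);
 */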
| 11293 | void perf_pmu_unregister(struct pmu *pmu) |
| 11294 | { |
| 11295 | mutex_lock(&pmus_lock); |
| 11296 | list_del_rcu(&pmu->entry); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 11297 | |
| 11298 | /* |
Peter Zijlstra | cde8e88 | 2010-09-13 11:06:55 +0200 | [diff] [blame] | 11299 | * We dereference the pmu list under both SRCU and regular RCU, so |
| 11300 | * synchronize against both of those. |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 11301 | */ |
Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 11302 | synchronize_srcu(&pmus_srcu); |
Peter Zijlstra | cde8e88 | 2010-09-13 11:06:55 +0200 | [diff] [blame] | 11303 | synchronize_rcu(); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 11304 | |
Peter Zijlstra | 33696fc | 2010-06-14 08:49:00 +0200 | [diff] [blame] | 11305 | free_percpu(pmu->pmu_disable_count); |
Peter Zijlstra | 66d258c | 2019-10-17 20:31:03 +0200 | [diff] [blame] | 11306 | if (pmu->type != PERF_TYPE_SOFTWARE) |
Peter Zijlstra | 2e80a82 | 2010-11-17 23:17:36 +0100 | [diff] [blame] | 11307 | idr_remove(&pmu_idr, pmu->type); |
Peter Zijlstra | a9f9772 | 2018-09-25 17:58:35 +0200 | [diff] [blame] | 11308 | if (pmu_bus_running) { |
Jiri Olsa | 0933840 | 2016-10-20 13:10:11 +0200 | [diff] [blame] | 11309 | if (pmu->nr_addr_filters) |
| 11310 | device_remove_file(pmu->dev, &dev_attr_nr_addr_filters); |
| 11311 | device_del(pmu->dev); |
| 11312 | put_device(pmu->dev); |
| 11313 | } |
Peter Zijlstra | 5167695 | 2010-12-07 14:18:20 +0100 | [diff] [blame] | 11314 | free_pmu_context(pmu); |
Peter Zijlstra | a9f9772 | 2018-09-25 17:58:35 +0200 | [diff] [blame] | 11315 | mutex_unlock(&pmus_lock); |
Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 11316 | } |
Yan, Zheng | c464c76 | 2014-03-18 16:56:41 +0800 | [diff] [blame] | 11317 | EXPORT_SYMBOL_GPL(perf_pmu_unregister); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 11318 | |
Kan Liang | e321d02 | 2019-05-28 15:08:30 -0700 | [diff] [blame] | 11319 | static inline bool has_extended_regs(struct perf_event *event) |
| 11320 | { |
| 11321 | return (event->attr.sample_regs_user & PERF_REG_EXTENDED_MASK) || |
| 11322 | (event->attr.sample_regs_intr & PERF_REG_EXTENDED_MASK); |
| 11323 | } |
| 11324 | |
Mark Rutland | cc34b98 | 2015-01-07 14:56:51 +0000 | [diff] [blame] | 11325 | static int perf_try_init_event(struct pmu *pmu, struct perf_event *event) |
| 11326 | { |
Peter Zijlstra | ccd41c8 | 2015-02-25 15:56:04 +0100 | [diff] [blame] | 11327 | struct perf_event_context *ctx = NULL; |
Mark Rutland | cc34b98 | 2015-01-07 14:56:51 +0000 | [diff] [blame] | 11328 | int ret; |
| 11329 | |
| 11330 | if (!try_module_get(pmu->module)) |
| 11331 | return -ENODEV; |
Peter Zijlstra | ccd41c8 | 2015-02-25 15:56:04 +0100 | [diff] [blame] | 11332 | |
Peter Zijlstra | 0c7296c | 2018-01-09 21:23:02 +0100 | [diff] [blame] | 11333 | /* |
| 11334 | * A number of pmu->event_init() methods iterate the sibling_list to, |
| 11335 | * for example, validate if the group fits on the PMU. Therefore, |
| 11336 | * if this is a sibling event, acquire the ctx->mutex to protect |
| 11337 | * the sibling_list. |
| 11338 | */ |
| 11339 | if (event->group_leader != event && pmu->task_ctx_nr != perf_sw_context) { |
Peter Zijlstra | 8b10c5e | 2015-05-01 16:08:46 +0200 | [diff] [blame] | 11340 | /* |
| 11341 | * This ctx->mutex can nest when we're called through |
| 11342 | * inheritance. See the perf_event_ctx_lock_nested() comment. |
| 11343 | */ |
| 11344 | ctx = perf_event_ctx_lock_nested(event->group_leader, |
| 11345 | SINGLE_DEPTH_NESTING); |
Peter Zijlstra | ccd41c8 | 2015-02-25 15:56:04 +0100 | [diff] [blame] | 11346 | BUG_ON(!ctx); |
| 11347 | } |
| 11348 | |
Mark Rutland | cc34b98 | 2015-01-07 14:56:51 +0000 | [diff] [blame] | 11349 | event->pmu = pmu; |
| 11350 | ret = pmu->event_init(event); |
Peter Zijlstra | ccd41c8 | 2015-02-25 15:56:04 +0100 | [diff] [blame] | 11351 | |
| 11352 | if (ctx) |
| 11353 | perf_event_ctx_unlock(event->group_leader, ctx); |
| 11354 | |
Andrew Murray | cc6795a | 2019-01-10 13:53:25 +0000 | [diff] [blame] | 11355 | if (!ret) { |
Kan Liang | e321d02 | 2019-05-28 15:08:30 -0700 | [diff] [blame] | 11356 | if (!(pmu->capabilities & PERF_PMU_CAP_EXTENDED_REGS) && |
| 11357 | has_extended_regs(event)) |
| 11358 | ret = -EOPNOTSUPP; |
| 11359 | |
Andrew Murray | cc6795a | 2019-01-10 13:53:25 +0000 | [diff] [blame] | 11360 | if (pmu->capabilities & PERF_PMU_CAP_NO_EXCLUDE && |
Kan Liang | e321d02 | 2019-05-28 15:08:30 -0700 | [diff] [blame] | 11361 | event_has_any_exclude_flag(event)) |
Andrew Murray | cc6795a | 2019-01-10 13:53:25 +0000 | [diff] [blame] | 11362 | ret = -EINVAL; |
Kan Liang | e321d02 | 2019-05-28 15:08:30 -0700 | [diff] [blame] | 11363 | |
| 11364 | if (ret && event->destroy) |
| 11365 | event->destroy(event); |
Andrew Murray | cc6795a | 2019-01-10 13:53:25 +0000 | [diff] [blame] | 11366 | } |
| 11367 | |
Mark Rutland | cc34b98 | 2015-01-07 14:56:51 +0000 | [diff] [blame] | 11368 | if (ret) |
| 11369 | module_put(pmu->module); |
| 11370 | |
| 11371 | return ret; |
| 11372 | } |
| 11373 | |
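/*
 * Rough sketch (an addition, not upstream code): the ctx->mutex taken in
 * perf_try_init_event() exists because many event_init() implementations walk
 * the group's sibling list, roughly like the hypothetical validator below
 * (my_max_counters is an assumed per-driver limit).
 *
 *	static int my_validate_group(struct perf_event *event)
 *	{
 *		struct perf_event *leader = event->group_leader;
 *		struct perf_event *sibling;
 *		int n = 1;
 *
 *		for_each_sibling_event(sibling, leader) {
 *			if (sibling->pmu == event->pmu)
 *				n++;
 *		}
 *		return n > my_max_counters ? -EINVAL : 0;
 *	}
 */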
Geliang Tang | 18ab2cd | 2015-09-27 23:25:50 +0800 | [diff] [blame] | 11374 | static struct pmu *perf_init_event(struct perf_event *event) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 11375 | { |
Kan Liang | 55bcf6e | 2021-04-12 07:31:01 -0700 | [diff] [blame] | 11376 | bool extended_type = false; |
Peter Zijlstra | 66d258c | 2019-10-17 20:31:03 +0200 | [diff] [blame] | 11377 | int idx, type, ret; |
Dan Carpenter | 85c617a | 2017-05-22 12:03:49 +0300 | [diff] [blame] | 11378 | struct pmu *pmu; |
Frederic Weisbecker | 76e1d90 | 2010-04-05 15:35:57 +0200 | [diff] [blame] | 11379 | |
Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 11380 | idx = srcu_read_lock(&pmus_srcu); |
Peter Zijlstra | 2e80a82 | 2010-11-17 23:17:36 +0100 | [diff] [blame] | 11381 | |
Kan Liang | 4099931 | 2017-01-18 08:21:01 -0500 | [diff] [blame] | 11382 | /* Try parent's PMU first: */ |
| 11383 | if (event->parent && event->parent->pmu) { |
| 11384 | pmu = event->parent->pmu; |
| 11385 | ret = perf_try_init_event(pmu, event); |
| 11386 | if (!ret) |
| 11387 | goto unlock; |
| 11388 | } |
| 11389 | |
Peter Zijlstra | 66d258c | 2019-10-17 20:31:03 +0200 | [diff] [blame] | 11390 | /* |
| 11391 | * PERF_TYPE_HARDWARE and PERF_TYPE_HW_CACHE |
| 11392 | * are often aliases for PERF_TYPE_RAW. |
| 11393 | */ |
| 11394 | type = event->attr.type; |
Kan Liang | 55bcf6e | 2021-04-12 07:31:01 -0700 | [diff] [blame] | 11395 | if (type == PERF_TYPE_HARDWARE || type == PERF_TYPE_HW_CACHE) { |
| 11396 | type = event->attr.config >> PERF_PMU_TYPE_SHIFT; |
| 11397 | if (!type) { |
| 11398 | type = PERF_TYPE_RAW; |
| 11399 | } else { |
| 11400 | extended_type = true; |
| 11401 | event->attr.config &= PERF_HW_EVENT_MASK; |
| 11402 | } |
| 11403 | } |
Peter Zijlstra | 66d258c | 2019-10-17 20:31:03 +0200 | [diff] [blame] | 11404 | |
| 11405 | again: |
Peter Zijlstra | 2e80a82 | 2010-11-17 23:17:36 +0100 | [diff] [blame] | 11406 | rcu_read_lock(); |
Peter Zijlstra | 66d258c | 2019-10-17 20:31:03 +0200 | [diff] [blame] | 11407 | pmu = idr_find(&pmu_idr, type); |
Peter Zijlstra | 2e80a82 | 2010-11-17 23:17:36 +0100 | [diff] [blame] | 11408 | rcu_read_unlock(); |
Lin Ming | 940c5b2 | 2011-02-27 21:13:31 +0800 | [diff] [blame] | 11409 | if (pmu) { |
Kan Liang | 55bcf6e | 2021-04-12 07:31:01 -0700 | [diff] [blame] | 11410 | if (event->attr.type != type && type != PERF_TYPE_RAW && |
| 11411 | !(pmu->capabilities & PERF_PMU_CAP_EXTENDED_HW_TYPE)) |
| 11412 | goto fail; |
| 11413 | |
Mark Rutland | cc34b98 | 2015-01-07 14:56:51 +0000 | [diff] [blame] | 11414 | ret = perf_try_init_event(pmu, event); |
Kan Liang | 55bcf6e | 2021-04-12 07:31:01 -0700 | [diff] [blame] | 11415 | if (ret == -ENOENT && event->attr.type != type && !extended_type) { |
Peter Zijlstra | 66d258c | 2019-10-17 20:31:03 +0200 | [diff] [blame] | 11416 | type = event->attr.type; |
| 11417 | goto again; |
| 11418 | } |
| 11419 | |
Lin Ming | 940c5b2 | 2011-02-27 21:13:31 +0800 | [diff] [blame] | 11420 | if (ret) |
| 11421 | pmu = ERR_PTR(ret); |
Peter Zijlstra | 66d258c | 2019-10-17 20:31:03 +0200 | [diff] [blame] | 11422 | |
Peter Zijlstra | 2e80a82 | 2010-11-17 23:17:36 +0100 | [diff] [blame] | 11423 | goto unlock; |
Lin Ming | 940c5b2 | 2011-02-27 21:13:31 +0800 | [diff] [blame] | 11424 | } |
Peter Zijlstra | 2e80a82 | 2010-11-17 23:17:36 +0100 | [diff] [blame] | 11425 | |
Sebastian Andrzej Siewior | 9f0bff1 | 2019-11-19 13:14:29 +0100 | [diff] [blame] | 11426 | list_for_each_entry_rcu(pmu, &pmus, entry, lockdep_is_held(&pmus_srcu)) { |
Mark Rutland | cc34b98 | 2015-01-07 14:56:51 +0000 | [diff] [blame] | 11427 | ret = perf_try_init_event(pmu, event); |
Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 11428 | if (!ret) |
Peter Zijlstra | e5f4d33 | 2010-09-10 17:38:06 +0200 | [diff] [blame] | 11429 | goto unlock; |
Frederic Weisbecker | 76e1d90 | 2010-04-05 15:35:57 +0200 | [diff] [blame] | 11430 | |
Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 11431 | if (ret != -ENOENT) { |
| 11432 | pmu = ERR_PTR(ret); |
Peter Zijlstra | e5f4d33 | 2010-09-10 17:38:06 +0200 | [diff] [blame] | 11433 | goto unlock; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 11434 | } |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 11435 | } |
Kan Liang | 55bcf6e | 2021-04-12 07:31:01 -0700 | [diff] [blame] | 11436 | fail: |
Peter Zijlstra | e5f4d33 | 2010-09-10 17:38:06 +0200 | [diff] [blame] | 11437 | pmu = ERR_PTR(-ENOENT); |
| 11438 | unlock: |
Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 11439 | srcu_read_unlock(&pmus_srcu, idx); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 11440 | |
| 11441 | return pmu; |
| 11442 | } |
| 11443 | |
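/*
 * Hedged example (not upstream code): how user space reaches the "extended
 * type" path above.  attr.type stays PERF_TYPE_HARDWARE while the high bits
 * of attr.config carry the target PMU id read from sysfs; the split is the
 * one defined by PERF_PMU_TYPE_SHIFT/PERF_HW_EVENT_MASK in the uapi header.
 * An event_init() returning -ENOENT sends the search on to the next PMU; any
 * other error aborts it.
 *
 *	// pmu_type read from /sys/bus/event_source/devices/<pmu>/type
 *	struct perf_event_attr attr = { 0 };
 *
 *	attr.size   = sizeof(attr);
 *	attr.type   = PERF_TYPE_HARDWARE;
 *	attr.config = ((__u64)pmu_type << PERF_PMU_TYPE_SHIFT) |
 *		      PERF_COUNT_HW_CPU_CYCLES;
 */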
Kan Liang | f2fb6be | 2016-03-23 11:24:37 -0700 | [diff] [blame] | 11444 | static void attach_sb_event(struct perf_event *event) |
| 11445 | { |
| 11446 | struct pmu_event_list *pel = per_cpu_ptr(&pmu_sb_events, event->cpu); |
| 11447 | |
| 11448 | raw_spin_lock(&pel->lock); |
| 11449 | list_add_rcu(&event->sb_list, &pel->list); |
| 11450 | raw_spin_unlock(&pel->lock); |
| 11451 | } |
| 11452 | |
Peter Zijlstra | aab5b71 | 2016-05-12 17:26:46 +0200 | [diff] [blame] | 11453 | /* |
| 11454 | * We keep a list of all !task (and therefore per-cpu) events |
| 11455 | * that need to receive side-band records. |
| 11456 | * |
| 11457 | * This avoids having to scan all the various PMU per-cpu contexts |
| 11458 | * looking for them. |
| 11459 | */ |
Kan Liang | f2fb6be | 2016-03-23 11:24:37 -0700 | [diff] [blame] | 11460 | static void account_pmu_sb_event(struct perf_event *event) |
| 11461 | { |
David Carrillo-Cisneros | a4f144e | 2016-06-01 12:33:05 -0700 | [diff] [blame] | 11462 | if (is_sb_event(event)) |
Kan Liang | f2fb6be | 2016-03-23 11:24:37 -0700 | [diff] [blame] | 11463 | attach_sb_event(event); |
| 11464 | } |
| 11465 | |
Frederic Weisbecker | 4beb31f | 2013-07-23 02:31:02 +0200 | [diff] [blame] | 11466 | static void account_event_cpu(struct perf_event *event, int cpu) |
| 11467 | { |
| 11468 | if (event->parent) |
| 11469 | return; |
| 11470 | |
Frederic Weisbecker | 4beb31f | 2013-07-23 02:31:02 +0200 | [diff] [blame] | 11471 | if (is_cgroup_event(event)) |
| 11472 | atomic_inc(&per_cpu(perf_cgroup_events, cpu)); |
| 11473 | } |
| 11474 | |
Frederic Weisbecker | 555e0c1 | 2015-07-16 17:42:29 +0200 | [diff] [blame] | 11475 | /* Freq events need the tick to stay alive (see perf_event_task_tick). */ |
| 11476 | static void account_freq_event_nohz(void) |
| 11477 | { |
| 11478 | #ifdef CONFIG_NO_HZ_FULL |
| 11479 | /* Lock so we don't race with concurrent unaccount */ |
| 11480 | spin_lock(&nr_freq_lock); |
| 11481 | if (atomic_inc_return(&nr_freq_events) == 1) |
| 11482 | tick_nohz_dep_set(TICK_DEP_BIT_PERF_EVENTS); |
| 11483 | spin_unlock(&nr_freq_lock); |
| 11484 | #endif |
| 11485 | } |
| 11486 | |
| 11487 | static void account_freq_event(void) |
| 11488 | { |
| 11489 | if (tick_nohz_full_enabled()) |
| 11490 | account_freq_event_nohz(); |
| 11491 | else |
| 11492 | atomic_inc(&nr_freq_events); |
| 11493 | } |
| 11494 | |
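/*
 * Illustrative sketch (an addition, not upstream code): the nr_freq_events
 * accounting above is what a frequency-based sample request ends up taking.
 * From user space it is simply:
 *
 *	struct perf_event_attr attr = { 0 };
 *
 *	attr.size        = sizeof(attr);
 *	attr.type        = PERF_TYPE_HARDWARE;
 *	attr.config      = PERF_COUNT_HW_CPU_CYCLES;
 *	attr.freq        = 1;		// interpret sample_freq, not sample_period
 *	attr.sample_freq = 4000;	// aim for roughly 4000 samples/sec
 *
 * Under CONFIG_NO_HZ_FULL such events also set the tick dependency, per
 * account_freq_event_nohz() above.
 */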
| 11495 | |
Frederic Weisbecker | 766d6c0 | 2013-07-23 02:31:01 +0200 | [diff] [blame] | 11496 | static void account_event(struct perf_event *event) |
| 11497 | { |
Peter Zijlstra | 25432ae | 2016-01-08 11:05:09 +0100 | [diff] [blame] | 11498 | bool inc = false; |
| 11499 | |
Frederic Weisbecker | 4beb31f | 2013-07-23 02:31:02 +0200 | [diff] [blame] | 11500 | if (event->parent) |
| 11501 | return; |
| 11502 | |
Kan Liang | a5398bf | 2020-11-30 11:38:40 -0800 | [diff] [blame] | 11503 | if (event->attach_state & (PERF_ATTACH_TASK | PERF_ATTACH_SCHED_CB)) |
Peter Zijlstra | 25432ae | 2016-01-08 11:05:09 +0100 | [diff] [blame] | 11504 | inc = true; |
Frederic Weisbecker | 766d6c0 | 2013-07-23 02:31:01 +0200 | [diff] [blame] | 11505 | if (event->attr.mmap || event->attr.mmap_data) |
| 11506 | atomic_inc(&nr_mmap_events); |
Jiri Olsa | 88a16a1 | 2021-01-14 14:40:44 +0100 | [diff] [blame] | 11507 | if (event->attr.build_id) |
| 11508 | atomic_inc(&nr_build_id_events); |
Frederic Weisbecker | 766d6c0 | 2013-07-23 02:31:01 +0200 | [diff] [blame] | 11509 | if (event->attr.comm) |
| 11510 | atomic_inc(&nr_comm_events); |
Hari Bathini | e422267 | 2017-03-08 02:11:36 +0530 | [diff] [blame] | 11511 | if (event->attr.namespaces) |
| 11512 | atomic_inc(&nr_namespaces_events); |
Namhyung Kim | 96aaab6 | 2020-03-25 21:45:28 +0900 | [diff] [blame] | 11513 | if (event->attr.cgroup) |
| 11514 | atomic_inc(&nr_cgroup_events); |
Frederic Weisbecker | 766d6c0 | 2013-07-23 02:31:01 +0200 | [diff] [blame] | 11515 | if (event->attr.task) |
| 11516 | atomic_inc(&nr_task_events); |
Frederic Weisbecker | 555e0c1 | 2015-07-16 17:42:29 +0200 | [diff] [blame] | 11517 | if (event->attr.freq) |
| 11518 | account_freq_event(); |
Adrian Hunter | 45ac140 | 2015-07-21 12:44:02 +0300 | [diff] [blame] | 11519 | if (event->attr.context_switch) { |
| 11520 | atomic_inc(&nr_switch_events); |
Peter Zijlstra | 25432ae | 2016-01-08 11:05:09 +0100 | [diff] [blame] | 11521 | inc = true; |
Adrian Hunter | 45ac140 | 2015-07-21 12:44:02 +0300 | [diff] [blame] | 11522 | } |
Frederic Weisbecker | 4beb31f | 2013-07-23 02:31:02 +0200 | [diff] [blame] | 11523 | if (has_branch_stack(event)) |
Peter Zijlstra | 25432ae | 2016-01-08 11:05:09 +0100 | [diff] [blame] | 11524 | inc = true; |
Frederic Weisbecker | 4beb31f | 2013-07-23 02:31:02 +0200 | [diff] [blame] | 11525 | if (is_cgroup_event(event)) |
Peter Zijlstra | 25432ae | 2016-01-08 11:05:09 +0100 | [diff] [blame] | 11526 | inc = true; |
Song Liu | 76193a9 | 2019-01-17 08:15:13 -0800 | [diff] [blame] | 11527 | if (event->attr.ksymbol) |
| 11528 | atomic_inc(&nr_ksymbol_events); |
Song Liu | 6ee52e2 | 2019-01-17 08:15:15 -0800 | [diff] [blame] | 11529 | if (event->attr.bpf_event) |
| 11530 | atomic_inc(&nr_bpf_events); |
Adrian Hunter | e17d43b | 2020-05-12 15:19:08 +0300 | [diff] [blame] | 11531 | if (event->attr.text_poke) |
| 11532 | atomic_inc(&nr_text_poke_events); |
Peter Zijlstra | 25432ae | 2016-01-08 11:05:09 +0100 | [diff] [blame] | 11533 | |
Peter Zijlstra | 9107c89 | 2016-02-24 18:45:45 +0100 | [diff] [blame] | 11534 | if (inc) { |
Alexander Shishkin | 5bce9db | 2017-08-29 17:01:03 +0300 | [diff] [blame] | 11535 | /* |
| 11536 | * We need the mutex here because static_branch_enable() |
| 11537 | * must complete *before* the perf_sched_count increment |
| 11538 | * becomes visible. |
| 11539 | */ |
Peter Zijlstra | 9107c89 | 2016-02-24 18:45:45 +0100 | [diff] [blame] | 11540 | if (atomic_inc_not_zero(&perf_sched_count)) |
| 11541 | goto enabled; |
| 11542 | |
| 11543 | mutex_lock(&perf_sched_mutex); |
| 11544 | if (!atomic_read(&perf_sched_count)) { |
| 11545 | static_branch_enable(&perf_sched_events); |
| 11546 | /* |
 | 11547 | 			 * Guarantee that all CPUs observe the key change and |
| 11548 | * call the perf scheduling hooks before proceeding to |
| 11549 | * install events that need them. |
| 11550 | */ |
Paul E. McKenney | 0809d954 | 2018-11-06 19:20:05 -0800 | [diff] [blame] | 11551 | synchronize_rcu(); |
Peter Zijlstra | 9107c89 | 2016-02-24 18:45:45 +0100 | [diff] [blame] | 11552 | } |
| 11553 | /* |
 | 11554 | 		 * Now that we have waited for the synchronize_rcu(), allow further |
 | 11555 | 		 * increments to bypass the mutex. |
| 11556 | */ |
| 11557 | atomic_inc(&perf_sched_count); |
| 11558 | mutex_unlock(&perf_sched_mutex); |
| 11559 | } |
| 11560 | enabled: |
Frederic Weisbecker | 766d6c0 | 2013-07-23 02:31:01 +0200 | [diff] [blame] | 11561 | |
Frederic Weisbecker | 4beb31f | 2013-07-23 02:31:02 +0200 | [diff] [blame] | 11562 | account_event_cpu(event, event->cpu); |
Kan Liang | f2fb6be | 2016-03-23 11:24:37 -0700 | [diff] [blame] | 11563 | |
| 11564 | account_pmu_sb_event(event); |
Frederic Weisbecker | 766d6c0 | 2013-07-23 02:31:01 +0200 | [diff] [blame] | 11565 | } |
| 11566 | |
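/*
 * Hedged sketch (an addition, not upstream code): the perf_sched_count dance
 * in account_event() is an instance of a generic "enable a static key once,
 * then count references" pattern.  All example_* names are hypothetical.
 *
 *	static atomic_t example_count;
 *	static DEFINE_MUTEX(example_mutex);
 *	static DEFINE_STATIC_KEY_FALSE(example_key);
 *
 *	static void example_get(void)
 *	{
 *		if (atomic_inc_not_zero(&example_count))
 *			return;				// fast path, key already on
 *
 *		mutex_lock(&example_mutex);
 *		if (!atomic_read(&example_count)) {
 *			static_branch_enable(&example_key);
 *			synchronize_rcu();		// all CPUs see the key first
 *		}
 *		atomic_inc(&example_count);
 *		mutex_unlock(&example_mutex);
 *	}
 */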
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 11567 | /* |
Tobias Tefke | 788faab | 2018-07-09 12:57:15 +0200 | [diff] [blame] | 11568 | * Allocate and initialize an event structure |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 11569 | */ |
| 11570 | static struct perf_event * |
Peter Zijlstra | c3f00c7 | 2010-08-18 14:37:15 +0200 | [diff] [blame] | 11571 | perf_event_alloc(struct perf_event_attr *attr, int cpu, |
Peter Zijlstra | d580ff8 | 2010-10-14 17:43:23 +0200 | [diff] [blame] | 11572 | struct task_struct *task, |
| 11573 | struct perf_event *group_leader, |
| 11574 | struct perf_event *parent_event, |
Avi Kivity | 4dc0da8 | 2011-06-29 18:42:35 +0300 | [diff] [blame] | 11575 | perf_overflow_handler_t overflow_handler, |
Matt Fleming | 79dff51 | 2015-01-23 18:45:42 +0000 | [diff] [blame] | 11576 | void *context, int cgroup_fd) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 11577 | { |
Peter Zijlstra | 51b0fe3 | 2010-06-11 13:35:57 +0200 | [diff] [blame] | 11578 | struct pmu *pmu; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 11579 | struct perf_event *event; |
| 11580 | struct hw_perf_event *hwc; |
Frederic Weisbecker | 90983b1 | 2013-07-23 02:31:00 +0200 | [diff] [blame] | 11581 | long err = -EINVAL; |
Namhyung Kim | ff65338 | 2021-03-11 20:54:13 +0900 | [diff] [blame] | 11582 | int node; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 11583 | |
Oleg Nesterov | 66832eb | 2011-01-18 17:10:32 +0100 | [diff] [blame] | 11584 | if ((unsigned)cpu >= nr_cpu_ids) { |
| 11585 | if (!task || cpu != -1) |
| 11586 | return ERR_PTR(-EINVAL); |
| 11587 | } |
Marco Elver | 97ba62b | 2021-04-08 12:36:01 +0200 | [diff] [blame] | 11588 | if (attr->sigtrap && !task) { |
| 11589 | /* Requires a task: avoid signalling random tasks. */ |
| 11590 | return ERR_PTR(-EINVAL); |
| 11591 | } |
Oleg Nesterov | 66832eb | 2011-01-18 17:10:32 +0100 | [diff] [blame] | 11592 | |
Namhyung Kim | ff65338 | 2021-03-11 20:54:13 +0900 | [diff] [blame] | 11593 | node = (cpu >= 0) ? cpu_to_node(cpu) : -1; |
| 11594 | event = kmem_cache_alloc_node(perf_event_cache, GFP_KERNEL | __GFP_ZERO, |
| 11595 | node); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 11596 | if (!event) |
| 11597 | return ERR_PTR(-ENOMEM); |
| 11598 | |
| 11599 | /* |
| 11600 | * Single events are their own group leaders, with an |
| 11601 | * empty sibling list: |
| 11602 | */ |
| 11603 | if (!group_leader) |
| 11604 | group_leader = event; |
| 11605 | |
| 11606 | mutex_init(&event->child_mutex); |
| 11607 | INIT_LIST_HEAD(&event->child_list); |
| 11608 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 11609 | INIT_LIST_HEAD(&event->event_entry); |
| 11610 | INIT_LIST_HEAD(&event->sibling_list); |
Peter Zijlstra | 6668128 | 2017-11-13 14:28:38 +0100 | [diff] [blame] | 11611 | INIT_LIST_HEAD(&event->active_list); |
Alexey Budankov | 8e1a203 | 2017-09-08 11:47:03 +0300 | [diff] [blame] | 11612 | init_event_group(event); |
Peter Zijlstra | 10c6db1 | 2011-11-26 02:47:31 +0100 | [diff] [blame] | 11613 | INIT_LIST_HEAD(&event->rb_entry); |
Stephane Eranian | 71ad88e | 2013-11-12 17:58:48 +0100 | [diff] [blame] | 11614 | INIT_LIST_HEAD(&event->active_entry); |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 11615 | INIT_LIST_HEAD(&event->addr_filters.list); |
Stephane Eranian | f3ae75d | 2014-01-08 11:15:52 +0100 | [diff] [blame] | 11616 | INIT_HLIST_NODE(&event->hlist_entry); |
| 11617 | |
Peter Zijlstra | 10c6db1 | 2011-11-26 02:47:31 +0100 | [diff] [blame] | 11618 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 11619 | init_waitqueue_head(&event->waitq); |
Peter Zijlstra | 1d54ad9 | 2019-04-04 15:03:00 +0200 | [diff] [blame] | 11620 | event->pending_disable = -1; |
Peter Zijlstra | e360adb | 2010-10-14 14:01:34 +0800 | [diff] [blame] | 11621 | init_irq_work(&event->pending, perf_pending_event); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 11622 | |
| 11623 | mutex_init(&event->mmap_mutex); |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 11624 | raw_spin_lock_init(&event->addr_filters.lock); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 11625 | |
Al Viro | a6fa941 | 2012-08-20 14:59:25 +0100 | [diff] [blame] | 11626 | atomic_long_set(&event->refcount, 1); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 11627 | event->cpu = cpu; |
| 11628 | event->attr = *attr; |
| 11629 | event->group_leader = group_leader; |
| 11630 | event->pmu = NULL; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 11631 | event->oncpu = -1; |
| 11632 | |
| 11633 | event->parent = parent_event; |
| 11634 | |
Eric W. Biederman | 17cf22c | 2010-03-02 14:51:53 -0800 | [diff] [blame] | 11635 | event->ns = get_pid_ns(task_active_pid_ns(current)); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 11636 | event->id = atomic64_inc_return(&perf_event_id); |
| 11637 | |
| 11638 | event->state = PERF_EVENT_STATE_INACTIVE; |
| 11639 | |
Marco Elver | 97ba62b | 2021-04-08 12:36:01 +0200 | [diff] [blame] | 11640 | if (event->attr.sigtrap) |
| 11641 | atomic_set(&event->event_limit, 1); |
| 11642 | |
Peter Zijlstra | d580ff8 | 2010-10-14 17:43:23 +0200 | [diff] [blame] | 11643 | if (task) { |
| 11644 | event->attach_state = PERF_ATTACH_TASK; |
Peter Zijlstra | d580ff8 | 2010-10-14 17:43:23 +0200 | [diff] [blame] | 11645 | /* |
Peter Zijlstra | 50f16a8 | 2015-03-05 22:10:19 +0100 | [diff] [blame] | 11646 | * XXX pmu::event_init needs to know what task to account to |
| 11647 | * and we cannot use the ctx information because we need the |
| 11648 | * pmu before we get a ctx. |
Peter Zijlstra | d580ff8 | 2010-10-14 17:43:23 +0200 | [diff] [blame] | 11649 | */ |
Matthew Wilcox (Oracle) | 7b3c92b | 2019-07-04 15:13:23 -0700 | [diff] [blame] | 11650 | event->hw.target = get_task_struct(task); |
Peter Zijlstra | d580ff8 | 2010-10-14 17:43:23 +0200 | [diff] [blame] | 11651 | } |
| 11652 | |
Peter Zijlstra | 34f4392 | 2015-02-20 14:05:38 +0100 | [diff] [blame] | 11653 | event->clock = &local_clock; |
| 11654 | if (parent_event) |
| 11655 | event->clock = parent_event->clock; |
| 11656 | |
Avi Kivity | 4dc0da8 | 2011-06-29 18:42:35 +0300 | [diff] [blame] | 11657 | if (!overflow_handler && parent_event) { |
Frederic Weisbecker | b326e95 | 2009-12-05 09:44:31 +0100 | [diff] [blame] | 11658 | overflow_handler = parent_event->overflow_handler; |
Avi Kivity | 4dc0da8 | 2011-06-29 18:42:35 +0300 | [diff] [blame] | 11659 | context = parent_event->overflow_handler_context; |
Arnd Bergmann | f1e4ba5 | 2016-09-06 15:10:22 +0200 | [diff] [blame] | 11660 | #if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_EVENT_TRACING) |
Alexei Starovoitov | aa6a5f3 | 2016-09-01 18:37:24 -0700 | [diff] [blame] | 11661 | if (overflow_handler == bpf_overflow_handler) { |
Andrii Nakryiko | 85192db | 2019-11-17 09:28:03 -0800 | [diff] [blame] | 11662 | struct bpf_prog *prog = parent_event->prog; |
Alexei Starovoitov | aa6a5f3 | 2016-09-01 18:37:24 -0700 | [diff] [blame] | 11663 | |
Andrii Nakryiko | 85192db | 2019-11-17 09:28:03 -0800 | [diff] [blame] | 11664 | bpf_prog_inc(prog); |
Alexei Starovoitov | aa6a5f3 | 2016-09-01 18:37:24 -0700 | [diff] [blame] | 11665 | event->prog = prog; |
| 11666 | event->orig_overflow_handler = |
| 11667 | parent_event->orig_overflow_handler; |
| 11668 | } |
| 11669 | #endif |
Avi Kivity | 4dc0da8 | 2011-06-29 18:42:35 +0300 | [diff] [blame] | 11670 | } |
Oleg Nesterov | 66832eb | 2011-01-18 17:10:32 +0100 | [diff] [blame] | 11671 | |
Wang Nan | 1879445 | 2016-03-28 06:41:30 +0000 | [diff] [blame] | 11672 | if (overflow_handler) { |
| 11673 | event->overflow_handler = overflow_handler; |
| 11674 | event->overflow_handler_context = context; |
Wang Nan | 9ecda41 | 2016-04-05 14:11:18 +0000 | [diff] [blame] | 11675 | 	} else if (is_write_backward(event)) { |
| 11676 | event->overflow_handler = perf_event_output_backward; |
| 11677 | event->overflow_handler_context = NULL; |
Wang Nan | 1879445 | 2016-03-28 06:41:30 +0000 | [diff] [blame] | 11678 | } else { |
Wang Nan | 9ecda41 | 2016-04-05 14:11:18 +0000 | [diff] [blame] | 11679 | event->overflow_handler = perf_event_output_forward; |
Wang Nan | 1879445 | 2016-03-28 06:41:30 +0000 | [diff] [blame] | 11680 | event->overflow_handler_context = NULL; |
| 11681 | } |
Frederic Weisbecker | 97eaf53 | 2009-10-18 15:33:50 +0200 | [diff] [blame] | 11682 | |
Jiri Olsa | 0231bb5 | 2013-02-01 11:23:45 +0100 | [diff] [blame] | 11683 | perf_event__state_init(event); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 11684 | |
| 11685 | pmu = NULL; |
| 11686 | |
| 11687 | hwc = &event->hw; |
| 11688 | hwc->sample_period = attr->sample_period; |
| 11689 | if (attr->freq && attr->sample_freq) |
| 11690 | hwc->sample_period = 1; |
| 11691 | hwc->last_period = hwc->sample_period; |
| 11692 | |
Peter Zijlstra | e785059 | 2010-05-21 14:43:08 +0200 | [diff] [blame] | 11693 | local64_set(&hwc->period_left, hwc->sample_period); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 11694 | |
| 11695 | /* |
Peter Zijlstra | ba5213a | 2017-05-30 11:45:12 +0200 | [diff] [blame] | 11696 | * We currently do not support PERF_SAMPLE_READ on inherited events. |
| 11697 | * See perf_output_read(). |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 11698 | */ |
Peter Zijlstra | ba5213a | 2017-05-30 11:45:12 +0200 | [diff] [blame] | 11699 | if (attr->inherit && (attr->sample_type & PERF_SAMPLE_READ)) |
Frederic Weisbecker | 90983b1 | 2013-07-23 02:31:00 +0200 | [diff] [blame] | 11700 | goto err_ns; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 11701 | |
Yan, Zheng | a46a230 | 2014-11-04 21:56:06 -0500 | [diff] [blame] | 11702 | if (!has_branch_stack(event)) |
| 11703 | event->attr.branch_sample_type = 0; |
| 11704 | |
Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 11705 | pmu = perf_init_event(event); |
Dan Carpenter | 85c617a | 2017-05-22 12:03:49 +0300 | [diff] [blame] | 11706 | if (IS_ERR(pmu)) { |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 11707 | err = PTR_ERR(pmu); |
Frederic Weisbecker | 90983b1 | 2013-07-23 02:31:00 +0200 | [diff] [blame] | 11708 | goto err_ns; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 11709 | } |
| 11710 | |
Peter Zijlstra | 09f4e8f | 2019-11-06 12:51:04 +0100 | [diff] [blame] | 11711 | /* |
 | 11712 | 	 * Disallow uncore-cgroup events; they don't make sense, as the cgroup will |
| 11713 | * be different on other CPUs in the uncore mask. |
| 11714 | */ |
| 11715 | if (pmu->task_ctx_nr == perf_invalid_context && cgroup_fd != -1) { |
| 11716 | err = -EINVAL; |
| 11717 | goto err_pmu; |
| 11718 | } |
| 11719 | |
Alexander Shishkin | ab43762 | 2019-08-06 11:46:00 +0300 | [diff] [blame] | 11720 | if (event->attr.aux_output && |
| 11721 | !(pmu->capabilities & PERF_PMU_CAP_AUX_OUTPUT)) { |
| 11722 | err = -EOPNOTSUPP; |
| 11723 | goto err_pmu; |
| 11724 | } |
| 11725 | |
Peter Zijlstra | 98add2a | 2020-02-13 23:51:28 -0800 | [diff] [blame] | 11726 | if (cgroup_fd != -1) { |
| 11727 | err = perf_cgroup_connect(cgroup_fd, event, attr, group_leader); |
| 11728 | if (err) |
| 11729 | goto err_pmu; |
| 11730 | } |
| 11731 | |
Alexander Shishkin | bed5b25 | 2015-01-30 12:31:06 +0200 | [diff] [blame] | 11732 | err = exclusive_event_init(event); |
| 11733 | if (err) |
| 11734 | goto err_pmu; |
| 11735 | |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 11736 | if (has_addr_filter(event)) { |
Alexander Shishkin | c60f83b | 2019-02-15 13:56:55 +0200 | [diff] [blame] | 11737 | event->addr_filter_ranges = kcalloc(pmu->nr_addr_filters, |
| 11738 | sizeof(struct perf_addr_filter_range), |
| 11739 | GFP_KERNEL); |
| 11740 | if (!event->addr_filter_ranges) { |
Dan Carpenter | 36cc2b9 | 2017-05-22 12:04:18 +0300 | [diff] [blame] | 11741 | err = -ENOMEM; |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 11742 | goto err_per_task; |
Dan Carpenter | 36cc2b9 | 2017-05-22 12:04:18 +0300 | [diff] [blame] | 11743 | } |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 11744 | |
Alexander Shishkin | 18736ee | 2019-02-15 13:56:54 +0200 | [diff] [blame] | 11745 | /* |
| 11746 | * Clone the parent's vma offsets: they are valid until exec() |
| 11747 | * even if the mm is not shared with the parent. |
| 11748 | */ |
| 11749 | if (event->parent) { |
| 11750 | struct perf_addr_filters_head *ifh = perf_event_addr_filters(event); |
| 11751 | |
| 11752 | raw_spin_lock_irq(&ifh->lock); |
Alexander Shishkin | c60f83b | 2019-02-15 13:56:55 +0200 | [diff] [blame] | 11753 | memcpy(event->addr_filter_ranges, |
| 11754 | event->parent->addr_filter_ranges, |
| 11755 | pmu->nr_addr_filters * sizeof(struct perf_addr_filter_range)); |
Alexander Shishkin | 18736ee | 2019-02-15 13:56:54 +0200 | [diff] [blame] | 11756 | raw_spin_unlock_irq(&ifh->lock); |
| 11757 | } |
| 11758 | |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 11759 | /* force hw sync on the address filters */ |
| 11760 | event->addr_filters_gen = 1; |
| 11761 | } |
| 11762 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 11763 | if (!event->parent) { |
Frederic Weisbecker | 927c7a9 | 2010-07-01 16:20:36 +0200 | [diff] [blame] | 11764 | if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) { |
Arnaldo Carvalho de Melo | 97c79a3 | 2016-04-28 13:16:33 -0300 | [diff] [blame] | 11765 | err = get_callchain_buffers(attr->sample_max_stack); |
Frederic Weisbecker | 90983b1 | 2013-07-23 02:31:00 +0200 | [diff] [blame] | 11766 | if (err) |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 11767 | goto err_addr_filters; |
Stephane Eranian | d010b33 | 2012-02-09 23:21:00 +0100 | [diff] [blame] | 11768 | } |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 11769 | } |
| 11770 | |
Joel Fernandes (Google) | da97e18 | 2019-10-14 13:03:08 -0400 | [diff] [blame] | 11771 | err = security_perf_event_alloc(event); |
| 11772 | if (err) |
| 11773 | goto err_callchain_buffer; |
| 11774 | |
Alexander Shishkin | 927a557 | 2016-03-02 13:24:14 +0200 | [diff] [blame] | 11775 | /* symmetric to unaccount_event() in _free_event() */ |
| 11776 | account_event(event); |
| 11777 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 11778 | return event; |
Frederic Weisbecker | 90983b1 | 2013-07-23 02:31:00 +0200 | [diff] [blame] | 11779 | |
Joel Fernandes (Google) | da97e18 | 2019-10-14 13:03:08 -0400 | [diff] [blame] | 11780 | err_callchain_buffer: |
| 11781 | if (!event->parent) { |
| 11782 | if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) |
| 11783 | put_callchain_buffers(); |
| 11784 | } |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 11785 | err_addr_filters: |
Alexander Shishkin | c60f83b | 2019-02-15 13:56:55 +0200 | [diff] [blame] | 11786 | kfree(event->addr_filter_ranges); |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 11787 | |
Alexander Shishkin | bed5b25 | 2015-01-30 12:31:06 +0200 | [diff] [blame] | 11788 | err_per_task: |
| 11789 | exclusive_event_destroy(event); |
| 11790 | |
Frederic Weisbecker | 90983b1 | 2013-07-23 02:31:00 +0200 | [diff] [blame] | 11791 | err_pmu: |
Peter Zijlstra | 98add2a | 2020-02-13 23:51:28 -0800 | [diff] [blame] | 11792 | if (is_cgroup_event(event)) |
| 11793 | perf_detach_cgroup(event); |
Frederic Weisbecker | 90983b1 | 2013-07-23 02:31:00 +0200 | [diff] [blame] | 11794 | if (event->destroy) |
| 11795 | event->destroy(event); |
Yan, Zheng | c464c76 | 2014-03-18 16:56:41 +0800 | [diff] [blame] | 11796 | module_put(pmu->module); |
Frederic Weisbecker | 90983b1 | 2013-07-23 02:31:00 +0200 | [diff] [blame] | 11797 | err_ns: |
| 11798 | if (event->ns) |
| 11799 | put_pid_ns(event->ns); |
Prashant Bhole | 621b6d2 | 2018-04-09 19:03:46 +0900 | [diff] [blame] | 11800 | if (event->hw.target) |
| 11801 | put_task_struct(event->hw.target); |
Namhyung Kim | bdacfaf | 2021-03-11 20:54:12 +0900 | [diff] [blame] | 11802 | kmem_cache_free(perf_event_cache, event); |
Frederic Weisbecker | 90983b1 | 2013-07-23 02:31:00 +0200 | [diff] [blame] | 11803 | |
| 11804 | return ERR_PTR(err); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 11805 | } |
| 11806 | |
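/*
 * Illustrative sketch (an addition, not upstream code): in-kernel users do
 * not call perf_event_alloc() directly; they go through
 * perf_event_create_kernel_counter(), which allocates and installs the event.
 * A rough per-CPU cycle counter, where overflow_fn is a hypothetical callback:
 *
 *	struct perf_event_attr attr = {
 *		.type		= PERF_TYPE_HARDWARE,
 *		.config		= PERF_COUNT_HW_CPU_CYCLES,
 *		.size		= sizeof(struct perf_event_attr),
 *		.sample_period	= 1000000,
 *	};
 *	struct perf_event *event;
 *
 *	event = perf_event_create_kernel_counter(&attr, cpu, NULL,
 *						 overflow_fn, NULL);
 *	if (IS_ERR(event))
 *		return PTR_ERR(event);
 *	// ... later: perf_event_release_kernel(event);
 */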
| 11807 | static int perf_copy_attr(struct perf_event_attr __user *uattr, |
| 11808 | struct perf_event_attr *attr) |
| 11809 | { |
| 11810 | u32 size; |
| 11811 | int ret; |
| 11812 | |
Aleksa Sarai | c2ba8f4 | 2019-10-01 11:10:55 +1000 | [diff] [blame] | 11813 | 	/* Zero the full structure, so that a short copy leaves the rest zeroed. */ |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 11814 | memset(attr, 0, sizeof(*attr)); |
| 11815 | |
| 11816 | ret = get_user(size, &uattr->size); |
| 11817 | if (ret) |
| 11818 | return ret; |
| 11819 | |
Aleksa Sarai | c2ba8f4 | 2019-10-01 11:10:55 +1000 | [diff] [blame] | 11820 | /* ABI compatibility quirk: */ |
| 11821 | if (!size) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 11822 | size = PERF_ATTR_SIZE_VER0; |
Aleksa Sarai | c2ba8f4 | 2019-10-01 11:10:55 +1000 | [diff] [blame] | 11823 | if (size < PERF_ATTR_SIZE_VER0 || size > PAGE_SIZE) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 11824 | goto err_size; |
| 11825 | |
Aleksa Sarai | c2ba8f4 | 2019-10-01 11:10:55 +1000 | [diff] [blame] | 11826 | ret = copy_struct_from_user(attr, sizeof(*attr), uattr, size); |
| 11827 | if (ret) { |
| 11828 | if (ret == -E2BIG) |
| 11829 | goto err_size; |
| 11830 | return ret; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 11831 | } |
| 11832 | |
Meng Xu | f12f42a | 2017-08-23 17:07:50 -0400 | [diff] [blame] | 11833 | attr->size = size; |
| 11834 | |
Alexander Shishkin | a4faf00 | 2019-10-25 17:08:33 +0300 | [diff] [blame] | 11835 | if (attr->__reserved_1 || attr->__reserved_2 || attr->__reserved_3) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 11836 | return -EINVAL; |
| 11837 | |
| 11838 | if (attr->sample_type & ~(PERF_SAMPLE_MAX-1)) |
| 11839 | return -EINVAL; |
| 11840 | |
| 11841 | if (attr->read_format & ~(PERF_FORMAT_MAX-1)) |
| 11842 | return -EINVAL; |
| 11843 | |
Stephane Eranian | bce38cd | 2012-02-09 23:20:51 +0100 | [diff] [blame] | 11844 | if (attr->sample_type & PERF_SAMPLE_BRANCH_STACK) { |
| 11845 | u64 mask = attr->branch_sample_type; |
| 11846 | |
| 11847 | /* only using defined bits */ |
| 11848 | if (mask & ~(PERF_SAMPLE_BRANCH_MAX-1)) |
| 11849 | return -EINVAL; |
| 11850 | |
| 11851 | /* at least one branch bit must be set */ |
| 11852 | if (!(mask & ~PERF_SAMPLE_BRANCH_PLM_ALL)) |
| 11853 | return -EINVAL; |
| 11854 | |
Stephane Eranian | bce38cd | 2012-02-09 23:20:51 +0100 | [diff] [blame] | 11855 | /* propagate priv level, when not set for branch */ |
| 11856 | if (!(mask & PERF_SAMPLE_BRANCH_PLM_ALL)) { |
| 11857 | |
| 11858 | /* exclude_kernel checked on syscall entry */ |
| 11859 | if (!attr->exclude_kernel) |
| 11860 | mask |= PERF_SAMPLE_BRANCH_KERNEL; |
| 11861 | |
| 11862 | if (!attr->exclude_user) |
| 11863 | mask |= PERF_SAMPLE_BRANCH_USER; |
| 11864 | |
| 11865 | if (!attr->exclude_hv) |
| 11866 | mask |= PERF_SAMPLE_BRANCH_HV; |
| 11867 | /* |
| 11868 | * adjust user setting (for HW filter setup) |
| 11869 | */ |
| 11870 | attr->branch_sample_type = mask; |
| 11871 | } |
Stephane Eranian | e712209 | 2013-06-06 11:02:04 +0200 | [diff] [blame] | 11872 | /* privileged levels capture (kernel, hv): check permissions */ |
Joel Fernandes (Google) | da97e18 | 2019-10-14 13:03:08 -0400 | [diff] [blame] | 11873 | if (mask & PERF_SAMPLE_BRANCH_PERM_PLM) { |
| 11874 | ret = perf_allow_kernel(attr); |
| 11875 | if (ret) |
| 11876 | return ret; |
| 11877 | } |
Stephane Eranian | bce38cd | 2012-02-09 23:20:51 +0100 | [diff] [blame] | 11878 | } |
Jiri Olsa | 4018994 | 2012-08-07 15:20:37 +0200 | [diff] [blame] | 11879 | |
Jiri Olsa | c5ebced | 2012-08-07 15:20:40 +0200 | [diff] [blame] | 11880 | if (attr->sample_type & PERF_SAMPLE_REGS_USER) { |
Jiri Olsa | 4018994 | 2012-08-07 15:20:37 +0200 | [diff] [blame] | 11881 | ret = perf_reg_validate(attr->sample_regs_user); |
Jiri Olsa | c5ebced | 2012-08-07 15:20:40 +0200 | [diff] [blame] | 11882 | if (ret) |
| 11883 | return ret; |
| 11884 | } |
| 11885 | |
| 11886 | if (attr->sample_type & PERF_SAMPLE_STACK_USER) { |
| 11887 | if (!arch_perf_have_user_stack_dump()) |
| 11888 | return -ENOSYS; |
| 11889 | |
| 11890 | /* |
| 11891 | * We have __u32 type for the size, but so far |
| 11892 | * we can only use __u16 as maximum due to the |
| 11893 | * __u16 sample size limit. |
| 11894 | */ |
| 11895 | if (attr->sample_stack_user >= USHRT_MAX) |
Jiri Olsa | 78b562f | 2018-04-15 11:23:50 +0200 | [diff] [blame] | 11896 | return -EINVAL; |
Jiri Olsa | c5ebced | 2012-08-07 15:20:40 +0200 | [diff] [blame] | 11897 | else if (!IS_ALIGNED(attr->sample_stack_user, sizeof(u64))) |
Jiri Olsa | 78b562f | 2018-04-15 11:23:50 +0200 | [diff] [blame] | 11898 | return -EINVAL; |
Jiri Olsa | c5ebced | 2012-08-07 15:20:40 +0200 | [diff] [blame] | 11899 | } |
Jiri Olsa | 4018994 | 2012-08-07 15:20:37 +0200 | [diff] [blame] | 11900 | |
Jiri Olsa | 5f97052 | 2018-03-12 14:45:46 +0100 | [diff] [blame] | 11901 | if (!attr->sample_max_stack) |
| 11902 | attr->sample_max_stack = sysctl_perf_event_max_stack; |
| 11903 | |
Stephane Eranian | 60e2364 | 2014-09-24 13:48:37 +0200 | [diff] [blame] | 11904 | if (attr->sample_type & PERF_SAMPLE_REGS_INTR) |
| 11905 | ret = perf_reg_validate(attr->sample_regs_intr); |
Namhyung Kim | 6546b19 | 2020-03-25 21:45:29 +0900 | [diff] [blame] | 11906 | |
| 11907 | #ifndef CONFIG_CGROUP_PERF |
| 11908 | if (attr->sample_type & PERF_SAMPLE_CGROUP) |
| 11909 | return -EINVAL; |
| 11910 | #endif |
Kan Liang | 2a6c6b7 | 2021-01-28 14:40:07 -0800 | [diff] [blame] | 11911 | if ((attr->sample_type & PERF_SAMPLE_WEIGHT) && |
| 11912 | (attr->sample_type & PERF_SAMPLE_WEIGHT_STRUCT)) |
| 11913 | return -EINVAL; |
Namhyung Kim | 6546b19 | 2020-03-25 21:45:29 +0900 | [diff] [blame] | 11914 | |
Marco Elver | 2b26f0a | 2021-04-08 12:35:58 +0200 | [diff] [blame] | 11915 | if (!attr->inherit && attr->inherit_thread) |
| 11916 | return -EINVAL; |
| 11917 | |
Marco Elver | 2e498d0 | 2021-04-08 12:35:59 +0200 | [diff] [blame] | 11918 | if (attr->remove_on_exec && attr->enable_on_exec) |
| 11919 | return -EINVAL; |
| 11920 | |
Marco Elver | 97ba62b | 2021-04-08 12:36:01 +0200 | [diff] [blame] | 11921 | if (attr->sigtrap && !attr->remove_on_exec) |
| 11922 | return -EINVAL; |
| 11923 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 11924 | out: |
| 11925 | return ret; |
| 11926 | |
| 11927 | err_size: |
| 11928 | put_user(sizeof(*attr), &uattr->size); |
| 11929 | ret = -E2BIG; |
| 11930 | goto out; |
| 11931 | } |
| 11932 | |
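/*
 * Illustrative sketch (an addition, not upstream code): the size handling in
 * perf_copy_attr() is what lets old and new user space share the ABI.  A
 * caller sets attr.size to its own idea of the structure; a shorter (or zero)
 * size is treated as PERF_ATTR_SIZE_VER0, and a larger one is accepted as
 * long as the bytes the kernel does not know about are zero.
 *
 *	struct perf_event_attr attr;
 *	int fd;
 *
 *	memset(&attr, 0, sizeof(attr));
 *	attr.size     = sizeof(attr);
 *	attr.type     = PERF_TYPE_SOFTWARE;
 *	attr.config   = PERF_COUNT_SW_TASK_CLOCK;
 *	attr.disabled = 1;
 *
 *	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
 *	// args: attr, pid 0 (self), cpu -1 (any), group_fd -1, flags 0
 */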
Peter Zijlstra | ac9721f | 2010-05-27 12:54:41 +0200 | [diff] [blame] | 11933 | static int |
| 11934 | perf_event_set_output(struct perf_event *event, struct perf_event *output_event) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 11935 | { |
Steven Rostedt (VMware) | 56de4e8 | 2019-12-13 13:21:30 -0500 | [diff] [blame] | 11936 | struct perf_buffer *rb = NULL; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 11937 | int ret = -EINVAL; |
| 11938 | |
Peter Zijlstra | ac9721f | 2010-05-27 12:54:41 +0200 | [diff] [blame] | 11939 | if (!output_event) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 11940 | goto set; |
| 11941 | |
Peter Zijlstra | ac9721f | 2010-05-27 12:54:41 +0200 | [diff] [blame] | 11942 | /* don't allow circular references */ |
| 11943 | if (event == output_event) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 11944 | goto out; |
| 11945 | |
Peter Zijlstra | 0f13930 | 2010-05-20 14:35:15 +0200 | [diff] [blame] | 11946 | /* |
| 11947 | * Don't allow cross-cpu buffers |
| 11948 | */ |
| 11949 | if (output_event->cpu != event->cpu) |
| 11950 | goto out; |
| 11951 | |
| 11952 | /* |
Frederic Weisbecker | 7636913 | 2011-05-19 19:55:04 +0200 | [diff] [blame] | 11953 | 	 * If it's not a per-cpu rb, it must be the same task. |
Peter Zijlstra | 0f13930 | 2010-05-20 14:35:15 +0200 | [diff] [blame] | 11954 | */ |
| 11955 | if (output_event->cpu == -1 && output_event->ctx != event->ctx) |
| 11956 | goto out; |
| 11957 | |
Peter Zijlstra | 34f4392 | 2015-02-20 14:05:38 +0100 | [diff] [blame] | 11958 | /* |
| 11959 | * Mixing clocks in the same buffer is trouble you don't need. |
| 11960 | */ |
| 11961 | if (output_event->clock != event->clock) |
| 11962 | goto out; |
| 11963 | |
Peter Zijlstra | 45bfb2e | 2015-01-14 14:18:11 +0200 | [diff] [blame] | 11964 | /* |
Wang Nan | 9ecda41 | 2016-04-05 14:11:18 +0000 | [diff] [blame] | 11965 | 	 * Either write the ring buffer from the beginning or from the end. |
| 11966 | * Mixing is not allowed. |
| 11967 | */ |
| 11968 | if (is_write_backward(output_event) != is_write_backward(event)) |
| 11969 | goto out; |
| 11970 | |
| 11971 | /* |
Peter Zijlstra | 45bfb2e | 2015-01-14 14:18:11 +0200 | [diff] [blame] | 11972 | * If both events generate aux data, they must be on the same PMU |
| 11973 | */ |
| 11974 | if (has_aux(event) && has_aux(output_event) && |
| 11975 | event->pmu != output_event->pmu) |
| 11976 | goto out; |
| 11977 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 11978 | set: |
| 11979 | mutex_lock(&event->mmap_mutex); |
Peter Zijlstra | ac9721f | 2010-05-27 12:54:41 +0200 | [diff] [blame] | 11980 | /* Can't redirect output if we've got an active mmap() */ |
| 11981 | if (atomic_read(&event->mmap_count)) |
| 11982 | goto unlock; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 11983 | |
Peter Zijlstra | ac9721f | 2010-05-27 12:54:41 +0200 | [diff] [blame] | 11984 | if (output_event) { |
Frederic Weisbecker | 7636913 | 2011-05-19 19:55:04 +0200 | [diff] [blame] | 11985 | /* get the rb we want to redirect to */ |
| 11986 | rb = ring_buffer_get(output_event); |
| 11987 | if (!rb) |
Peter Zijlstra | ac9721f | 2010-05-27 12:54:41 +0200 | [diff] [blame] | 11988 | goto unlock; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 11989 | } |
| 11990 | |
Peter Zijlstra | b69cf53 | 2014-03-14 10:50:33 +0100 | [diff] [blame] | 11991 | ring_buffer_attach(event, rb); |
Peter Zijlstra | 9bb5d40 | 2013-06-04 10:44:21 +0200 | [diff] [blame] | 11992 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 11993 | ret = 0; |
Peter Zijlstra | ac9721f | 2010-05-27 12:54:41 +0200 | [diff] [blame] | 11994 | unlock: |
| 11995 | mutex_unlock(&event->mmap_mutex); |
| 11996 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 11997 | out: |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 11998 | return ret; |
| 11999 | } |
| 12000 | |
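/*
 * Illustrative sketch (an addition, not upstream code): user space reaches
 * perf_event_set_output() through the PERF_EVENT_IOC_SET_OUTPUT ioctl.  Per
 * the checks above, both events must share the CPU (or task context), the
 * clock and the write direction.
 *
 *	// fd_a owns the mmap()ed ring buffer, fd_b gets redirected into it
 *	if (ioctl(fd_b, PERF_EVENT_IOC_SET_OUTPUT, fd_a) < 0)
 *		perror("PERF_EVENT_IOC_SET_OUTPUT");
 */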
Peter Zijlstra | f63a8da | 2015-01-23 12:24:14 +0100 | [diff] [blame] | 12001 | static void mutex_lock_double(struct mutex *a, struct mutex *b) |
| 12002 | { |
| 12003 | if (b < a) |
| 12004 | swap(a, b); |
| 12005 | |
| 12006 | mutex_lock(a); |
| 12007 | mutex_lock_nested(b, SINGLE_DEPTH_NESTING); |
| 12008 | } |
| 12009 | |
Peter Zijlstra | 34f4392 | 2015-02-20 14:05:38 +0100 | [diff] [blame] | 12010 | static int perf_event_set_clock(struct perf_event *event, clockid_t clk_id) |
| 12011 | { |
| 12012 | bool nmi_safe = false; |
| 12013 | |
| 12014 | switch (clk_id) { |
| 12015 | case CLOCK_MONOTONIC: |
| 12016 | event->clock = &ktime_get_mono_fast_ns; |
| 12017 | nmi_safe = true; |
| 12018 | break; |
| 12019 | |
| 12020 | case CLOCK_MONOTONIC_RAW: |
| 12021 | event->clock = &ktime_get_raw_fast_ns; |
| 12022 | nmi_safe = true; |
| 12023 | break; |
| 12024 | |
| 12025 | case CLOCK_REALTIME: |
| 12026 | event->clock = &ktime_get_real_ns; |
| 12027 | break; |
| 12028 | |
| 12029 | case CLOCK_BOOTTIME: |
Jason A. Donenfeld | 9285ec4 | 2019-06-21 22:32:48 +0200 | [diff] [blame] | 12030 | event->clock = &ktime_get_boottime_ns; |
Peter Zijlstra | 34f4392 | 2015-02-20 14:05:38 +0100 | [diff] [blame] | 12031 | break; |
| 12032 | |
| 12033 | case CLOCK_TAI: |
Jason A. Donenfeld | 9285ec4 | 2019-06-21 22:32:48 +0200 | [diff] [blame] | 12034 | event->clock = &ktime_get_clocktai_ns; |
Peter Zijlstra | 34f4392 | 2015-02-20 14:05:38 +0100 | [diff] [blame] | 12035 | break; |
| 12036 | |
| 12037 | default: |
| 12038 | return -EINVAL; |
| 12039 | } |
| 12040 | |
| 12041 | if (!nmi_safe && !(event->pmu->capabilities & PERF_PMU_CAP_NO_NMI)) |
| 12042 | return -EINVAL; |
| 12043 | |
| 12044 | return 0; |
| 12045 | } |
| 12046 | |
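/*
 * Illustrative sketch (an addition, not upstream code): the clock selected
 * here comes from perf_event_open() when attr.use_clockid is set, which makes
 * the timestamps in samples comparable with the chosen POSIX clock.
 *
 *	struct perf_event_attr attr = { 0 };
 *
 *	attr.size        = sizeof(attr);
 *	attr.use_clockid = 1;
 *	attr.clockid     = CLOCK_MONOTONIC_RAW;	// NMI-safe, so allowed for any PMU
 */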
Peter Zijlstra | 321027c | 2017-01-11 21:09:50 +0100 | [diff] [blame] | 12047 | /* |
| 12048 | * Variation on perf_event_ctx_lock_nested(), except we take two context |
| 12049 | * mutexes. |
| 12050 | */ |
| 12051 | static struct perf_event_context * |
| 12052 | __perf_event_ctx_lock_double(struct perf_event *group_leader, |
| 12053 | struct perf_event_context *ctx) |
| 12054 | { |
| 12055 | struct perf_event_context *gctx; |
| 12056 | |
| 12057 | again: |
| 12058 | rcu_read_lock(); |
| 12059 | gctx = READ_ONCE(group_leader->ctx); |
Elena Reshetova | 8c94abb | 2019-01-28 14:27:26 +0200 | [diff] [blame] | 12060 | if (!refcount_inc_not_zero(&gctx->refcount)) { |
Peter Zijlstra | 321027c | 2017-01-11 21:09:50 +0100 | [diff] [blame] | 12061 | rcu_read_unlock(); |
| 12062 | goto again; |
| 12063 | } |
| 12064 | rcu_read_unlock(); |
| 12065 | |
| 12066 | mutex_lock_double(&gctx->mutex, &ctx->mutex); |
| 12067 | |
| 12068 | if (group_leader->ctx != gctx) { |
| 12069 | mutex_unlock(&ctx->mutex); |
| 12070 | mutex_unlock(&gctx->mutex); |
| 12071 | put_ctx(gctx); |
| 12072 | goto again; |
| 12073 | } |
| 12074 | |
| 12075 | return gctx; |
| 12076 | } |
| 12077 | |
Marco Elver | b068fc0 | 2021-07-05 10:44:53 +0200 | [diff] [blame] | 12078 | static bool |
| 12079 | perf_check_permission(struct perf_event_attr *attr, struct task_struct *task) |
| 12080 | { |
| 12081 | unsigned int ptrace_mode = PTRACE_MODE_READ_REALCREDS; |
| 12082 | bool is_capable = perfmon_capable(); |
| 12083 | |
| 12084 | if (attr->sigtrap) { |
| 12085 | /* |
| 12086 | * perf_event_attr::sigtrap sends signals to the other task. |
| 12087 | * Require the current task to also have CAP_KILL. |
| 12088 | */ |
| 12089 | rcu_read_lock(); |
| 12090 | is_capable &= ns_capable(__task_cred(task)->user_ns, CAP_KILL); |
| 12091 | rcu_read_unlock(); |
| 12092 | |
| 12093 | /* |
 | 12094 | * If the required capabilities aren't available, fall back to the
 | 12095 | * ptrace permission check: upgrade to ATTACH, since sending signals
| 12096 | * can effectively change the target task. |
| 12097 | */ |
| 12098 | ptrace_mode = PTRACE_MODE_ATTACH_REALCREDS; |
| 12099 | } |
| 12100 | |
| 12101 | /* |
 | 12102 | * Preserve the ptrace permission check for backwards compatibility. The
 | 12103 | * ptrace check also verifies that the current task and the target task
 | 12104 | * have matching uids, so that check is not repeated here explicitly.
| 12105 | */ |
| 12106 | return is_capable || ptrace_may_access(task, ptrace_mode); |
| 12107 | } |
| 12108 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 12109 | /** |
| 12110 | * sys_perf_event_open - open a performance event, associate it to a task/cpu |
| 12111 | * |
| 12112 | * @attr_uptr: event_id type attributes for monitoring/sampling |
| 12113 | * @pid: target pid |
| 12114 | * @cpu: target cpu |
| 12115 | * @group_fd: group leader event fd |
Haocheng Xie | a1ddf52 | 2021-05-27 11:19:46 +0800 | [diff] [blame] | 12116 | * @flags: perf event open flags |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 12117 | */ |
| 12118 | SYSCALL_DEFINE5(perf_event_open, |
| 12119 | struct perf_event_attr __user *, attr_uptr, |
| 12120 | pid_t, pid, int, cpu, int, group_fd, unsigned long, flags) |
| 12121 | { |
Peter Zijlstra | b04243e | 2010-09-17 11:28:48 +0200 | [diff] [blame] | 12122 | struct perf_event *group_leader = NULL, *output_event = NULL; |
| 12123 | struct perf_event *event, *sibling; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 12124 | struct perf_event_attr attr; |
Kees Cook | 3f649ab | 2020-06-03 13:09:38 -0700 | [diff] [blame] | 12125 | struct perf_event_context *ctx, *gctx; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 12126 | struct file *event_file = NULL; |
Al Viro | 2903ff0 | 2012-08-28 12:52:22 -0400 | [diff] [blame] | 12127 | struct fd group = {NULL, 0}; |
Matt Helsley | 38a81da | 2010-09-13 13:01:20 -0700 | [diff] [blame] | 12128 | struct task_struct *task = NULL; |
Peter Zijlstra | 89a1e18 | 2010-09-07 17:34:50 +0200 | [diff] [blame] | 12129 | struct pmu *pmu; |
Al Viro | ea635c6 | 2010-05-26 17:40:29 -0400 | [diff] [blame] | 12130 | int event_fd; |
Peter Zijlstra | b04243e | 2010-09-17 11:28:48 +0200 | [diff] [blame] | 12131 | int move_group = 0; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 12132 | int err; |
Yann Droneaud | a21b0b3 | 2014-01-05 21:36:33 +0100 | [diff] [blame] | 12133 | int f_flags = O_RDWR; |
Matt Fleming | 79dff51 | 2015-01-23 18:45:42 +0000 | [diff] [blame] | 12134 | int cgroup_fd = -1; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 12135 | |
| 12136 | /* for future expandability... */ |
Stephane Eranian | e5d1367 | 2011-02-14 11:20:01 +0200 | [diff] [blame] | 12137 | if (flags & ~PERF_FLAG_ALL) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 12138 | return -EINVAL; |
| 12139 | |
Joel Fernandes (Google) | da97e18 | 2019-10-14 13:03:08 -0400 | [diff] [blame] | 12140 | /* Do we allow access to perf_event_open(2) ? */ |
| 12141 | err = security_perf_event_open(&attr, PERF_SECURITY_OPEN); |
| 12142 | if (err) |
| 12143 | return err; |
| 12144 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 12145 | err = perf_copy_attr(attr_uptr, &attr); |
| 12146 | if (err) |
| 12147 | return err; |
| 12148 | |
| 12149 | if (!attr.exclude_kernel) { |
Joel Fernandes (Google) | da97e18 | 2019-10-14 13:03:08 -0400 | [diff] [blame] | 12150 | err = perf_allow_kernel(&attr); |
| 12151 | if (err) |
| 12152 | return err; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 12153 | } |
| 12154 | |
Hari Bathini | e422267 | 2017-03-08 02:11:36 +0530 | [diff] [blame] | 12155 | if (attr.namespaces) { |
Alexey Budankov | 18aa185 | 2020-04-02 11:46:24 +0300 | [diff] [blame] | 12156 | if (!perfmon_capable()) |
Hari Bathini | e422267 | 2017-03-08 02:11:36 +0530 | [diff] [blame] | 12157 | return -EACCES; |
| 12158 | } |
| 12159 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 12160 | if (attr.freq) { |
| 12161 | if (attr.sample_freq > sysctl_perf_event_sample_rate) |
| 12162 | return -EINVAL; |
Peter Zijlstra | 0819b2e | 2014-05-15 20:23:48 +0200 | [diff] [blame] | 12163 | } else { |
| 12164 | if (attr.sample_period & (1ULL << 63)) |
| 12165 | return -EINVAL; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 12166 | } |
| 12167 | |
Kan Liang | fc7ce9c | 2017-08-28 20:52:49 -0400 | [diff] [blame] | 12168 | /* Only privileged users can get physical addresses */ |
Joel Fernandes (Google) | da97e18 | 2019-10-14 13:03:08 -0400 | [diff] [blame] | 12169 | if ((attr.sample_type & PERF_SAMPLE_PHYS_ADDR)) { |
| 12170 | err = perf_allow_kernel(&attr); |
| 12171 | if (err) |
| 12172 | return err; |
| 12173 | } |
Kan Liang | fc7ce9c | 2017-08-28 20:52:49 -0400 | [diff] [blame] | 12174 | |
Ondrej Mosnacek | 08ef1af | 2021-02-24 22:56:28 +0100 | [diff] [blame] | 12175 | /* REGS_INTR can leak data, lockdown must prevent this */ |
| 12176 | if (attr.sample_type & PERF_SAMPLE_REGS_INTR) { |
| 12177 | err = security_locked_down(LOCKDOWN_PERF); |
| 12178 | if (err) |
| 12179 | return err; |
| 12180 | } |
David Howells | b0c8fdc | 2019-08-19 17:18:00 -0700 | [diff] [blame] | 12181 | |
Stephane Eranian | e5d1367 | 2011-02-14 11:20:01 +0200 | [diff] [blame] | 12182 | /* |
| 12183 | * In cgroup mode, the pid argument is used to pass the fd |
| 12184 | * opened to the cgroup directory in cgroupfs. The cpu argument |
| 12185 | * designates the cpu on which to monitor threads from that |
| 12186 | * cgroup. |
| 12187 | */ |
| 12188 | if ((flags & PERF_FLAG_PID_CGROUP) && (pid == -1 || cpu == -1)) |
| 12189 | return -EINVAL; |
| 12190 | |
Yann Droneaud | a21b0b3 | 2014-01-05 21:36:33 +0100 | [diff] [blame] | 12191 | if (flags & PERF_FLAG_FD_CLOEXEC) |
| 12192 | f_flags |= O_CLOEXEC; |
| 12193 | |
| 12194 | event_fd = get_unused_fd_flags(f_flags); |
Al Viro | ea635c6 | 2010-05-26 17:40:29 -0400 | [diff] [blame] | 12195 | if (event_fd < 0) |
| 12196 | return event_fd; |
| 12197 | |
Peter Zijlstra | ac9721f | 2010-05-27 12:54:41 +0200 | [diff] [blame] | 12198 | if (group_fd != -1) { |
Al Viro | 2903ff0 | 2012-08-28 12:52:22 -0400 | [diff] [blame] | 12199 | err = perf_fget_light(group_fd, &group); |
| 12200 | if (err) |
Stephane Eranian | d14b12d | 2010-09-17 11:28:47 +0200 | [diff] [blame] | 12201 | goto err_fd; |
Al Viro | 2903ff0 | 2012-08-28 12:52:22 -0400 | [diff] [blame] | 12202 | group_leader = group.file->private_data; |
Peter Zijlstra | ac9721f | 2010-05-27 12:54:41 +0200 | [diff] [blame] | 12203 | if (flags & PERF_FLAG_FD_OUTPUT) |
| 12204 | output_event = group_leader; |
| 12205 | if (flags & PERF_FLAG_FD_NO_GROUP) |
| 12206 | group_leader = NULL; |
| 12207 | } |
| 12208 | |
Stephane Eranian | e5d1367 | 2011-02-14 11:20:01 +0200 | [diff] [blame] | 12209 | if (pid != -1 && !(flags & PERF_FLAG_PID_CGROUP)) { |
Peter Zijlstra | c6be5a5 | 2010-10-14 16:59:46 +0200 | [diff] [blame] | 12210 | task = find_lively_task_by_vpid(pid); |
| 12211 | if (IS_ERR(task)) { |
| 12212 | err = PTR_ERR(task); |
| 12213 | goto err_group_fd; |
| 12214 | } |
| 12215 | } |
| 12216 | |
Peter Zijlstra | 1f4ee50 | 2014-05-06 09:59:34 +0200 | [diff] [blame] | 12217 | if (task && group_leader && |
| 12218 | group_leader->attr.inherit != attr.inherit) { |
| 12219 | err = -EINVAL; |
| 12220 | goto err_task; |
| 12221 | } |
| 12222 | |
Matt Fleming | 79dff51 | 2015-01-23 18:45:42 +0000 | [diff] [blame] | 12223 | if (flags & PERF_FLAG_PID_CGROUP) |
| 12224 | cgroup_fd = pid; |
| 12225 | |
Avi Kivity | 4dc0da8 | 2011-06-29 18:42:35 +0300 | [diff] [blame] | 12226 | event = perf_event_alloc(&attr, cpu, task, group_leader, NULL, |
Matt Fleming | 79dff51 | 2015-01-23 18:45:42 +0000 | [diff] [blame] | 12227 | NULL, NULL, cgroup_fd); |
Stephane Eranian | d14b12d | 2010-09-17 11:28:47 +0200 | [diff] [blame] | 12228 | if (IS_ERR(event)) { |
| 12229 | err = PTR_ERR(event); |
peterz@infradead.org | 78af4dc | 2020-08-28 14:37:20 +0200 | [diff] [blame] | 12230 | goto err_task; |
Stephane Eranian | d14b12d | 2010-09-17 11:28:47 +0200 | [diff] [blame] | 12231 | } |
| 12232 | |
Vince Weaver | 53b2533 | 2014-05-16 17:12:12 -0400 | [diff] [blame] | 12233 | if (is_sampling_event(event)) { |
| 12234 | if (event->pmu->capabilities & PERF_PMU_CAP_NO_INTERRUPT) { |
Vineet Gupta | a139655 | 2016-05-09 15:07:40 +0530 | [diff] [blame] | 12235 | err = -EOPNOTSUPP; |
Vince Weaver | 53b2533 | 2014-05-16 17:12:12 -0400 | [diff] [blame] | 12236 | goto err_alloc; |
| 12237 | } |
| 12238 | } |
| 12239 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 12240 | /* |
Peter Zijlstra | 89a1e18 | 2010-09-07 17:34:50 +0200 | [diff] [blame] | 12241 | * Special case software events and allow them to be part of |
| 12242 | * any hardware group. |
| 12243 | */ |
| 12244 | pmu = event->pmu; |
Peter Zijlstra | b04243e | 2010-09-17 11:28:48 +0200 | [diff] [blame] | 12245 | |
Peter Zijlstra | 34f4392 | 2015-02-20 14:05:38 +0100 | [diff] [blame] | 12246 | if (attr.use_clockid) { |
| 12247 | err = perf_event_set_clock(event, attr.clockid); |
| 12248 | if (err) |
| 12249 | goto err_alloc; |
| 12250 | } |
| 12251 | |
David Carrillo-Cisneros | 4ff6a8d | 2016-08-17 13:55:05 -0700 | [diff] [blame] | 12252 | if (pmu->task_ctx_nr == perf_sw_context) |
| 12253 | event->event_caps |= PERF_EV_CAP_SOFTWARE; |
| 12254 | |
Song Liu | a1150c2 | 2018-05-03 12:47:16 -0700 | [diff] [blame] | 12255 | if (group_leader) { |
| 12256 | if (is_software_event(event) && |
| 12257 | !in_software_context(group_leader)) { |
Peter Zijlstra | b04243e | 2010-09-17 11:28:48 +0200 | [diff] [blame] | 12258 | /* |
Song Liu | a1150c2 | 2018-05-03 12:47:16 -0700 | [diff] [blame] | 12259 | * If the event is a software event, but the group_leader
 | 12260 | * is in a hardware context:
Peter Zijlstra | b04243e | 2010-09-17 11:28:48 +0200 | [diff] [blame] | 12261 | *
Song Liu | a1150c2 | 2018-05-03 12:47:16 -0700 | [diff] [blame] | 12262 | * Allow the addition of software events to hardware
 | 12263 | * groups; this is safe because software events
 | 12264 | * never fail to schedule.
Peter Zijlstra | b04243e | 2010-09-17 11:28:48 +0200 | [diff] [blame] | 12265 | */ |
Song Liu | a1150c2 | 2018-05-03 12:47:16 -0700 | [diff] [blame] | 12266 | pmu = group_leader->ctx->pmu; |
| 12267 | } else if (!is_software_event(event) && |
| 12268 | is_software_event(group_leader) && |
David Carrillo-Cisneros | 4ff6a8d | 2016-08-17 13:55:05 -0700 | [diff] [blame] | 12269 | (group_leader->group_caps & PERF_EV_CAP_SOFTWARE)) { |
Peter Zijlstra | b04243e | 2010-09-17 11:28:48 +0200 | [diff] [blame] | 12270 | /* |
| 12271 | * In case the group is a pure software group, and we |
| 12272 | * try to add a hardware event, move the whole group to |
| 12273 | * the hardware context. |
| 12274 | */ |
| 12275 | move_group = 1; |
| 12276 | } |
| 12277 | } |
Peter Zijlstra | 89a1e18 | 2010-09-07 17:34:50 +0200 | [diff] [blame] | 12278 | |
| 12279 | /* |
| 12280 | * Get the target context (task or percpu): |
| 12281 | */ |
Yan, Zheng | 4af57ef | 2014-11-04 21:56:01 -0500 | [diff] [blame] | 12282 | ctx = find_get_context(pmu, task, event); |
Peter Zijlstra | 89a1e18 | 2010-09-07 17:34:50 +0200 | [diff] [blame] | 12283 | if (IS_ERR(ctx)) { |
| 12284 | err = PTR_ERR(ctx); |
Peter Zijlstra | c6be5a5 | 2010-10-14 16:59:46 +0200 | [diff] [blame] | 12285 | goto err_alloc; |
Peter Zijlstra | 89a1e18 | 2010-09-07 17:34:50 +0200 | [diff] [blame] | 12286 | } |
| 12287 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 12288 | /* |
| 12289 | * Look up the group leader (we will attach this event to it): |
| 12290 | */ |
Peter Zijlstra | ac9721f | 2010-05-27 12:54:41 +0200 | [diff] [blame] | 12291 | if (group_leader) { |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 12292 | err = -EINVAL; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 12293 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 12294 | /* |
| 12295 | * Do not allow a recursive hierarchy (this new sibling |
| 12296 | * becoming part of another group-sibling): |
| 12297 | */ |
| 12298 | if (group_leader->group_leader != group_leader) |
Peter Zijlstra | c3f00c7 | 2010-08-18 14:37:15 +0200 | [diff] [blame] | 12299 | goto err_context; |
Peter Zijlstra | 34f4392 | 2015-02-20 14:05:38 +0100 | [diff] [blame] | 12300 | |
| 12301 | /* All events in a group should have the same clock */ |
| 12302 | if (group_leader->clock != event->clock) |
| 12303 | goto err_context; |
| 12304 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 12305 | /* |
Mark Rutland | 64aee2a | 2017-06-22 15:41:38 +0100 | [diff] [blame] | 12306 | * Make sure both events are for the same CPU;
 | 12307 | * grouping events for different CPUs is broken, since
 | 12308 | * you can never concurrently schedule them anyhow.
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 12309 | */ |
Mark Rutland | 64aee2a | 2017-06-22 15:41:38 +0100 | [diff] [blame] | 12310 | if (group_leader->cpu != event->cpu) |
| 12311 | goto err_context; |
Peter Zijlstra | c3c87e7 | 2015-01-23 11:19:48 +0100 | [diff] [blame] | 12312 | |
Mark Rutland | 64aee2a | 2017-06-22 15:41:38 +0100 | [diff] [blame] | 12313 | /* |
 | 12314 | * Make sure both events are on the same task, or both
 | 12315 | * are per-CPU events.
| 12316 | */ |
| 12317 | if (group_leader->ctx->task != ctx->task) |
| 12318 | goto err_context; |
| 12319 | |
| 12320 | /* |
| 12321 | * Do not allow to attach to a group in a different task |
| 12322 | * or CPU context. If we're moving SW events, we'll fix |
| 12323 | * this up later, so allow that. |
| 12324 | */ |
| 12325 | if (!move_group && group_leader->ctx != ctx) |
| 12326 | goto err_context; |
Peter Zijlstra | b04243e | 2010-09-17 11:28:48 +0200 | [diff] [blame] | 12327 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 12328 | /* |
| 12329 | * Only a group leader can be exclusive or pinned |
| 12330 | */ |
| 12331 | if (attr.exclusive || attr.pinned) |
Peter Zijlstra | c3f00c7 | 2010-08-18 14:37:15 +0200 | [diff] [blame] | 12332 | goto err_context; |
Peter Zijlstra | ac9721f | 2010-05-27 12:54:41 +0200 | [diff] [blame] | 12333 | } |
| 12334 | |
| 12335 | if (output_event) { |
| 12336 | err = perf_event_set_output(event, output_event); |
| 12337 | if (err) |
Peter Zijlstra | c3f00c7 | 2010-08-18 14:37:15 +0200 | [diff] [blame] | 12338 | goto err_context; |
Peter Zijlstra | ac9721f | 2010-05-27 12:54:41 +0200 | [diff] [blame] | 12339 | } |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 12340 | |
Yann Droneaud | a21b0b3 | 2014-01-05 21:36:33 +0100 | [diff] [blame] | 12341 | event_file = anon_inode_getfile("[perf_event]", &perf_fops, event, |
| 12342 | f_flags); |
Al Viro | ea635c6 | 2010-05-26 17:40:29 -0400 | [diff] [blame] | 12343 | if (IS_ERR(event_file)) { |
| 12344 | err = PTR_ERR(event_file); |
Alexander Shishkin | 201c2f8 | 2016-03-21 10:02:42 +0200 | [diff] [blame] | 12345 | event_file = NULL; |
Peter Zijlstra | c3f00c7 | 2010-08-18 14:37:15 +0200 | [diff] [blame] | 12346 | goto err_context; |
Al Viro | ea635c6 | 2010-05-26 17:40:29 -0400 | [diff] [blame] | 12347 | } |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 12348 | |
peterz@infradead.org | 78af4dc | 2020-08-28 14:37:20 +0200 | [diff] [blame] | 12349 | if (task) { |
Linus Torvalds | d01e7f1 | 2020-12-15 19:36:48 -0800 | [diff] [blame] | 12350 | err = down_read_interruptible(&task->signal->exec_update_lock); |
peterz@infradead.org | 78af4dc | 2020-08-28 14:37:20 +0200 | [diff] [blame] | 12351 | if (err) |
| 12352 | goto err_file; |
| 12353 | |
| 12354 | /* |
Linus Torvalds | d01e7f1 | 2020-12-15 19:36:48 -0800 | [diff] [blame] | 12355 | * We must hold exec_update_lock across this and any potential |
peterz@infradead.org | 78af4dc | 2020-08-28 14:37:20 +0200 | [diff] [blame] | 12356 | * perf_install_in_context() call for this new event to |
| 12357 | * serialize against exec() altering our credentials (and the |
| 12358 | * perf_event_exit_task() that could imply). |
| 12359 | */ |
| 12360 | err = -EACCES; |
Marco Elver | b068fc0 | 2021-07-05 10:44:53 +0200 | [diff] [blame] | 12361 | if (!perf_check_permission(&attr, task)) |
peterz@infradead.org | 78af4dc | 2020-08-28 14:37:20 +0200 | [diff] [blame] | 12362 | goto err_cred; |
| 12363 | } |
| 12364 | |
Peter Zijlstra | b04243e | 2010-09-17 11:28:48 +0200 | [diff] [blame] | 12365 | if (move_group) { |
Peter Zijlstra | 321027c | 2017-01-11 21:09:50 +0100 | [diff] [blame] | 12366 | gctx = __perf_event_ctx_lock_double(group_leader, ctx); |
| 12367 | |
Peter Zijlstra | 84c4e62 | 2016-02-24 18:45:40 +0100 | [diff] [blame] | 12368 | if (gctx->task == TASK_TOMBSTONE) { |
| 12369 | err = -ESRCH; |
| 12370 | goto err_locked; |
| 12371 | } |
Peter Zijlstra | 321027c | 2017-01-11 21:09:50 +0100 | [diff] [blame] | 12372 | |
| 12373 | /* |
| 12374 | * Check if we raced against another sys_perf_event_open() call |
| 12375 | * moving the software group underneath us. |
| 12376 | */ |
| 12377 | if (!(group_leader->group_caps & PERF_EV_CAP_SOFTWARE)) { |
| 12378 | /* |
| 12379 | * If someone moved the group out from under us, check |
 | 12380 | * if this new event wound up on the same ctx; if so,
 | 12381 | * it's the regular !move_group case, otherwise fail.
| 12382 | */ |
| 12383 | if (gctx != ctx) { |
| 12384 | err = -EINVAL; |
| 12385 | goto err_locked; |
| 12386 | } else { |
| 12387 | perf_event_ctx_unlock(group_leader, gctx); |
| 12388 | move_group = 0; |
| 12389 | } |
| 12390 | } |
Alexander Shishkin | 8a58dda | 2019-07-01 14:07:55 +0300 | [diff] [blame] | 12391 | |
| 12392 | /* |
| 12393 | * Failure to create exclusive events returns -EBUSY. |
| 12394 | */ |
| 12395 | err = -EBUSY; |
| 12396 | if (!exclusive_event_installable(group_leader, ctx)) |
| 12397 | goto err_locked; |
| 12398 | |
| 12399 | for_each_sibling_event(sibling, group_leader) { |
| 12400 | if (!exclusive_event_installable(sibling, ctx)) |
| 12401 | goto err_locked; |
| 12402 | } |
Peter Zijlstra | f55fc2a | 2015-09-09 19:06:33 +0200 | [diff] [blame] | 12403 | } else { |
| 12404 | mutex_lock(&ctx->mutex); |
| 12405 | } |
Peter Zijlstra | b04243e | 2010-09-17 11:28:48 +0200 | [diff] [blame] | 12406 | |
Peter Zijlstra | 84c4e62 | 2016-02-24 18:45:40 +0100 | [diff] [blame] | 12407 | if (ctx->task == TASK_TOMBSTONE) { |
| 12408 | err = -ESRCH; |
| 12409 | goto err_locked; |
| 12410 | } |
| 12411 | |
Peter Zijlstra | a723968 | 2015-09-09 19:06:33 +0200 | [diff] [blame] | 12412 | if (!perf_event_validate_size(event)) { |
| 12413 | err = -E2BIG; |
| 12414 | goto err_locked; |
| 12415 | } |
| 12416 | |
Thomas Gleixner | a63fbed | 2017-05-24 10:15:34 +0200 | [diff] [blame] | 12417 | if (!task) { |
| 12418 | /* |
| 12419 | * Check if the @cpu we're creating an event for is online. |
| 12420 | * |
| 12421 | * We use the perf_cpu_context::ctx::mutex to serialize against |
| 12422 | * the hotplug notifiers. See perf_event_{init,exit}_cpu(). |
| 12423 | */ |
| 12424 | struct perf_cpu_context *cpuctx = |
| 12425 | container_of(ctx, struct perf_cpu_context, ctx); |
| 12426 | |
| 12427 | if (!cpuctx->online) { |
| 12428 | err = -ENODEV; |
| 12429 | goto err_locked; |
| 12430 | } |
| 12431 | } |
| 12432 | |
Mark Rutland | da9ec3d | 2020-01-06 12:03:39 +0000 | [diff] [blame] | 12433 | if (perf_need_aux_event(event) && !perf_get_aux_event(event, group_leader)) { |
| 12434 | err = -EINVAL; |
Alexander Shishkin | ab43762 | 2019-08-06 11:46:00 +0300 | [diff] [blame] | 12435 | goto err_locked; |
Mark Rutland | da9ec3d | 2020-01-06 12:03:39 +0000 | [diff] [blame] | 12436 | } |
Thomas Gleixner | a63fbed | 2017-05-24 10:15:34 +0200 | [diff] [blame] | 12437 | |
Peter Zijlstra | f55fc2a | 2015-09-09 19:06:33 +0200 | [diff] [blame] | 12438 | /* |
| 12439 | * Must be under the same ctx::mutex as perf_install_in_context(), |
| 12440 | * because we need to serialize with concurrent event creation. |
| 12441 | */ |
| 12442 | if (!exclusive_event_installable(event, ctx)) { |
Peter Zijlstra | f55fc2a | 2015-09-09 19:06:33 +0200 | [diff] [blame] | 12443 | err = -EBUSY; |
| 12444 | goto err_locked; |
| 12445 | } |
| 12446 | |
| 12447 | WARN_ON_ONCE(ctx->parent_ctx); |
| 12448 | |
Peter Zijlstra | 79c9ce5 | 2016-04-26 11:36:53 +0200 | [diff] [blame] | 12449 | /* |
 | 12450 | * This is the point of no return; we cannot fail hereafter. This is
| 12451 | * where we start modifying current state. |
| 12452 | */ |
| 12453 | |
Peter Zijlstra | f55fc2a | 2015-09-09 19:06:33 +0200 | [diff] [blame] | 12454 | if (move_group) { |
Peter Zijlstra | f63a8da | 2015-01-23 12:24:14 +0100 | [diff] [blame] | 12455 | /* |
| 12456 | * See perf_event_ctx_lock() for comments on the details |
| 12457 | * of swizzling perf_event::ctx. |
| 12458 | */ |
Peter Zijlstra | 45a0e07 | 2016-01-26 13:09:48 +0100 | [diff] [blame] | 12459 | perf_remove_from_context(group_leader, 0); |
Peter Zijlstra | 279b516 | 2017-02-16 10:28:37 +0100 | [diff] [blame] | 12460 | put_ctx(gctx); |
Jiri Olsa | 0231bb5 | 2013-02-01 11:23:45 +0100 | [diff] [blame] | 12461 | |
Peter Zijlstra | edb3959 | 2018-03-15 17:36:56 +0100 | [diff] [blame] | 12462 | for_each_sibling_event(sibling, group_leader) { |
Peter Zijlstra | 45a0e07 | 2016-01-26 13:09:48 +0100 | [diff] [blame] | 12463 | perf_remove_from_context(sibling, 0); |
Peter Zijlstra | b04243e | 2010-09-17 11:28:48 +0200 | [diff] [blame] | 12464 | put_ctx(gctx); |
| 12465 | } |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 12466 | |
Peter Zijlstra | f63a8da | 2015-01-23 12:24:14 +0100 | [diff] [blame] | 12467 | /* |
| 12468 | * Wait for everybody to stop referencing the events through |
| 12469 | * the old lists, before installing it on new lists. |
| 12470 | */ |
Yan, Zheng | 0cda4c0 | 2012-06-15 14:31:33 +0800 | [diff] [blame] | 12471 | synchronize_rcu(); |
Peter Zijlstra | f63a8da | 2015-01-23 12:24:14 +0100 | [diff] [blame] | 12472 | |
Peter Zijlstra (Intel) | 8f95b43 | 2015-01-27 11:53:12 +0100 | [diff] [blame] | 12473 | /* |
| 12474 | * Install the group siblings before the group leader. |
| 12475 | * |
| 12476 | * Because a group leader will try and install the entire group |
 | 12477 | * (through the sibling list, which is still intact), we can
| 12478 | * end up with siblings installed in the wrong context. |
| 12479 | * |
| 12480 | * By installing siblings first we NO-OP because they're not |
| 12481 | * reachable through the group lists. |
| 12482 | */ |
Peter Zijlstra | edb3959 | 2018-03-15 17:36:56 +0100 | [diff] [blame] | 12483 | for_each_sibling_event(sibling, group_leader) { |
Peter Zijlstra (Intel) | 8f95b43 | 2015-01-27 11:53:12 +0100 | [diff] [blame] | 12484 | perf_event__state_init(sibling); |
Jiri Olsa | 9fc81d8 | 2014-12-10 21:23:51 +0100 | [diff] [blame] | 12485 | perf_install_in_context(ctx, sibling, sibling->cpu); |
Peter Zijlstra | b04243e | 2010-09-17 11:28:48 +0200 | [diff] [blame] | 12486 | get_ctx(ctx); |
| 12487 | } |
Peter Zijlstra (Intel) | 8f95b43 | 2015-01-27 11:53:12 +0100 | [diff] [blame] | 12488 | |
| 12489 | /* |
 | 12490 | * Removing the event from its old context leaves it
 | 12491 | * disabled. What we want here is the event in its initial
 | 12492 | * startup state, ready to be added into the new context.
| 12493 | */ |
| 12494 | perf_event__state_init(group_leader); |
| 12495 | perf_install_in_context(ctx, group_leader, group_leader->cpu); |
| 12496 | get_ctx(ctx); |
Peter Zijlstra | b04243e | 2010-09-17 11:28:48 +0200 | [diff] [blame] | 12497 | } |
| 12498 | |
Peter Zijlstra | f73e22a | 2015-09-09 20:48:22 +0200 | [diff] [blame] | 12499 | /* |
| 12500 | * Precalculate sample_data sizes; do while holding ctx::mutex such |
| 12501 | * that we're serialized against further additions and before |
 | 12502 | * perf_install_in_context(), which is the point at which the event
 | 12503 | * becomes active and can use these values.
| 12504 | */ |
| 12505 | perf_event__header_size(event); |
| 12506 | perf_event__id_header_size(event); |
Alexander Shishkin | bed5b25 | 2015-01-30 12:31:06 +0200 | [diff] [blame] | 12507 | |
Peter Zijlstra | 78cd2c7 | 2016-01-25 14:08:45 +0100 | [diff] [blame] | 12508 | event->owner = current; |
| 12509 | |
Yan, Zheng | e2d37cd | 2012-06-15 14:31:32 +0800 | [diff] [blame] | 12510 | perf_install_in_context(ctx, event, event->cpu); |
Peter Zijlstra | fe4b04f | 2011-02-02 13:19:09 +0100 | [diff] [blame] | 12511 | perf_unpin_context(ctx); |
Peter Zijlstra | f63a8da | 2015-01-23 12:24:14 +0100 | [diff] [blame] | 12512 | |
Peter Zijlstra | f55fc2a | 2015-09-09 19:06:33 +0200 | [diff] [blame] | 12513 | if (move_group) |
Peter Zijlstra | 321027c | 2017-01-11 21:09:50 +0100 | [diff] [blame] | 12514 | perf_event_ctx_unlock(group_leader, gctx); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 12515 | mutex_unlock(&ctx->mutex); |
| 12516 | |
Peter Zijlstra | 79c9ce5 | 2016-04-26 11:36:53 +0200 | [diff] [blame] | 12517 | if (task) { |
Eric W. Biederman | f7cfd87 | 2020-12-03 14:12:00 -0600 | [diff] [blame] | 12518 | up_read(&task->signal->exec_update_lock); |
Peter Zijlstra | 79c9ce5 | 2016-04-26 11:36:53 +0200 | [diff] [blame] | 12519 | put_task_struct(task); |
| 12520 | } |
| 12521 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 12522 | mutex_lock(¤t->perf_event_mutex); |
| 12523 | list_add_tail(&event->owner_entry, ¤t->perf_event_list); |
| 12524 | mutex_unlock(¤t->perf_event_mutex); |
| 12525 | |
Peter Zijlstra | 8a49542 | 2010-05-27 15:47:49 +0200 | [diff] [blame] | 12526 | /* |
| 12527 | * Drop the reference on the group_event after placing the |
 | 12528 | * new event on the sibling_list. This ensures that destruction
| 12529 | * of the group leader will find the pointer to itself in |
| 12530 | * perf_group_detach(). |
| 12531 | */ |
Al Viro | 2903ff0 | 2012-08-28 12:52:22 -0400 | [diff] [blame] | 12532 | fdput(group); |
Al Viro | ea635c6 | 2010-05-26 17:40:29 -0400 | [diff] [blame] | 12533 | fd_install(event_fd, event_file); |
| 12534 | return event_fd; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 12535 | |
Peter Zijlstra | f55fc2a | 2015-09-09 19:06:33 +0200 | [diff] [blame] | 12536 | err_locked: |
| 12537 | if (move_group) |
Peter Zijlstra | 321027c | 2017-01-11 21:09:50 +0100 | [diff] [blame] | 12538 | perf_event_ctx_unlock(group_leader, gctx); |
Peter Zijlstra | f55fc2a | 2015-09-09 19:06:33 +0200 | [diff] [blame] | 12539 | mutex_unlock(&ctx->mutex); |
peterz@infradead.org | 78af4dc | 2020-08-28 14:37:20 +0200 | [diff] [blame] | 12540 | err_cred: |
| 12541 | if (task) |
Linus Torvalds | d01e7f1 | 2020-12-15 19:36:48 -0800 | [diff] [blame] | 12542 | up_read(&task->signal->exec_update_lock); |
peterz@infradead.org | 78af4dc | 2020-08-28 14:37:20 +0200 | [diff] [blame] | 12543 | err_file: |
Peter Zijlstra | f55fc2a | 2015-09-09 19:06:33 +0200 | [diff] [blame] | 12544 | fput(event_file); |
Peter Zijlstra | c3f00c7 | 2010-08-18 14:37:15 +0200 | [diff] [blame] | 12545 | err_context: |
Peter Zijlstra | fe4b04f | 2011-02-02 13:19:09 +0100 | [diff] [blame] | 12546 | perf_unpin_context(ctx); |
Al Viro | ea635c6 | 2010-05-26 17:40:29 -0400 | [diff] [blame] | 12547 | put_ctx(ctx); |
Peter Zijlstra | c6be5a5 | 2010-10-14 16:59:46 +0200 | [diff] [blame] | 12548 | err_alloc: |
Peter Zijlstra | 1300562 | 2016-02-24 18:45:41 +0100 | [diff] [blame] | 12549 | /* |
| 12550 | * If event_file is set, the fput() above will have called ->release() |
| 12551 | * and that will take care of freeing the event. |
| 12552 | */ |
| 12553 | if (!event_file) |
| 12554 | free_event(event); |
Peter Zijlstra | 1f4ee50 | 2014-05-06 09:59:34 +0200 | [diff] [blame] | 12555 | err_task: |
Peter Zijlstra | e7d0bc0 | 2010-10-14 16:54:51 +0200 | [diff] [blame] | 12556 | if (task) |
| 12557 | put_task_struct(task); |
Peter Zijlstra | 89a1e18 | 2010-09-07 17:34:50 +0200 | [diff] [blame] | 12558 | err_group_fd: |
Al Viro | 2903ff0 | 2012-08-28 12:52:22 -0400 | [diff] [blame] | 12559 | fdput(group); |
Al Viro | ea635c6 | 2010-05-26 17:40:29 -0400 | [diff] [blame] | 12560 | err_fd: |
| 12561 | put_unused_fd(event_fd); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 12562 | return err; |
| 12563 | } |
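
/*
 * Illustrative userspace usage (a sketch, not kernel code; glibc provides no
 * wrapper, so the raw syscall is used).  Count instructions retired by the
 * calling thread on any CPU:
 *
 *	struct perf_event_attr attr = {
 *		.type		= PERF_TYPE_HARDWARE,
 *		.size		= sizeof(attr),
 *		.config		= PERF_COUNT_HW_INSTRUCTIONS,
 *		.disabled	= 1,
 *		.exclude_kernel	= 1,
 *	};
 *	long long count;
 *	int fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1,
 *			 PERF_FLAG_FD_CLOEXEC);
 *
 *	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
 *	...workload...
 *	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
 *	read(fd, &count, sizeof(count));
 */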
| 12564 | |
Arjan van de Ven | fb0459d | 2009-09-25 12:25:56 +0200 | [diff] [blame] | 12565 | /** |
| 12566 | * perf_event_create_kernel_counter |
| 12567 | * |
| 12568 | * @attr: attributes of the counter to create |
 | 12569 | * @cpu: cpu to which the counter is bound
Matt Helsley | 38a81da | 2010-09-13 13:01:20 -0700 | [diff] [blame] | 12570 | * @task: task to profile (NULL for percpu) |
Haocheng Xie | a1ddf52 | 2021-05-27 11:19:46 +0800 | [diff] [blame] | 12571 | * @overflow_handler: callback to trigger when the event overflows
 | 12572 | * @context: context data that can be used in the overflow_handler callback
Arjan van de Ven | fb0459d | 2009-09-25 12:25:56 +0200 | [diff] [blame] | 12573 | */ |
| 12574 | struct perf_event * |
| 12575 | perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu, |
Matt Helsley | 38a81da | 2010-09-13 13:01:20 -0700 | [diff] [blame] | 12576 | struct task_struct *task, |
Avi Kivity | 4dc0da8 | 2011-06-29 18:42:35 +0300 | [diff] [blame] | 12577 | perf_overflow_handler_t overflow_handler, |
| 12578 | void *context) |
Arjan van de Ven | fb0459d | 2009-09-25 12:25:56 +0200 | [diff] [blame] | 12579 | { |
Arjan van de Ven | fb0459d | 2009-09-25 12:25:56 +0200 | [diff] [blame] | 12580 | struct perf_event_context *ctx; |
Peter Zijlstra | c3f00c7 | 2010-08-18 14:37:15 +0200 | [diff] [blame] | 12581 | struct perf_event *event; |
Arjan van de Ven | fb0459d | 2009-09-25 12:25:56 +0200 | [diff] [blame] | 12582 | int err; |
| 12583 | |
Alexander Shishkin | dce5aff | 2019-10-30 15:47:31 +0200 | [diff] [blame] | 12584 | /* |
 | 12585 | * Grouping is not supported for kernel events, and neither is 'AUX';
 | 12586 | * make sure the caller's intentions are adjusted accordingly.
| 12587 | */ |
| 12588 | if (attr->aux_output) |
| 12589 | return ERR_PTR(-EINVAL); |
| 12590 | |
Avi Kivity | 4dc0da8 | 2011-06-29 18:42:35 +0300 | [diff] [blame] | 12591 | event = perf_event_alloc(attr, cpu, task, NULL, NULL, |
Matt Fleming | 79dff51 | 2015-01-23 18:45:42 +0000 | [diff] [blame] | 12592 | overflow_handler, context, -1); |
Frederic Weisbecker | c6567f6 | 2009-11-26 05:35:41 +0100 | [diff] [blame] | 12593 | if (IS_ERR(event)) { |
| 12594 | err = PTR_ERR(event); |
Peter Zijlstra | c3f00c7 | 2010-08-18 14:37:15 +0200 | [diff] [blame] | 12595 | goto err; |
| 12596 | } |
| 12597 | |
Jiri Olsa | f869776 | 2014-08-01 14:33:01 +0200 | [diff] [blame] | 12598 | /* Mark owner so we can distinguish it from user events. */
Peter Zijlstra | 63b6da3 | 2016-01-14 16:05:37 +0100 | [diff] [blame] | 12599 | event->owner = TASK_TOMBSTONE; |
Jiri Olsa | f869776 | 2014-08-01 14:33:01 +0200 | [diff] [blame] | 12600 | |
Alexander Shishkin | f25d8ba | 2019-10-30 15:47:30 +0200 | [diff] [blame] | 12601 | /* |
| 12602 | * Get the target context (task or percpu): |
| 12603 | */ |
Yan, Zheng | 4af57ef | 2014-11-04 21:56:01 -0500 | [diff] [blame] | 12604 | ctx = find_get_context(event->pmu, task, event); |
Arjan van de Ven | fb0459d | 2009-09-25 12:25:56 +0200 | [diff] [blame] | 12605 | if (IS_ERR(ctx)) { |
| 12606 | err = PTR_ERR(ctx); |
Peter Zijlstra | c3f00c7 | 2010-08-18 14:37:15 +0200 | [diff] [blame] | 12607 | goto err_free; |
Frederic Weisbecker | c6567f6 | 2009-11-26 05:35:41 +0100 | [diff] [blame] | 12608 | } |
Arjan van de Ven | fb0459d | 2009-09-25 12:25:56 +0200 | [diff] [blame] | 12609 | |
Arjan van de Ven | fb0459d | 2009-09-25 12:25:56 +0200 | [diff] [blame] | 12610 | WARN_ON_ONCE(ctx->parent_ctx); |
| 12611 | mutex_lock(&ctx->mutex); |
Peter Zijlstra | 84c4e62 | 2016-02-24 18:45:40 +0100 | [diff] [blame] | 12612 | if (ctx->task == TASK_TOMBSTONE) { |
| 12613 | err = -ESRCH; |
| 12614 | goto err_unlock; |
| 12615 | } |
| 12616 | |
Thomas Gleixner | a63fbed | 2017-05-24 10:15:34 +0200 | [diff] [blame] | 12617 | if (!task) { |
| 12618 | /* |
| 12619 | * Check if the @cpu we're creating an event for is online. |
| 12620 | * |
| 12621 | * We use the perf_cpu_context::ctx::mutex to serialize against |
| 12622 | * the hotplug notifiers. See perf_event_{init,exit}_cpu(). |
| 12623 | */ |
| 12624 | struct perf_cpu_context *cpuctx = |
| 12625 | container_of(ctx, struct perf_cpu_context, ctx); |
| 12626 | if (!cpuctx->online) { |
| 12627 | err = -ENODEV; |
| 12628 | goto err_unlock; |
| 12629 | } |
| 12630 | } |
| 12631 | |
Alexander Shishkin | bed5b25 | 2015-01-30 12:31:06 +0200 | [diff] [blame] | 12632 | if (!exclusive_event_installable(event, ctx)) { |
Alexander Shishkin | bed5b25 | 2015-01-30 12:31:06 +0200 | [diff] [blame] | 12633 | err = -EBUSY; |
Peter Zijlstra | 84c4e62 | 2016-02-24 18:45:40 +0100 | [diff] [blame] | 12634 | goto err_unlock; |
Alexander Shishkin | bed5b25 | 2015-01-30 12:31:06 +0200 | [diff] [blame] | 12635 | } |
| 12636 | |
Leonard Crestez | 4ce54af | 2019-07-24 15:53:24 +0300 | [diff] [blame] | 12637 | perf_install_in_context(ctx, event, event->cpu); |
Peter Zijlstra | fe4b04f | 2011-02-02 13:19:09 +0100 | [diff] [blame] | 12638 | perf_unpin_context(ctx); |
Arjan van de Ven | fb0459d | 2009-09-25 12:25:56 +0200 | [diff] [blame] | 12639 | mutex_unlock(&ctx->mutex); |
| 12640 | |
Arjan van de Ven | fb0459d | 2009-09-25 12:25:56 +0200 | [diff] [blame] | 12641 | return event; |
| 12642 | |
Peter Zijlstra | 84c4e62 | 2016-02-24 18:45:40 +0100 | [diff] [blame] | 12643 | err_unlock: |
| 12644 | mutex_unlock(&ctx->mutex); |
| 12645 | perf_unpin_context(ctx); |
| 12646 | put_ctx(ctx); |
Peter Zijlstra | c3f00c7 | 2010-08-18 14:37:15 +0200 | [diff] [blame] | 12647 | err_free: |
| 12648 | free_event(event); |
| 12649 | err: |
Frederic Weisbecker | c6567f6 | 2009-11-26 05:35:41 +0100 | [diff] [blame] | 12650 | return ERR_PTR(err); |
Arjan van de Ven | fb0459d | 2009-09-25 12:25:56 +0200 | [diff] [blame] | 12651 | } |
| 12652 | EXPORT_SYMBOL_GPL(perf_event_create_kernel_counter); |
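
/*
 * Illustrative in-kernel usage (a sketch; error handling abbreviated, the
 * attribute choice is only an example, and @cpu is assumed to be online):
 *
 *	struct perf_event_attr attr = {
 *		.type	= PERF_TYPE_HARDWARE,
 *		.size	= sizeof(attr),
 *		.config	= PERF_COUNT_HW_CPU_CYCLES,
 *	};
 *	struct perf_event *event;
 *
 *	event = perf_event_create_kernel_counter(&attr, cpu, NULL, NULL, NULL);
 *	if (IS_ERR(event))
 *		return PTR_ERR(event);
 *	...
 *	perf_event_release_kernel(event);
 */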
| 12653 | |
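/*
 * Move every event on @pmu's context for @src_cpu over to @dst_cpu.
 * Typically used on CPU hotplug by PMU drivers whose events are carried by a
 * single CPU on behalf of a larger unit (uncore-style PMUs, for example).
 */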
Yan, Zheng | 0cda4c0 | 2012-06-15 14:31:33 +0800 | [diff] [blame] | 12654 | void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu) |
| 12655 | { |
| 12656 | struct perf_event_context *src_ctx; |
| 12657 | struct perf_event_context *dst_ctx; |
| 12658 | struct perf_event *event, *tmp; |
| 12659 | LIST_HEAD(events); |
| 12660 | |
| 12661 | src_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, src_cpu)->ctx; |
| 12662 | dst_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, dst_cpu)->ctx; |
| 12663 | |
Peter Zijlstra | f63a8da | 2015-01-23 12:24:14 +0100 | [diff] [blame] | 12664 | /* |
| 12665 | * See perf_event_ctx_lock() for comments on the details |
| 12666 | * of swizzling perf_event::ctx. |
| 12667 | */ |
| 12668 | mutex_lock_double(&src_ctx->mutex, &dst_ctx->mutex); |
Yan, Zheng | 0cda4c0 | 2012-06-15 14:31:33 +0800 | [diff] [blame] | 12669 | list_for_each_entry_safe(event, tmp, &src_ctx->event_list, |
| 12670 | event_entry) { |
Peter Zijlstra | 45a0e07 | 2016-01-26 13:09:48 +0100 | [diff] [blame] | 12671 | perf_remove_from_context(event, 0); |
Frederic Weisbecker | 9a545de | 2013-07-23 02:31:03 +0200 | [diff] [blame] | 12672 | unaccount_event_cpu(event, src_cpu); |
Yan, Zheng | 0cda4c0 | 2012-06-15 14:31:33 +0800 | [diff] [blame] | 12673 | put_ctx(src_ctx); |
Peter Zijlstra | 9886167 | 2013-10-03 16:02:23 +0200 | [diff] [blame] | 12674 | list_add(&event->migrate_entry, &events); |
Yan, Zheng | 0cda4c0 | 2012-06-15 14:31:33 +0800 | [diff] [blame] | 12675 | } |
Yan, Zheng | 0cda4c0 | 2012-06-15 14:31:33 +0800 | [diff] [blame] | 12676 | |
Peter Zijlstra (Intel) | 8f95b43 | 2015-01-27 11:53:12 +0100 | [diff] [blame] | 12677 | /* |
| 12678 | * Wait for the events to quiesce before re-instating them. |
| 12679 | */ |
Yan, Zheng | 0cda4c0 | 2012-06-15 14:31:33 +0800 | [diff] [blame] | 12680 | synchronize_rcu(); |
| 12681 | |
Peter Zijlstra (Intel) | 8f95b43 | 2015-01-27 11:53:12 +0100 | [diff] [blame] | 12682 | /* |
| 12683 | * Re-instate events in 2 passes. |
| 12684 | * |
| 12685 | * Skip over group leaders and only install siblings on this first |
 | 12686 | * pass; siblings will not get enabled without a leader. However, a
| 12687 | * leader will enable its siblings, even if those are still on the old |
| 12688 | * context. |
| 12689 | */ |
| 12690 | list_for_each_entry_safe(event, tmp, &events, migrate_entry) { |
| 12691 | if (event->group_leader == event) |
| 12692 | continue; |
| 12693 | |
| 12694 | list_del(&event->migrate_entry); |
| 12695 | if (event->state >= PERF_EVENT_STATE_OFF) |
| 12696 | event->state = PERF_EVENT_STATE_INACTIVE; |
| 12697 | account_event_cpu(event, dst_cpu); |
| 12698 | perf_install_in_context(dst_ctx, event, dst_cpu); |
| 12699 | get_ctx(dst_ctx); |
| 12700 | } |
| 12701 | |
| 12702 | /* |
| 12703 | * Once all the siblings are setup properly, install the group leaders |
| 12704 | * to make it go. |
| 12705 | */ |
Peter Zijlstra | 9886167 | 2013-10-03 16:02:23 +0200 | [diff] [blame] | 12706 | list_for_each_entry_safe(event, tmp, &events, migrate_entry) { |
| 12707 | list_del(&event->migrate_entry); |
Yan, Zheng | 0cda4c0 | 2012-06-15 14:31:33 +0800 | [diff] [blame] | 12708 | if (event->state >= PERF_EVENT_STATE_OFF) |
| 12709 | event->state = PERF_EVENT_STATE_INACTIVE; |
Frederic Weisbecker | 9a545de | 2013-07-23 02:31:03 +0200 | [diff] [blame] | 12710 | account_event_cpu(event, dst_cpu); |
Yan, Zheng | 0cda4c0 | 2012-06-15 14:31:33 +0800 | [diff] [blame] | 12711 | perf_install_in_context(dst_ctx, event, dst_cpu); |
| 12712 | get_ctx(dst_ctx); |
| 12713 | } |
| 12714 | mutex_unlock(&dst_ctx->mutex); |
Peter Zijlstra | f63a8da | 2015-01-23 12:24:14 +0100 | [diff] [blame] | 12715 | mutex_unlock(&src_ctx->mutex); |
Yan, Zheng | 0cda4c0 | 2012-06-15 14:31:33 +0800 | [diff] [blame] | 12716 | } |
| 12717 | EXPORT_SYMBOL_GPL(perf_pmu_migrate_context); |
| 12718 | |
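/*
 * Fold a dying child event's count and enabled/running times back into its
 * parent event; if inherit_stat was requested, also emit a read event for
 * the exiting task first.
 */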
Peter Zijlstra | ef54c1a | 2021-04-08 12:35:56 +0200 | [diff] [blame] | 12719 | static void sync_child_event(struct perf_event *child_event) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 12720 | { |
| 12721 | struct perf_event *parent_event = child_event->parent; |
| 12722 | u64 child_val; |
| 12723 | |
Peter Zijlstra | ef54c1a | 2021-04-08 12:35:56 +0200 | [diff] [blame] | 12724 | if (child_event->attr.inherit_stat) { |
| 12725 | struct task_struct *task = child_event->ctx->task; |
| 12726 | |
| 12727 | if (task && task != TASK_TOMBSTONE) |
| 12728 | perf_event_read_event(child_event, task); |
| 12729 | } |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 12730 | |
Peter Zijlstra | b5e5879 | 2010-05-21 14:43:12 +0200 | [diff] [blame] | 12731 | child_val = perf_event_count(child_event); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 12732 | |
| 12733 | /* |
| 12734 | * Add back the child's count to the parent's count: |
| 12735 | */ |
Peter Zijlstra | a6e6dea | 2010-05-21 14:27:58 +0200 | [diff] [blame] | 12736 | atomic64_add(child_val, &parent_event->child_count); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 12737 | atomic64_add(child_event->total_time_enabled, |
| 12738 | &parent_event->child_total_time_enabled); |
| 12739 | atomic64_add(child_event->total_time_running, |
| 12740 | &parent_event->child_total_time_running); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 12741 | } |
| 12742 | |
| 12743 | static void |
Peter Zijlstra | ef54c1a | 2021-04-08 12:35:56 +0200 | [diff] [blame] | 12744 | perf_event_exit_event(struct perf_event *event, struct perf_event_context *ctx) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 12745 | { |
Peter Zijlstra | ef54c1a | 2021-04-08 12:35:56 +0200 | [diff] [blame] | 12746 | struct perf_event *parent_event = event->parent; |
| 12747 | unsigned long detach_flags = 0; |
| 12748 | |
| 12749 | if (parent_event) { |
| 12750 | /* |
| 12751 | * Do not destroy the 'original' grouping; because of the |
| 12752 | * context switch optimization the original events could've |
| 12753 | * ended up in a random child task. |
| 12754 | * |
| 12755 | * If we were to destroy the original group, all group related |
| 12756 | * operations would cease to function properly after this |
| 12757 | * random child dies. |
| 12758 | * |
 | 12759 | * Do destroy all inherited groups; we don't care about those
| 12760 | * and being thorough is better. |
| 12761 | */ |
| 12762 | detach_flags = DETACH_GROUP | DETACH_CHILD; |
| 12763 | mutex_lock(&parent_event->child_mutex); |
| 12764 | } |
| 12765 | |
| 12766 | perf_remove_from_context(event, detach_flags); |
| 12767 | |
| 12768 | raw_spin_lock_irq(&ctx->lock); |
| 12769 | if (event->state > PERF_EVENT_STATE_EXIT) |
| 12770 | perf_event_set_state(event, PERF_EVENT_STATE_EXIT); |
| 12771 | raw_spin_unlock_irq(&ctx->lock); |
Peter Zijlstra | 8ba289b | 2016-01-26 13:06:56 +0100 | [diff] [blame] | 12772 | |
Peter Zijlstra | 1903d50 | 2014-07-15 17:27:27 +0200 | [diff] [blame] | 12773 | /* |
Peter Zijlstra | ef54c1a | 2021-04-08 12:35:56 +0200 | [diff] [blame] | 12774 | * Child events can be freed. |
Peter Zijlstra | 1903d50 | 2014-07-15 17:27:27 +0200 | [diff] [blame] | 12775 | */ |
Peter Zijlstra | ef54c1a | 2021-04-08 12:35:56 +0200 | [diff] [blame] | 12776 | if (parent_event) { |
| 12777 | mutex_unlock(&parent_event->child_mutex); |
| 12778 | /* |
| 12779 | * Kick perf_poll() for is_event_hup(); |
| 12780 | */ |
| 12781 | perf_event_wakeup(parent_event); |
| 12782 | free_event(event); |
| 12783 | put_event(parent_event); |
| 12784 | return; |
| 12785 | } |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 12786 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 12787 | /* |
Peter Zijlstra | 8ba289b | 2016-01-26 13:06:56 +0100 | [diff] [blame] | 12788 | * Parent events are governed by their filedesc; retain them.
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 12789 | */ |
Peter Zijlstra | ef54c1a | 2021-04-08 12:35:56 +0200 | [diff] [blame] | 12790 | perf_event_wakeup(event); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 12791 | } |
| 12792 | |
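/*
 * Tear down one of the exiting task's contexts: unschedule all its events,
 * detach the context from the task (marking it TASK_TOMBSTONE), emit the
 * PERF_RECORD_EXIT side-band event, and feed each child event's counts back
 * to its parent via perf_event_exit_event().
 */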
Peter Zijlstra | 8dc85d547 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 12793 | static void perf_event_exit_task_context(struct task_struct *child, int ctxn) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 12794 | { |
Peter Zijlstra | 211de6e | 2014-09-30 19:23:08 +0200 | [diff] [blame] | 12795 | struct perf_event_context *child_ctx, *clone_ctx = NULL; |
Peter Zijlstra | 63b6da3 | 2016-01-14 16:05:37 +0100 | [diff] [blame] | 12796 | struct perf_event *child_event, *next; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 12797 | |
Peter Zijlstra | 63b6da3 | 2016-01-14 16:05:37 +0100 | [diff] [blame] | 12798 | WARN_ON_ONCE(child != current); |
| 12799 | |
Peter Zijlstra | 6a3351b | 2016-01-25 14:09:54 +0100 | [diff] [blame] | 12800 | child_ctx = perf_pin_task_context(child, ctxn); |
Peter Zijlstra | 63b6da3 | 2016-01-14 16:05:37 +0100 | [diff] [blame] | 12801 | if (!child_ctx) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 12802 | return; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 12803 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 12804 | /* |
Peter Zijlstra | 6a3351b | 2016-01-25 14:09:54 +0100 | [diff] [blame] | 12805 | * In order to reduce the amount of trickery in ctx tear-down, we hold
| 12806 | * ctx::mutex over the entire thing. This serializes against almost |
| 12807 | * everything that wants to access the ctx. |
| 12808 | * |
| 12809 | * The exception is sys_perf_event_open() / |
 | 12810 | * perf_event_create_kernel_counter() which does find_get_context()
| 12811 | * without ctx::mutex (it cannot because of the move_group double mutex |
| 12812 | * lock thing). See the comments in perf_install_in_context(). |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 12813 | */ |
Peter Zijlstra | 6a3351b | 2016-01-25 14:09:54 +0100 | [diff] [blame] | 12814 | mutex_lock(&child_ctx->mutex); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 12815 | |
| 12816 | /* |
Peter Zijlstra | 6a3351b | 2016-01-25 14:09:54 +0100 | [diff] [blame] | 12817 | * In a single ctx::lock section, de-schedule the events and detach the |
| 12818 | * context from the task such that we cannot ever get it scheduled back |
| 12819 | * in. |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 12820 | */ |
Peter Zijlstra | 6a3351b | 2016-01-25 14:09:54 +0100 | [diff] [blame] | 12821 | raw_spin_lock_irq(&child_ctx->lock); |
Alexander Shishkin | 487f05e | 2017-01-19 18:43:30 +0200 | [diff] [blame] | 12822 | task_ctx_sched_out(__get_cpu_context(child_ctx), child_ctx, EVENT_ALL); |
Peter Zijlstra | 4a1c0f2 | 2014-06-23 16:12:42 +0200 | [diff] [blame] | 12823 | |
| 12824 | /* |
Peter Zijlstra | 63b6da3 | 2016-01-14 16:05:37 +0100 | [diff] [blame] | 12825 | * Now that the context is inactive, destroy the task <-> ctx relation |
| 12826 | * and mark the context dead. |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 12827 | */ |
Peter Zijlstra | 63b6da3 | 2016-01-14 16:05:37 +0100 | [diff] [blame] | 12828 | RCU_INIT_POINTER(child->perf_event_ctxp[ctxn], NULL); |
| 12829 | put_ctx(child_ctx); /* cannot be last */ |
| 12830 | WRITE_ONCE(child_ctx->task, TASK_TOMBSTONE); |
| 12831 | put_task_struct(current); /* cannot be last */ |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 12832 | |
Peter Zijlstra | 211de6e | 2014-09-30 19:23:08 +0200 | [diff] [blame] | 12833 | clone_ctx = unclone_ctx(child_ctx); |
Peter Zijlstra | 6a3351b | 2016-01-25 14:09:54 +0100 | [diff] [blame] | 12834 | raw_spin_unlock_irq(&child_ctx->lock); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 12835 | |
Peter Zijlstra | 211de6e | 2014-09-30 19:23:08 +0200 | [diff] [blame] | 12836 | if (clone_ctx) |
| 12837 | put_ctx(clone_ctx); |
Peter Zijlstra | 4a1c0f2 | 2014-06-23 16:12:42 +0200 | [diff] [blame] | 12838 | |
| 12839 | /* |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 12840 | * Report the task dead after unscheduling the events so that we |
 | 12841 | * won't get any samples after PERF_RECORD_EXIT. We can, however, still
| 12842 | * get a few PERF_RECORD_READ events. |
| 12843 | */ |
| 12844 | perf_event_task(child, child_ctx, 0); |
| 12845 | |
Peter Zijlstra | ebf905f | 2014-05-29 19:00:24 +0200 | [diff] [blame] | 12846 | list_for_each_entry_safe(child_event, next, &child_ctx->event_list, event_entry) |
Peter Zijlstra | ef54c1a | 2021-04-08 12:35:56 +0200 | [diff] [blame] | 12847 | perf_event_exit_event(child_event, child_ctx); |
Frederic Weisbecker | 889ff01 | 2010-01-09 20:04:47 +0100 | [diff] [blame] | 12848 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 12849 | mutex_unlock(&child_ctx->mutex); |
| 12850 | |
| 12851 | put_ctx(child_ctx); |
| 12852 | } |
| 12853 | |
Peter Zijlstra | 8dc85d547 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 12854 | /* |
| 12855 | * When a child task exits, feed back event values to parent events. |
Peter Zijlstra | 79c9ce5 | 2016-04-26 11:36:53 +0200 | [diff] [blame] | 12856 | * |
Eric W. Biederman | f7cfd87 | 2020-12-03 14:12:00 -0600 | [diff] [blame] | 12857 | * Can be called with exec_update_lock held when called from |
Eric W. Biederman | 96ecee2 | 2020-05-03 06:48:17 -0500 | [diff] [blame] | 12858 | * setup_new_exec(). |
Peter Zijlstra | 8dc85d547 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 12859 | */ |
| 12860 | void perf_event_exit_task(struct task_struct *child) |
| 12861 | { |
Peter Zijlstra | 8882135 | 2010-11-09 19:01:43 +0100 | [diff] [blame] | 12862 | struct perf_event *event, *tmp; |
Peter Zijlstra | 8dc85d547 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 12863 | int ctxn; |
| 12864 | |
Peter Zijlstra | 8882135 | 2010-11-09 19:01:43 +0100 | [diff] [blame] | 12865 | mutex_lock(&child->perf_event_mutex); |
| 12866 | list_for_each_entry_safe(event, tmp, &child->perf_event_list, |
| 12867 | owner_entry) { |
| 12868 | list_del_init(&event->owner_entry); |
| 12869 | |
| 12870 | /* |
| 12871 | * Ensure the list deletion is visible before we clear |
 | 12872 | * the owner; this closes a race against perf_release() where
| 12873 | * we need to serialize on the owner->perf_event_mutex. |
| 12874 | */ |
Peter Zijlstra | f47c02c | 2016-01-26 12:30:14 +0100 | [diff] [blame] | 12875 | smp_store_release(&event->owner, NULL); |
Peter Zijlstra | 8882135 | 2010-11-09 19:01:43 +0100 | [diff] [blame] | 12876 | } |
| 12877 | mutex_unlock(&child->perf_event_mutex); |
| 12878 | |
Peter Zijlstra | 8dc85d547 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 12879 | for_each_task_context_nr(ctxn) |
| 12880 | perf_event_exit_task_context(child, ctxn); |
Jiri Olsa | 4e93ad6 | 2015-11-04 16:00:05 +0100 | [diff] [blame] | 12881 | |
| 12882 | /* |
 | 12883 | * perf_event_exit_task_context() calls perf_event_task()
 | 12884 | * with the child's task_ctx, which generates EXIT events for
| 12885 | * child contexts and sets child->perf_event_ctxp[] to NULL. |
| 12886 | * At this point we need to send EXIT events to cpu contexts. |
| 12887 | */ |
| 12888 | perf_event_task(child, NULL, 0); |
Peter Zijlstra | 8dc85d547 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 12889 | } |
| 12890 | |
Frederic Weisbecker | 889ff01 | 2010-01-09 20:04:47 +0100 | [diff] [blame] | 12891 | static void perf_free_event(struct perf_event *event, |
| 12892 | struct perf_event_context *ctx) |
| 12893 | { |
| 12894 | struct perf_event *parent = event->parent; |
| 12895 | |
| 12896 | if (WARN_ON_ONCE(!parent)) |
| 12897 | return; |
| 12898 | |
| 12899 | mutex_lock(&parent->child_mutex); |
| 12900 | list_del_init(&event->child_list); |
| 12901 | mutex_unlock(&parent->child_mutex); |
| 12902 | |
Al Viro | a6fa941 | 2012-08-20 14:59:25 +0100 | [diff] [blame] | 12903 | put_event(parent); |
Frederic Weisbecker | 889ff01 | 2010-01-09 20:04:47 +0100 | [diff] [blame] | 12904 | |
Peter Zijlstra | 652884f | 2015-01-23 11:20:10 +0100 | [diff] [blame] | 12905 | raw_spin_lock_irq(&ctx->lock); |
Peter Zijlstra | 8a49542 | 2010-05-27 15:47:49 +0200 | [diff] [blame] | 12906 | perf_group_detach(event); |
Frederic Weisbecker | 889ff01 | 2010-01-09 20:04:47 +0100 | [diff] [blame] | 12907 | list_del_event(event, ctx); |
Peter Zijlstra | 652884f | 2015-01-23 11:20:10 +0100 | [diff] [blame] | 12908 | raw_spin_unlock_irq(&ctx->lock); |
Frederic Weisbecker | 889ff01 | 2010-01-09 20:04:47 +0100 | [diff] [blame] | 12909 | free_event(event); |
| 12910 | } |
| 12911 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 12912 | /* |
Peter Zijlstra | 1cf8dfe | 2019-07-13 11:21:25 +0200 | [diff] [blame] | 12913 | * Free a context as created by inheritance by perf_event_init_task() below, |
 | 12914 | * used by fork() in case of failure.
Peter Zijlstra | 652884f | 2015-01-23 11:20:10 +0100 | [diff] [blame] | 12915 | * |
Peter Zijlstra | 1cf8dfe | 2019-07-13 11:21:25 +0200 | [diff] [blame] | 12916 | * Even though the task has never lived, the context and events have been |
| 12917 | * exposed through the child_list, so we must take care tearing it all down. |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 12918 | */ |
| 12919 | void perf_event_free_task(struct task_struct *task) |
| 12920 | { |
Peter Zijlstra | 8dc85d547 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 12921 | struct perf_event_context *ctx; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 12922 | struct perf_event *event, *tmp; |
Peter Zijlstra | 8dc85d547 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 12923 | int ctxn; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 12924 | |
Peter Zijlstra | 8dc85d547 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 12925 | for_each_task_context_nr(ctxn) { |
| 12926 | ctx = task->perf_event_ctxp[ctxn]; |
| 12927 | if (!ctx) |
| 12928 | continue; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 12929 | |
Peter Zijlstra | 8dc85d547 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 12930 | mutex_lock(&ctx->mutex); |
Peter Zijlstra | e552a83 | 2017-03-16 13:47:48 +0100 | [diff] [blame] | 12931 | raw_spin_lock_irq(&ctx->lock); |
| 12932 | /* |
| 12933 | * Destroy the task <-> ctx relation and mark the context dead. |
| 12934 | * |
| 12935 | * This is important because even though the task hasn't been |
 | 12936 | * exposed yet, the context has been (through child_list).
| 12937 | */ |
| 12938 | RCU_INIT_POINTER(task->perf_event_ctxp[ctxn], NULL); |
| 12939 | WRITE_ONCE(ctx->task, TASK_TOMBSTONE); |
| 12940 | put_task_struct(task); /* cannot be last */ |
| 12941 | raw_spin_unlock_irq(&ctx->lock); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 12942 | |
Peter Zijlstra | 15121c7 | 2017-03-16 13:47:50 +0100 | [diff] [blame] | 12943 | list_for_each_entry_safe(event, tmp, &ctx->event_list, event_entry) |
Peter Zijlstra | 8dc85d547 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 12944 | perf_free_event(event, ctx); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 12945 | |
Peter Zijlstra | 8dc85d547 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 12946 | mutex_unlock(&ctx->mutex); |
Peter Zijlstra | 1cf8dfe | 2019-07-13 11:21:25 +0200 | [diff] [blame] | 12947 | |
| 12948 | /* |
| 12949 | * perf_event_release_kernel() could've stolen some of our |
| 12950 | * child events and still have them on its free_list. In that |
| 12951 | * case we must wait for these events to have been freed (in |
| 12952 | * particular all their references to this task must've been |
| 12953 | * dropped). |
| 12954 | * |
 | 12955 | * Without this, copy_process() will unconditionally free this
| 12956 | * task (irrespective of its reference count) and |
| 12957 | * _free_event()'s put_task_struct(event->hw.target) will be a |
| 12958 | * use-after-free. |
| 12959 | * |
| 12960 | * Wait for all events to drop their context reference. |
| 12961 | */ |
| 12962 | wait_var_event(&ctx->refcount, refcount_read(&ctx->refcount) == 1); |
| 12963 | put_ctx(ctx); /* must be last */ |
Peter Zijlstra | 8dc85d547 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 12964 | } |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 12965 | } |
| 12966 | |
Peter Zijlstra | 4e231c7 | 2010-09-09 21:01:59 +0200 | [diff] [blame] | 12967 | void perf_event_delayed_put(struct task_struct *task) |
| 12968 | { |
| 12969 | int ctxn; |
| 12970 | |
| 12971 | for_each_task_context_nr(ctxn) |
| 12972 | WARN_ON_ONCE(task->perf_event_ctxp[ctxn]); |
| 12973 | } |
| 12974 | |
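/*
 * Resolve a perf event file descriptor into its struct file, taking a
 * reference on the file; returns ERR_PTR(-EBADF) if @fd does not refer to a
 * perf event.
 */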
Alexei Starovoitov | e03e7ee | 2016-01-25 20:59:49 -0800 | [diff] [blame] | 12975 | struct file *perf_event_get(unsigned int fd) |
Kaixu Xia | ffe8690 | 2015-08-06 07:02:32 +0000 | [diff] [blame] | 12976 | { |
Al Viro | 02e5ad9 | 2019-06-26 20:43:53 -0400 | [diff] [blame] | 12977 | struct file *file = fget(fd); |
Alexei Starovoitov | e03e7ee | 2016-01-25 20:59:49 -0800 | [diff] [blame] | 12978 | if (!file) |
| 12979 | return ERR_PTR(-EBADF); |
Kaixu Xia | ffe8690 | 2015-08-06 07:02:32 +0000 | [diff] [blame] | 12980 | |
Alexei Starovoitov | e03e7ee | 2016-01-25 20:59:49 -0800 | [diff] [blame] | 12981 | if (file->f_op != &perf_fops) { |
| 12982 | fput(file); |
| 12983 | return ERR_PTR(-EBADF); |
| 12984 | } |
Kaixu Xia | ffe8690 | 2015-08-06 07:02:32 +0000 | [diff] [blame] | 12985 | |
Alexei Starovoitov | e03e7ee | 2016-01-25 20:59:49 -0800 | [diff] [blame] | 12986 | return file; |
Kaixu Xia | ffe8690 | 2015-08-06 07:02:32 +0000 | [diff] [blame] | 12987 | } |
| 12988 | |
Yonghong Song | f8d959a | 2018-05-24 11:21:08 -0700 | [diff] [blame] | 12989 | const struct perf_event *perf_get_event(struct file *file) |
| 12990 | { |
| 12991 | if (file->f_op != &perf_fops) |
| 12992 | return ERR_PTR(-EINVAL); |
| 12993 | |
| 12994 | return file->private_data; |
| 12995 | } |
| 12996 | |
Kaixu Xia | ffe8690 | 2015-08-06 07:02:32 +0000 | [diff] [blame] | 12997 | const struct perf_event_attr *perf_event_attrs(struct perf_event *event) |
| 12998 | { |
| 12999 | if (!event) |
| 13000 | return ERR_PTR(-EINVAL); |
| 13001 | |
| 13002 | return &event->attr; |
| 13003 | } |
| 13004 | |
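/*
 * Illustrative sketch only: perf_example_event_type() is a hypothetical
 * caller, not an existing kernel function. It shows how perf_event_get()
 * and perf_get_event() compose: resolve an fd to the perf event file,
 * read the event's type from its attributes, and drop the file reference.
 */
static int __maybe_unused perf_example_event_type(unsigned int fd, __u32 *type)
{
	struct file *file;
	const struct perf_event *event;

	file = perf_event_get(fd);		/* takes a reference on success */
	if (IS_ERR(file))
		return PTR_ERR(file);

	event = perf_get_event(file);		/* borrows @file, no new reference */
	if (IS_ERR(event)) {
		fput(file);
		return PTR_ERR(event);
	}

	*type = event->attr.type;
	fput(file);
	return 0;
}
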
Peter Zijlstra | 97dee4f | 2010-09-07 15:35:33 +0200 | [diff] [blame] | 13005 | /* |
Tobias Tefke | 788faab | 2018-07-09 12:57:15 +0200 | [diff] [blame] | 13006 | * Inherit an event from parent task to child task. |
Peter Zijlstra | d8a8cfc | 2017-03-16 13:47:51 +0100 | [diff] [blame] | 13007 | * |
| 13008 | * Returns: |
| 13009 | * - valid pointer on success |
| 13010 | * - NULL for orphaned events |
| 13011 | * - IS_ERR() on error |
Peter Zijlstra | 97dee4f | 2010-09-07 15:35:33 +0200 | [diff] [blame] | 13012 | */ |
| 13013 | static struct perf_event * |
| 13014 | inherit_event(struct perf_event *parent_event, |
| 13015 | struct task_struct *parent, |
| 13016 | struct perf_event_context *parent_ctx, |
| 13017 | struct task_struct *child, |
| 13018 | struct perf_event *group_leader, |
| 13019 | struct perf_event_context *child_ctx) |
| 13020 | { |
Peter Zijlstra | 8ca2bd4 | 2017-09-05 14:12:35 +0200 | [diff] [blame] | 13021 | enum perf_event_state parent_state = parent_event->state; |
Peter Zijlstra | 97dee4f | 2010-09-07 15:35:33 +0200 | [diff] [blame] | 13022 | struct perf_event *child_event; |
Peter Zijlstra | cee010e | 2010-09-10 12:51:54 +0200 | [diff] [blame] | 13023 | unsigned long flags; |
Peter Zijlstra | 97dee4f | 2010-09-07 15:35:33 +0200 | [diff] [blame] | 13024 | |
| 13025 | /* |
| 13026 | * Instead of creating recursive hierarchies of events, |
| 13027 | * we link inherited events back to the original parent, |
 | 13028 | 	 * which is guaranteed to have a filp that we use as the |
 | 13029 | 	 * reference count: |
| 13030 | */ |
| 13031 | if (parent_event->parent) |
| 13032 | parent_event = parent_event->parent; |
| 13033 | |
| 13034 | child_event = perf_event_alloc(&parent_event->attr, |
| 13035 | parent_event->cpu, |
Peter Zijlstra | d580ff8 | 2010-10-14 17:43:23 +0200 | [diff] [blame] | 13036 | child, |
Peter Zijlstra | 97dee4f | 2010-09-07 15:35:33 +0200 | [diff] [blame] | 13037 | group_leader, parent_event, |
Matt Fleming | 79dff51 | 2015-01-23 18:45:42 +0000 | [diff] [blame] | 13038 | NULL, NULL, -1); |
Peter Zijlstra | 97dee4f | 2010-09-07 15:35:33 +0200 | [diff] [blame] | 13039 | if (IS_ERR(child_event)) |
| 13040 | return child_event; |
Al Viro | a6fa941 | 2012-08-20 14:59:25 +0100 | [diff] [blame] | 13041 | 
| 13043 | if ((child_event->attach_state & PERF_ATTACH_TASK_DATA) && |
| 13044 | !child_ctx->task_ctx_data) { |
| 13045 | struct pmu *pmu = child_event->pmu; |
| 13046 | |
Kan Liang | ff9ff92 | 2020-07-03 05:49:21 -0700 | [diff] [blame] | 13047 | child_ctx->task_ctx_data = alloc_task_ctx_data(pmu); |
Jiri Olsa | 313ccb9 | 2018-01-07 17:03:47 +0100 | [diff] [blame] | 13048 | if (!child_ctx->task_ctx_data) { |
| 13049 | free_event(child_event); |
Alexander Shishkin | 697d877 | 2019-11-05 09:57:02 +0200 | [diff] [blame] | 13050 | return ERR_PTR(-ENOMEM); |
Jiri Olsa | 313ccb9 | 2018-01-07 17:03:47 +0100 | [diff] [blame] | 13051 | } |
| 13052 | } |
| 13053 | |
Peter Zijlstra | c6e5b73 | 2016-01-15 16:07:41 +0200 | [diff] [blame] | 13054 | /* |
| 13055 | * is_orphaned_event() and list_add_tail(&parent_event->child_list) |
| 13056 | * must be under the same lock in order to serialize against |
 | 13057 | 	 * perf_event_release_kernel(), such that either we observe |
 | 13058 | 	 * is_orphaned_event() or it observes us on the child_list. |
| 13059 | */ |
| 13060 | mutex_lock(&parent_event->child_mutex); |
Jiri Olsa | fadfe7b | 2014-08-01 14:33:02 +0200 | [diff] [blame] | 13061 | if (is_orphaned_event(parent_event) || |
| 13062 | !atomic_long_inc_not_zero(&parent_event->refcount)) { |
Peter Zijlstra | c6e5b73 | 2016-01-15 16:07:41 +0200 | [diff] [blame] | 13063 | mutex_unlock(&parent_event->child_mutex); |
Jiri Olsa | 313ccb9 | 2018-01-07 17:03:47 +0100 | [diff] [blame] | 13064 | /* task_ctx_data is freed with child_ctx */ |
Al Viro | a6fa941 | 2012-08-20 14:59:25 +0100 | [diff] [blame] | 13065 | free_event(child_event); |
| 13066 | return NULL; |
| 13067 | } |
| 13068 | |
Peter Zijlstra | 97dee4f | 2010-09-07 15:35:33 +0200 | [diff] [blame] | 13069 | get_ctx(child_ctx); |
| 13070 | |
| 13071 | /* |
| 13072 | * Make the child state follow the state of the parent event, |
| 13073 | * not its attr.disabled bit. We hold the parent's mutex, |
| 13074 | * so we won't race with perf_event_{en, dis}able_family. |
| 13075 | */ |
Jiri Olsa | 1929def | 2014-09-12 13:18:27 +0200 | [diff] [blame] | 13076 | if (parent_state >= PERF_EVENT_STATE_INACTIVE) |
Peter Zijlstra | 97dee4f | 2010-09-07 15:35:33 +0200 | [diff] [blame] | 13077 | child_event->state = PERF_EVENT_STATE_INACTIVE; |
| 13078 | else |
| 13079 | child_event->state = PERF_EVENT_STATE_OFF; |
| 13080 | |
| 13081 | if (parent_event->attr.freq) { |
| 13082 | u64 sample_period = parent_event->hw.sample_period; |
| 13083 | struct hw_perf_event *hwc = &child_event->hw; |
| 13084 | |
| 13085 | hwc->sample_period = sample_period; |
| 13086 | hwc->last_period = sample_period; |
| 13087 | |
| 13088 | local64_set(&hwc->period_left, sample_period); |
| 13089 | } |
| 13090 | |
| 13091 | child_event->ctx = child_ctx; |
| 13092 | child_event->overflow_handler = parent_event->overflow_handler; |
Avi Kivity | 4dc0da8 | 2011-06-29 18:42:35 +0300 | [diff] [blame] | 13093 | child_event->overflow_handler_context |
| 13094 | = parent_event->overflow_handler_context; |
Peter Zijlstra | 97dee4f | 2010-09-07 15:35:33 +0200 | [diff] [blame] | 13095 | |
| 13096 | /* |
Thomas Gleixner | 614b678 | 2010-12-03 16:24:32 -0200 | [diff] [blame] | 13097 | * Precalculate sample_data sizes |
| 13098 | */ |
| 13099 | perf_event__header_size(child_event); |
Arnaldo Carvalho de Melo | 6844c09 | 2010-12-03 16:36:35 -0200 | [diff] [blame] | 13100 | perf_event__id_header_size(child_event); |
Thomas Gleixner | 614b678 | 2010-12-03 16:24:32 -0200 | [diff] [blame] | 13101 | |
| 13102 | /* |
Peter Zijlstra | 97dee4f | 2010-09-07 15:35:33 +0200 | [diff] [blame] | 13103 | * Link it up in the child's context: |
| 13104 | */ |
Peter Zijlstra | cee010e | 2010-09-10 12:51:54 +0200 | [diff] [blame] | 13105 | raw_spin_lock_irqsave(&child_ctx->lock, flags); |
Peter Zijlstra | 97dee4f | 2010-09-07 15:35:33 +0200 | [diff] [blame] | 13106 | add_event_to_ctx(child_event, child_ctx); |
Peter Zijlstra | ef54c1a | 2021-04-08 12:35:56 +0200 | [diff] [blame] | 13107 | child_event->attach_state |= PERF_ATTACH_CHILD; |
Peter Zijlstra | cee010e | 2010-09-10 12:51:54 +0200 | [diff] [blame] | 13108 | raw_spin_unlock_irqrestore(&child_ctx->lock, flags); |
Peter Zijlstra | 97dee4f | 2010-09-07 15:35:33 +0200 | [diff] [blame] | 13109 | |
| 13110 | /* |
Peter Zijlstra | 97dee4f | 2010-09-07 15:35:33 +0200 | [diff] [blame] | 13111 | * Link this into the parent event's child list |
| 13112 | */ |
Peter Zijlstra | 97dee4f | 2010-09-07 15:35:33 +0200 | [diff] [blame] | 13113 | list_add_tail(&child_event->child_list, &parent_event->child_list); |
| 13114 | mutex_unlock(&parent_event->child_mutex); |
| 13115 | |
| 13116 | return child_event; |
| 13117 | } |
| 13118 | |
Peter Zijlstra | d8a8cfc | 2017-03-16 13:47:51 +0100 | [diff] [blame] | 13119 | /* |
| 13120 | * Inherits an event group. |
| 13121 | * |
 | 13122 |  * This will quietly suppress orphaned events; a NULL return from inherit_event() is not an error. |
| 13123 | * This matches with perf_event_release_kernel() removing all child events. |
| 13124 | * |
| 13125 | * Returns: |
| 13126 | * - 0 on success |
| 13127 | * - <0 on error |
| 13128 | */ |
Peter Zijlstra | 97dee4f | 2010-09-07 15:35:33 +0200 | [diff] [blame] | 13129 | static int inherit_group(struct perf_event *parent_event, |
| 13130 | struct task_struct *parent, |
| 13131 | struct perf_event_context *parent_ctx, |
| 13132 | struct task_struct *child, |
| 13133 | struct perf_event_context *child_ctx) |
| 13134 | { |
| 13135 | struct perf_event *leader; |
| 13136 | struct perf_event *sub; |
| 13137 | struct perf_event *child_ctr; |
| 13138 | |
| 13139 | leader = inherit_event(parent_event, parent, parent_ctx, |
| 13140 | child, NULL, child_ctx); |
| 13141 | if (IS_ERR(leader)) |
| 13142 | return PTR_ERR(leader); |
Peter Zijlstra | d8a8cfc | 2017-03-16 13:47:51 +0100 | [diff] [blame] | 13143 | /* |
| 13144 | * @leader can be NULL here because of is_orphaned_event(). In this |
| 13145 | * case inherit_event() will create individual events, similar to what |
| 13146 | * perf_group_detach() would do anyway. |
| 13147 | */ |
Peter Zijlstra | edb3959 | 2018-03-15 17:36:56 +0100 | [diff] [blame] | 13148 | for_each_sibling_event(sub, parent_event) { |
Peter Zijlstra | 97dee4f | 2010-09-07 15:35:33 +0200 | [diff] [blame] | 13149 | child_ctr = inherit_event(sub, parent, parent_ctx, |
| 13150 | child, leader, child_ctx); |
| 13151 | if (IS_ERR(child_ctr)) |
| 13152 | return PTR_ERR(child_ctr); |
Alexander Shishkin | f733c6b | 2019-10-04 15:57:29 +0300 | [diff] [blame] | 13153 | |
Alexander Shishkin | 00496fe | 2019-11-01 17:12:48 +0200 | [diff] [blame] | 13154 | if (sub->aux_event == parent_event && child_ctr && |
Alexander Shishkin | f733c6b | 2019-10-04 15:57:29 +0300 | [diff] [blame] | 13155 | !perf_get_aux_event(child_ctr, leader)) |
| 13156 | return -EINVAL; |
Peter Zijlstra | 97dee4f | 2010-09-07 15:35:33 +0200 | [diff] [blame] | 13157 | } |
| 13158 | return 0; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 13159 | } |
| 13160 | |
Peter Zijlstra | d8a8cfc | 2017-03-16 13:47:51 +0100 | [diff] [blame] | 13161 | /* |
| 13162 | * Creates the child task context and tries to inherit the event-group. |
| 13163 | * |
| 13164 | * Clears @inherited_all on !attr.inherited or error. Note that we'll leave |
| 13165 | * inherited_all set when we 'fail' to inherit an orphaned event; this is |
| 13166 | * consistent with perf_event_release_kernel() removing all child events. |
| 13167 | * |
| 13168 | * Returns: |
| 13169 | * - 0 on success |
| 13170 | * - <0 on error |
| 13171 | */ |
Frederic Weisbecker | 889ff01 | 2010-01-09 20:04:47 +0100 | [diff] [blame] | 13172 | static int |
| 13173 | inherit_task_group(struct perf_event *event, struct task_struct *parent, |
| 13174 | struct perf_event_context *parent_ctx, |
Peter Zijlstra | 8dc85d547 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 13175 | struct task_struct *child, int ctxn, |
Marco Elver | 2b26f0a | 2021-04-08 12:35:58 +0200 | [diff] [blame] | 13176 | u64 clone_flags, int *inherited_all) |
Frederic Weisbecker | 889ff01 | 2010-01-09 20:04:47 +0100 | [diff] [blame] | 13177 | { |
| 13178 | int ret; |
Peter Zijlstra | 8dc85d547 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 13179 | struct perf_event_context *child_ctx; |
Frederic Weisbecker | 889ff01 | 2010-01-09 20:04:47 +0100 | [diff] [blame] | 13180 | |
Marco Elver | 2b26f0a | 2021-04-08 12:35:58 +0200 | [diff] [blame] | 13181 | if (!event->attr.inherit || |
Marco Elver | 97ba62b | 2021-04-08 12:36:01 +0200 | [diff] [blame] | 13182 | (event->attr.inherit_thread && !(clone_flags & CLONE_THREAD)) || |
| 13183 | /* Do not inherit if sigtrap and signal handlers were cleared. */ |
| 13184 | (event->attr.sigtrap && (clone_flags & CLONE_CLEAR_SIGHAND))) { |
Frederic Weisbecker | 889ff01 | 2010-01-09 20:04:47 +0100 | [diff] [blame] | 13185 | *inherited_all = 0; |
| 13186 | return 0; |
| 13187 | } |
| 13188 | |
Peter Zijlstra | fe4b04f | 2011-02-02 13:19:09 +0100 | [diff] [blame] | 13189 | child_ctx = child->perf_event_ctxp[ctxn]; |
Frederic Weisbecker | 889ff01 | 2010-01-09 20:04:47 +0100 | [diff] [blame] | 13190 | if (!child_ctx) { |
| 13191 | /* |
| 13192 | * This is executed from the parent task context, so |
| 13193 | * inherit events that have been marked for cloning. |
| 13194 | * First allocate and initialize a context for the |
| 13195 | * child. |
| 13196 | */ |
Jiri Olsa | 734df5a | 2013-07-09 17:44:10 +0200 | [diff] [blame] | 13197 | child_ctx = alloc_perf_context(parent_ctx->pmu, child); |
Frederic Weisbecker | 889ff01 | 2010-01-09 20:04:47 +0100 | [diff] [blame] | 13198 | if (!child_ctx) |
| 13199 | return -ENOMEM; |
| 13200 | |
Peter Zijlstra | 8dc85d547 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 13201 | child->perf_event_ctxp[ctxn] = child_ctx; |
Frederic Weisbecker | 889ff01 | 2010-01-09 20:04:47 +0100 | [diff] [blame] | 13202 | } |
| 13203 | |
| 13204 | ret = inherit_group(event, parent, parent_ctx, |
| 13205 | child, child_ctx); |
| 13206 | |
| 13207 | if (ret) |
| 13208 | *inherited_all = 0; |
| 13209 | |
| 13210 | return ret; |
| 13211 | } |
| 13212 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 13213 | /* |
 | 13214 |  * Initialize one perf_event context (@ctxn) in the child task_struct |
| 13215 | */ |
Marco Elver | 2b26f0a | 2021-04-08 12:35:58 +0200 | [diff] [blame] | 13216 | static int perf_event_init_context(struct task_struct *child, int ctxn, |
| 13217 | u64 clone_flags) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 13218 | { |
Frederic Weisbecker | 889ff01 | 2010-01-09 20:04:47 +0100 | [diff] [blame] | 13219 | struct perf_event_context *child_ctx, *parent_ctx; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 13220 | struct perf_event_context *cloned_ctx; |
| 13221 | struct perf_event *event; |
| 13222 | struct task_struct *parent = current; |
| 13223 | int inherited_all = 1; |
Thomas Gleixner | dddd337 | 2010-11-24 10:05:55 +0100 | [diff] [blame] | 13224 | unsigned long flags; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 13225 | int ret = 0; |
| 13226 | |
Peter Zijlstra | 8dc85d547 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 13227 | if (likely(!parent->perf_event_ctxp[ctxn])) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 13228 | return 0; |
| 13229 | |
| 13230 | /* |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 13231 | * If the parent's context is a clone, pin it so it won't get |
| 13232 | * swapped under us. |
| 13233 | */ |
Peter Zijlstra | 8dc85d547 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 13234 | parent_ctx = perf_pin_task_context(parent, ctxn); |
Peter Zijlstra | ffb4ef2 | 2014-05-05 19:12:20 +0200 | [diff] [blame] | 13235 | if (!parent_ctx) |
| 13236 | return 0; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 13237 | |
| 13238 | /* |
| 13239 | * No need to check if parent_ctx != NULL here; since we saw |
| 13240 | * it non-NULL earlier, the only reason for it to become NULL |
| 13241 | * is if we exit, and since we're currently in the middle of |
| 13242 | * a fork we can't be exiting at the same time. |
| 13243 | */ |
| 13244 | |
| 13245 | /* |
| 13246 | * Lock the parent list. No need to lock the child - not PID |
| 13247 | * hashed yet and not running, so nobody can access it. |
| 13248 | */ |
| 13249 | mutex_lock(&parent_ctx->mutex); |
| 13250 | |
| 13251 | /* |
 | 13252 | 	 * We don't have to disable NMIs - we are only looking at |
| 13253 | * the list, not manipulating it: |
| 13254 | */ |
Peter Zijlstra | 6e6804d | 2017-11-13 14:28:41 +0100 | [diff] [blame] | 13255 | perf_event_groups_for_each(event, &parent_ctx->pinned_groups) { |
Peter Zijlstra | 8dc85d547 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 13256 | ret = inherit_task_group(event, parent, parent_ctx, |
Marco Elver | 2b26f0a | 2021-04-08 12:35:58 +0200 | [diff] [blame] | 13257 | child, ctxn, clone_flags, |
| 13258 | &inherited_all); |
Frederic Weisbecker | 889ff01 | 2010-01-09 20:04:47 +0100 | [diff] [blame] | 13259 | if (ret) |
Peter Zijlstra | e7cc486 | 2017-03-16 13:47:49 +0100 | [diff] [blame] | 13260 | goto out_unlock; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 13261 | } |
| 13262 | |
Thomas Gleixner | dddd337 | 2010-11-24 10:05:55 +0100 | [diff] [blame] | 13263 | /* |
 | 13264 | 	 * We can't hold ctx->lock when iterating the ->flexible_groups list due |
| 13265 | * to allocations, but we need to prevent rotation because |
| 13266 | * rotate_ctx() will change the list from interrupt context. |
| 13267 | */ |
| 13268 | raw_spin_lock_irqsave(&parent_ctx->lock, flags); |
| 13269 | parent_ctx->rotate_disable = 1; |
| 13270 | raw_spin_unlock_irqrestore(&parent_ctx->lock, flags); |
| 13271 | |
Peter Zijlstra | 6e6804d | 2017-11-13 14:28:41 +0100 | [diff] [blame] | 13272 | perf_event_groups_for_each(event, &parent_ctx->flexible_groups) { |
Peter Zijlstra | 8dc85d547 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 13273 | ret = inherit_task_group(event, parent, parent_ctx, |
Marco Elver | 2b26f0a | 2021-04-08 12:35:58 +0200 | [diff] [blame] | 13274 | child, ctxn, clone_flags, |
| 13275 | &inherited_all); |
Frederic Weisbecker | 889ff01 | 2010-01-09 20:04:47 +0100 | [diff] [blame] | 13276 | if (ret) |
Peter Zijlstra | e7cc486 | 2017-03-16 13:47:49 +0100 | [diff] [blame] | 13277 | goto out_unlock; |
Frederic Weisbecker | 889ff01 | 2010-01-09 20:04:47 +0100 | [diff] [blame] | 13278 | } |
| 13279 | |
Thomas Gleixner | dddd337 | 2010-11-24 10:05:55 +0100 | [diff] [blame] | 13280 | raw_spin_lock_irqsave(&parent_ctx->lock, flags); |
| 13281 | parent_ctx->rotate_disable = 0; |
Thomas Gleixner | dddd337 | 2010-11-24 10:05:55 +0100 | [diff] [blame] | 13282 | |
Peter Zijlstra | 8dc85d547 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 13283 | child_ctx = child->perf_event_ctxp[ctxn]; |
Frederic Weisbecker | 889ff01 | 2010-01-09 20:04:47 +0100 | [diff] [blame] | 13284 | |
Peter Zijlstra | 05cbaa2 | 2009-12-30 16:00:35 +0100 | [diff] [blame] | 13285 | if (child_ctx && inherited_all) { |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 13286 | /* |
| 13287 | * Mark the child context as a clone of the parent |
| 13288 | * context, or of whatever the parent is a clone of. |
Peter Zijlstra | c5ed514 | 2011-01-17 13:45:37 +0100 | [diff] [blame] | 13289 | * |
| 13290 | * Note that if the parent is a clone, the holding of |
 | 13291 | 		 * parent_ctx->lock prevents it from being uncloned. |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 13292 | */ |
Peter Zijlstra | c5ed514 | 2011-01-17 13:45:37 +0100 | [diff] [blame] | 13293 | cloned_ctx = parent_ctx->parent_ctx; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 13294 | if (cloned_ctx) { |
| 13295 | child_ctx->parent_ctx = cloned_ctx; |
| 13296 | child_ctx->parent_gen = parent_ctx->parent_gen; |
| 13297 | } else { |
| 13298 | child_ctx->parent_ctx = parent_ctx; |
| 13299 | child_ctx->parent_gen = parent_ctx->generation; |
| 13300 | } |
| 13301 | get_ctx(child_ctx->parent_ctx); |
| 13302 | } |
| 13303 | |
Peter Zijlstra | c5ed514 | 2011-01-17 13:45:37 +0100 | [diff] [blame] | 13304 | raw_spin_unlock_irqrestore(&parent_ctx->lock, flags); |
Peter Zijlstra | e7cc486 | 2017-03-16 13:47:49 +0100 | [diff] [blame] | 13305 | out_unlock: |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 13306 | mutex_unlock(&parent_ctx->mutex); |
| 13307 | |
| 13308 | perf_unpin_context(parent_ctx); |
Peter Zijlstra | fe4b04f | 2011-02-02 13:19:09 +0100 | [diff] [blame] | 13309 | put_ctx(parent_ctx); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 13310 | |
| 13311 | return ret; |
| 13312 | } |
| 13313 | |
Peter Zijlstra | 8dc85d547 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 13314 | /* |
 | 13315 |  * Initialize the perf_event contexts in the child task_struct |
| 13316 | */ |
Marco Elver | 2b26f0a | 2021-04-08 12:35:58 +0200 | [diff] [blame] | 13317 | int perf_event_init_task(struct task_struct *child, u64 clone_flags) |
Peter Zijlstra | 8dc85d547 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 13318 | { |
| 13319 | int ctxn, ret; |
| 13320 | |
Oleg Nesterov | 8550d7c | 2011-01-19 19:22:28 +0100 | [diff] [blame] | 13321 | memset(child->perf_event_ctxp, 0, sizeof(child->perf_event_ctxp)); |
| 13322 | mutex_init(&child->perf_event_mutex); |
| 13323 | INIT_LIST_HEAD(&child->perf_event_list); |
| 13324 | |
Peter Zijlstra | 8dc85d547 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 13325 | for_each_task_context_nr(ctxn) { |
Marco Elver | 2b26f0a | 2021-04-08 12:35:58 +0200 | [diff] [blame] | 13326 | ret = perf_event_init_context(child, ctxn, clone_flags); |
Peter Zijlstra | 6c72e350 | 2014-10-02 16:17:02 -0700 | [diff] [blame] | 13327 | if (ret) { |
| 13328 | perf_event_free_task(child); |
Peter Zijlstra | 8dc85d547 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 13329 | return ret; |
Peter Zijlstra | 6c72e350 | 2014-10-02 16:17:02 -0700 | [diff] [blame] | 13330 | } |
Peter Zijlstra | 8dc85d547 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 13331 | } |
| 13332 | |
| 13333 | return 0; |
| 13334 | } |
| 13335 | |
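/*
 * Boot-time setup of the per-CPU perf state: the software event hash
 * tables, the active-context and side-band event lists and, with
 * CONFIG_CGROUP_PERF, the per-CPU cgroup event lists.
 */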
Paul Mackerras | 220b140 | 2010-03-10 20:45:52 +1100 | [diff] [blame] | 13336 | static void __init perf_event_init_all_cpus(void) |
| 13337 | { |
Peter Zijlstra | b28ab83 | 2010-09-06 14:48:15 +0200 | [diff] [blame] | 13338 | struct swevent_htable *swhash; |
Paul Mackerras | 220b140 | 2010-03-10 20:45:52 +1100 | [diff] [blame] | 13339 | int cpu; |
Paul Mackerras | 220b140 | 2010-03-10 20:45:52 +1100 | [diff] [blame] | 13340 | |
Thomas Gleixner | a63fbed | 2017-05-24 10:15:34 +0200 | [diff] [blame] | 13341 | zalloc_cpumask_var(&perf_online_mask, GFP_KERNEL); |
| 13342 | |
Paul Mackerras | 220b140 | 2010-03-10 20:45:52 +1100 | [diff] [blame] | 13343 | for_each_possible_cpu(cpu) { |
Peter Zijlstra | b28ab83 | 2010-09-06 14:48:15 +0200 | [diff] [blame] | 13344 | swhash = &per_cpu(swevent_htable, cpu); |
| 13345 | mutex_init(&swhash->hlist_mutex); |
Mark Rutland | 2fde4f9 | 2015-01-07 15:01:54 +0000 | [diff] [blame] | 13346 | INIT_LIST_HEAD(&per_cpu(active_ctx_list, cpu)); |
Kan Liang | f2fb6be | 2016-03-23 11:24:37 -0700 | [diff] [blame] | 13347 | |
| 13348 | INIT_LIST_HEAD(&per_cpu(pmu_sb_events.list, cpu)); |
| 13349 | raw_spin_lock_init(&per_cpu(pmu_sb_events.lock, cpu)); |
Peter Zijlstra | e48c178 | 2016-07-06 09:18:30 +0200 | [diff] [blame] | 13350 | |
David Carrillo-Cisneros | 058fe1c | 2017-01-18 11:24:53 -0800 | [diff] [blame] | 13351 | #ifdef CONFIG_CGROUP_PERF |
| 13352 | INIT_LIST_HEAD(&per_cpu(cgrp_cpuctx_list, cpu)); |
| 13353 | #endif |
Kan Liang | a5398bf | 2020-11-30 11:38:40 -0800 | [diff] [blame] | 13354 | INIT_LIST_HEAD(&per_cpu(sched_cb_list, cpu)); |
Paul Mackerras | 220b140 | 2010-03-10 20:45:52 +1100 | [diff] [blame] | 13355 | } |
| 13356 | } |
| 13357 | |
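/*
 * Make sure the software event hash list for @cpu exists once the CPU
 * comes up, but only if software events are already in use
 * (hlist_refcount is non-zero).
 */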
Valdis Kletnieks | d18bf42 | 2019-03-12 04:06:37 -0400 | [diff] [blame] | 13358 | static void perf_swevent_init_cpu(unsigned int cpu) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 13359 | { |
Peter Zijlstra | 108b02c | 2010-09-06 14:32:03 +0200 | [diff] [blame] | 13360 | struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 13361 | |
Peter Zijlstra | b28ab83 | 2010-09-06 14:48:15 +0200 | [diff] [blame] | 13362 | mutex_lock(&swhash->hlist_mutex); |
Thomas Gleixner | 059fcd8 | 2016-02-09 20:11:34 +0000 | [diff] [blame] | 13363 | if (swhash->hlist_refcount > 0 && !swevent_hlist_deref(swhash)) { |
Frederic Weisbecker | 76e1d90 | 2010-04-05 15:35:57 +0200 | [diff] [blame] | 13364 | struct swevent_hlist *hlist; |
| 13365 | |
Peter Zijlstra | b28ab83 | 2010-09-06 14:48:15 +0200 | [diff] [blame] | 13366 | hlist = kzalloc_node(sizeof(*hlist), GFP_KERNEL, cpu_to_node(cpu)); |
| 13367 | WARN_ON(!hlist); |
| 13368 | rcu_assign_pointer(swhash->swevent_hlist, hlist); |
Frederic Weisbecker | 76e1d90 | 2010-04-05 15:35:57 +0200 | [diff] [blame] | 13369 | } |
Peter Zijlstra | b28ab83 | 2010-09-06 14:48:15 +0200 | [diff] [blame] | 13370 | mutex_unlock(&swhash->hlist_mutex); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 13371 | } |
| 13372 | |
Dave Young | 2965faa | 2015-09-09 15:38:55 -0700 | [diff] [blame] | 13373 | #if defined CONFIG_HOTPLUG_CPU || defined CONFIG_KEXEC_CORE |
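/*
 * Runs on the CPU that is going away (via smp_call_function_single()):
 * stop timekeeping for the context and detach every event from it.
 */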
Peter Zijlstra | 108b02c | 2010-09-06 14:32:03 +0200 | [diff] [blame] | 13374 | static void __perf_event_exit_context(void *__info) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 13375 | { |
Peter Zijlstra | 108b02c | 2010-09-06 14:32:03 +0200 | [diff] [blame] | 13376 | struct perf_event_context *ctx = __info; |
Peter Zijlstra | fae3fde | 2016-01-11 15:00:50 +0100 | [diff] [blame] | 13377 | struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); |
| 13378 | struct perf_event *event; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 13379 | |
Peter Zijlstra | fae3fde | 2016-01-11 15:00:50 +0100 | [diff] [blame] | 13380 | raw_spin_lock(&ctx->lock); |
Peter Zijlstra | 0ee098c | 2017-09-05 13:24:28 +0200 | [diff] [blame] | 13381 | ctx_sched_out(ctx, cpuctx, EVENT_TIME); |
Peter Zijlstra | fae3fde | 2016-01-11 15:00:50 +0100 | [diff] [blame] | 13382 | list_for_each_entry(event, &ctx->event_list, event_entry) |
Peter Zijlstra | 45a0e07 | 2016-01-26 13:09:48 +0100 | [diff] [blame] | 13383 | __perf_remove_from_context(event, cpuctx, ctx, (void *)DETACH_GROUP); |
Peter Zijlstra | fae3fde | 2016-01-11 15:00:50 +0100 | [diff] [blame] | 13384 | raw_spin_unlock(&ctx->lock); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 13385 | } |
Peter Zijlstra | 108b02c | 2010-09-06 14:32:03 +0200 | [diff] [blame] | 13386 | |
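/*
 * CPU hotplug-down path: for each PMU, detach all events from the per-CPU
 * context on the outgoing CPU, mark the context offline and drop the CPU
 * from perf_online_mask.
 */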
| 13387 | static void perf_event_exit_cpu_context(int cpu) |
| 13388 | { |
Thomas Gleixner | a63fbed | 2017-05-24 10:15:34 +0200 | [diff] [blame] | 13389 | struct perf_cpu_context *cpuctx; |
Peter Zijlstra | 108b02c | 2010-09-06 14:32:03 +0200 | [diff] [blame] | 13390 | struct perf_event_context *ctx; |
| 13391 | struct pmu *pmu; |
Peter Zijlstra | 108b02c | 2010-09-06 14:32:03 +0200 | [diff] [blame] | 13392 | |
Thomas Gleixner | a63fbed | 2017-05-24 10:15:34 +0200 | [diff] [blame] | 13393 | mutex_lock(&pmus_lock); |
| 13394 | list_for_each_entry(pmu, &pmus, entry) { |
| 13395 | cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu); |
| 13396 | ctx = &cpuctx->ctx; |
Peter Zijlstra | 108b02c | 2010-09-06 14:32:03 +0200 | [diff] [blame] | 13397 | |
| 13398 | mutex_lock(&ctx->mutex); |
| 13399 | smp_call_function_single(cpu, __perf_event_exit_context, ctx, 1); |
Thomas Gleixner | a63fbed | 2017-05-24 10:15:34 +0200 | [diff] [blame] | 13400 | cpuctx->online = 0; |
Peter Zijlstra | 108b02c | 2010-09-06 14:32:03 +0200 | [diff] [blame] | 13401 | mutex_unlock(&ctx->mutex); |
| 13402 | } |
Thomas Gleixner | a63fbed | 2017-05-24 10:15:34 +0200 | [diff] [blame] | 13403 | cpumask_clear_cpu(cpu, perf_online_mask); |
| 13404 | mutex_unlock(&pmus_lock); |
Peter Zijlstra | 108b02c | 2010-09-06 14:32:03 +0200 | [diff] [blame] | 13405 | } |
Thomas Gleixner | 00e16c3 | 2016-07-13 17:16:09 +0000 | [diff] [blame] | 13406 | #else |
Peter Zijlstra | 108b02c | 2010-09-06 14:32:03 +0200 | [diff] [blame] | 13407 | |
Thomas Gleixner | 00e16c3 | 2016-07-13 17:16:09 +0000 | [diff] [blame] | 13408 | static void perf_event_exit_cpu_context(int cpu) { } |
| 13409 | |
| 13410 | #endif |
| 13411 | |
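/*
 * CPU hotplug-up path: set up the software event state for @cpu, add it
 * to perf_online_mask and mark each PMU's per-CPU context online.
 */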
Thomas Gleixner | a63fbed | 2017-05-24 10:15:34 +0200 | [diff] [blame] | 13412 | int perf_event_init_cpu(unsigned int cpu) |
| 13413 | { |
| 13414 | struct perf_cpu_context *cpuctx; |
| 13415 | struct perf_event_context *ctx; |
| 13416 | struct pmu *pmu; |
| 13417 | |
| 13418 | perf_swevent_init_cpu(cpu); |
| 13419 | |
| 13420 | mutex_lock(&pmus_lock); |
| 13421 | cpumask_set_cpu(cpu, perf_online_mask); |
| 13422 | list_for_each_entry(pmu, &pmus, entry) { |
| 13423 | cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu); |
| 13424 | ctx = &cpuctx->ctx; |
| 13425 | |
| 13426 | mutex_lock(&ctx->mutex); |
| 13427 | cpuctx->online = 1; |
| 13428 | mutex_unlock(&ctx->mutex); |
| 13429 | } |
| 13430 | mutex_unlock(&pmus_lock); |
| 13431 | |
| 13432 | return 0; |
| 13433 | } |
| 13434 | |
Thomas Gleixner | 00e16c3 | 2016-07-13 17:16:09 +0000 | [diff] [blame] | 13435 | int perf_event_exit_cpu(unsigned int cpu) |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 13436 | { |
Peter Zijlstra | e3703f8 | 2014-02-24 12:06:12 +0100 | [diff] [blame] | 13437 | perf_event_exit_cpu_context(cpu); |
Thomas Gleixner | 00e16c3 | 2016-07-13 17:16:09 +0000 | [diff] [blame] | 13438 | return 0; |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 13439 | } |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 13440 | |
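/*
 * Reboot notifier: quiesce perf by taking every online CPU through the
 * regular hotplug-down path before the machine goes down.
 */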
Peter Zijlstra | c277443 | 2010-12-08 15:29:02 +0100 | [diff] [blame] | 13441 | static int |
| 13442 | perf_reboot(struct notifier_block *notifier, unsigned long val, void *v) |
| 13443 | { |
| 13444 | int cpu; |
| 13445 | |
| 13446 | for_each_online_cpu(cpu) |
| 13447 | perf_event_exit_cpu(cpu); |
| 13448 | |
| 13449 | return NOTIFY_OK; |
| 13450 | } |
| 13451 | |
| 13452 | /* |
| 13453 | * Run the perf reboot notifier at the very last possible moment so that |
| 13454 | * the generic watchdog code runs as long as possible. |
| 13455 | */ |
| 13456 | static struct notifier_block perf_reboot_notifier = { |
| 13457 | .notifier_call = perf_reboot, |
| 13458 | .priority = INT_MIN, |
| 13459 | }; |
| 13460 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 13461 | void __init perf_event_init(void) |
| 13462 | { |
Jason Wessel | 3c502e7 | 2010-11-04 17:33:01 -0500 | [diff] [blame] | 13463 | int ret; |
| 13464 | |
Peter Zijlstra | 2e80a82 | 2010-11-17 23:17:36 +0100 | [diff] [blame] | 13465 | idr_init(&pmu_idr); |
| 13466 | |
Paul Mackerras | 220b140 | 2010-03-10 20:45:52 +1100 | [diff] [blame] | 13467 | perf_event_init_all_cpus(); |
Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 13468 | init_srcu_struct(&pmus_srcu); |
Peter Zijlstra | 2e80a82 | 2010-11-17 23:17:36 +0100 | [diff] [blame] | 13469 | perf_pmu_register(&perf_swevent, "software", PERF_TYPE_SOFTWARE); |
| 13470 | perf_pmu_register(&perf_cpu_clock, NULL, -1); |
| 13471 | perf_pmu_register(&perf_task_clock, NULL, -1); |
Peter Zijlstra | b0a873e | 2010-06-11 13:35:08 +0200 | [diff] [blame] | 13472 | perf_tp_register(); |
Thomas Gleixner | 00e16c3 | 2016-07-13 17:16:09 +0000 | [diff] [blame] | 13473 | perf_event_init_cpu(smp_processor_id()); |
Peter Zijlstra | c277443 | 2010-12-08 15:29:02 +0100 | [diff] [blame] | 13474 | register_reboot_notifier(&perf_reboot_notifier); |
Jason Wessel | 3c502e7 | 2010-11-04 17:33:01 -0500 | [diff] [blame] | 13475 | |
| 13476 | ret = init_hw_breakpoint(); |
| 13477 | WARN(ret, "hw_breakpoint initialization failed with: %d", ret); |
Gleb Natapov | b202952 | 2011-11-27 17:59:09 +0200 | [diff] [blame] | 13478 | |
Namhyung Kim | bdacfaf | 2021-03-11 20:54:12 +0900 | [diff] [blame] | 13479 | perf_event_cache = KMEM_CACHE(perf_event, SLAB_PANIC); |
| 13480 | |
Jiri Olsa | b01c3a0 | 2012-03-23 15:41:20 +0100 | [diff] [blame] | 13481 | /* |
 | 13482 | 	 * Build-time assertion that we keep the data_head at the intended |
 | 13483 | 	 * location. IOW, validation that we got the __reserved[] size right. |
| 13484 | */ |
| 13485 | BUILD_BUG_ON((offsetof(struct perf_event_mmap_page, data_head)) |
| 13486 | != 1024); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 13487 | } |
Peter Zijlstra | abe4340 | 2010-11-17 23:17:37 +0100 | [diff] [blame] | 13488 | |
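/*
 * sysfs show handler for PMU event attributes: print the event string if
 * the attribute carries one, otherwise print nothing.
 */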
Cody P Schafer | fd979c0 | 2015-01-30 13:45:57 -0800 | [diff] [blame] | 13489 | ssize_t perf_event_sysfs_show(struct device *dev, struct device_attribute *attr, |
| 13490 | char *page) |
| 13491 | { |
| 13492 | struct perf_pmu_events_attr *pmu_attr = |
| 13493 | container_of(attr, struct perf_pmu_events_attr, attr); |
| 13494 | |
| 13495 | if (pmu_attr->event_str) |
| 13496 | return sprintf(page, "%s\n", pmu_attr->event_str); |
| 13497 | |
| 13498 | return 0; |
| 13499 | } |
Thomas Gleixner | 675965b | 2016-02-22 22:19:27 +0000 | [diff] [blame] | 13500 | EXPORT_SYMBOL_GPL(perf_event_sysfs_show); |
Cody P Schafer | fd979c0 | 2015-01-30 13:45:57 -0800 | [diff] [blame] | 13501 | |
Peter Zijlstra | abe4340 | 2010-11-17 23:17:37 +0100 | [diff] [blame] | 13502 | static int __init perf_event_sysfs_init(void) |
| 13503 | { |
| 13504 | struct pmu *pmu; |
| 13505 | int ret; |
| 13506 | |
| 13507 | mutex_lock(&pmus_lock); |
| 13508 | |
| 13509 | ret = bus_register(&pmu_bus); |
| 13510 | if (ret) |
| 13511 | goto unlock; |
| 13512 | |
| 13513 | list_for_each_entry(pmu, &pmus, entry) { |
| 13514 | if (!pmu->name || pmu->type < 0) |
| 13515 | continue; |
| 13516 | |
| 13517 | ret = pmu_dev_alloc(pmu); |
| 13518 | WARN(ret, "Failed to register pmu: %s, reason %d\n", pmu->name, ret); |
| 13519 | } |
| 13520 | pmu_bus_running = 1; |
| 13521 | ret = 0; |
| 13522 | |
| 13523 | unlock: |
| 13524 | mutex_unlock(&pmus_lock); |
| 13525 | |
| 13526 | return ret; |
| 13527 | } |
| 13528 | device_initcall(perf_event_sysfs_init); |
Stephane Eranian | e5d1367 | 2011-02-14 11:20:01 +0200 | [diff] [blame] | 13529 | |
| 13530 | #ifdef CONFIG_CGROUP_PERF |
Tejun Heo | eb95419 | 2013-08-08 20:11:23 -0400 | [diff] [blame] | 13531 | static struct cgroup_subsys_state * |
| 13532 | perf_cgroup_css_alloc(struct cgroup_subsys_state *parent_css) |
Stephane Eranian | e5d1367 | 2011-02-14 11:20:01 +0200 | [diff] [blame] | 13533 | { |
| 13534 | struct perf_cgroup *jc; |
Stephane Eranian | e5d1367 | 2011-02-14 11:20:01 +0200 | [diff] [blame] | 13535 | |
Li Zefan | 1b15d05 | 2011-03-03 14:26:06 +0800 | [diff] [blame] | 13536 | jc = kzalloc(sizeof(*jc), GFP_KERNEL); |
Stephane Eranian | e5d1367 | 2011-02-14 11:20:01 +0200 | [diff] [blame] | 13537 | if (!jc) |
| 13538 | return ERR_PTR(-ENOMEM); |
| 13539 | |
Stephane Eranian | e5d1367 | 2011-02-14 11:20:01 +0200 | [diff] [blame] | 13540 | jc->info = alloc_percpu(struct perf_cgroup_info); |
| 13541 | if (!jc->info) { |
| 13542 | kfree(jc); |
| 13543 | return ERR_PTR(-ENOMEM); |
| 13544 | } |
| 13545 | |
Stephane Eranian | e5d1367 | 2011-02-14 11:20:01 +0200 | [diff] [blame] | 13546 | return &jc->css; |
| 13547 | } |
| 13548 | |
Tejun Heo | eb95419 | 2013-08-08 20:11:23 -0400 | [diff] [blame] | 13549 | static void perf_cgroup_css_free(struct cgroup_subsys_state *css) |
Stephane Eranian | e5d1367 | 2011-02-14 11:20:01 +0200 | [diff] [blame] | 13550 | { |
Tejun Heo | eb95419 | 2013-08-08 20:11:23 -0400 | [diff] [blame] | 13551 | struct perf_cgroup *jc = container_of(css, struct perf_cgroup, css); |
| 13552 | |
Stephane Eranian | e5d1367 | 2011-02-14 11:20:01 +0200 | [diff] [blame] | 13553 | free_percpu(jc->info); |
| 13554 | kfree(jc); |
| 13555 | } |
| 13556 | |
Namhyung Kim | 96aaab6 | 2020-03-25 21:45:28 +0900 | [diff] [blame] | 13557 | static int perf_cgroup_css_online(struct cgroup_subsys_state *css) |
| 13558 | { |
| 13559 | perf_event_cgroup(css->cgroup); |
| 13560 | return 0; |
| 13561 | } |
| 13562 | |
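/*
 * Runs on the task being moved (via task_function_call()): switch its
 * cgroup events out and back in so they pick up the new cgroup.
 */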
Stephane Eranian | e5d1367 | 2011-02-14 11:20:01 +0200 | [diff] [blame] | 13563 | static int __perf_cgroup_move(void *info) |
| 13564 | { |
| 13565 | struct task_struct *task = info; |
Stephane Eranian | ddaaf4e | 2015-11-12 11:00:03 +0100 | [diff] [blame] | 13566 | rcu_read_lock(); |
Stephane Eranian | e5d1367 | 2011-02-14 11:20:01 +0200 | [diff] [blame] | 13567 | perf_cgroup_switch(task, PERF_CGROUP_SWOUT | PERF_CGROUP_SWIN); |
Stephane Eranian | ddaaf4e | 2015-11-12 11:00:03 +0100 | [diff] [blame] | 13568 | rcu_read_unlock(); |
Stephane Eranian | e5d1367 | 2011-02-14 11:20:01 +0200 | [diff] [blame] | 13569 | return 0; |
| 13570 | } |
| 13571 | |
Tejun Heo | 1f7dd3e5 | 2015-12-03 10:18:21 -0500 | [diff] [blame] | 13572 | static void perf_cgroup_attach(struct cgroup_taskset *tset) |
Stephane Eranian | e5d1367 | 2011-02-14 11:20:01 +0200 | [diff] [blame] | 13573 | { |
Tejun Heo | bb9d97b | 2011-12-12 18:12:21 -0800 | [diff] [blame] | 13574 | struct task_struct *task; |
Tejun Heo | 1f7dd3e5 | 2015-12-03 10:18:21 -0500 | [diff] [blame] | 13575 | struct cgroup_subsys_state *css; |
Tejun Heo | bb9d97b | 2011-12-12 18:12:21 -0800 | [diff] [blame] | 13576 | |
Tejun Heo | 1f7dd3e5 | 2015-12-03 10:18:21 -0500 | [diff] [blame] | 13577 | cgroup_taskset_for_each(task, css, tset) |
Tejun Heo | bb9d97b | 2011-12-12 18:12:21 -0800 | [diff] [blame] | 13578 | task_function_call(task, __perf_cgroup_move, task); |
Stephane Eranian | e5d1367 | 2011-02-14 11:20:01 +0200 | [diff] [blame] | 13579 | } |
| 13580 | |
Tejun Heo | 073219e | 2014-02-08 10:36:58 -0500 | [diff] [blame] | 13581 | struct cgroup_subsys perf_event_cgrp_subsys = { |
Tejun Heo | 92fb974 | 2012-11-19 08:13:38 -0800 | [diff] [blame] | 13582 | .css_alloc = perf_cgroup_css_alloc, |
| 13583 | .css_free = perf_cgroup_css_free, |
Namhyung Kim | 96aaab6 | 2020-03-25 21:45:28 +0900 | [diff] [blame] | 13584 | .css_online = perf_cgroup_css_online, |
Tejun Heo | bb9d97b | 2011-12-12 18:12:21 -0800 | [diff] [blame] | 13585 | .attach = perf_cgroup_attach, |
Tejun Heo | 968ebff | 2017-01-29 14:35:20 -0500 | [diff] [blame] | 13586 | /* |
 | 13587 | 	 * Implicitly enable on the default hierarchy so that perf events |
 | 13588 | 	 * can always be filtered by cgroup2 path as long as the perf_event |
 | 13589 | 	 * controller is not mounted on a legacy hierarchy. |
| 13590 | */ |
| 13591 | .implicit_on_dfl = true, |
Tejun Heo | 8cfd814 | 2017-07-21 11:14:51 -0400 | [diff] [blame] | 13592 | .threaded = true, |
Stephane Eranian | e5d1367 | 2011-02-14 11:20:01 +0200 | [diff] [blame] | 13593 | }; |
| 13594 | #endif /* CONFIG_CGROUP_PERF */ |
Song Liu | c22ac2a | 2021-09-10 11:33:50 -0700 | [diff] [blame] | 13595 | |
| 13596 | DEFINE_STATIC_CALL_RET0(perf_snapshot_branch_stack, perf_snapshot_branch_stack_t); |