/*
 * Performance events callchain code, extracted from core.c:
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra
 *  Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * For licensing details see kernel-base/COPYING
 */

#include <linux/perf_event.h>
#include <linux/slab.h>
#include "internal.h"

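/*
 * Per-CPU scratch storage for callchains: cpu_entries[] is a flexible
 * array holding one pointer per possible CPU, each pointing at
 * PERF_NR_CONTEXTS perf_callchain_entry buffers. The whole thing is
 * freed via rcu_head once a grace period has elapsed.
 */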
struct callchain_cpus_entries {
	struct rcu_head			rcu_head;
	struct perf_callchain_entry	*cpu_entries[0];
};

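/*
 * Per-CPU recursion counters (one per execution context, see
 * PERF_NR_CONTEXTS), a refcount of events that requested callchains,
 * a mutex serializing buffer allocation/release, and the RCU-protected
 * pointer to the buffers themselves.
 */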
static DEFINE_PER_CPU(int, callchain_recursion[PERF_NR_CONTEXTS]);
static atomic_t nr_callchain_events;
static DEFINE_MUTEX(callchain_mutex);
static struct callchain_cpus_entries *callchain_cpus_entries;

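/*
 * Weak default stubs: architectures that support callchain sampling
 * override these with their actual kernel/user unwinders.
 */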
__weak void perf_callchain_kernel(struct perf_callchain_entry *entry,
				  struct pt_regs *regs)
{
}

__weak void perf_callchain_user(struct perf_callchain_entry *entry,
				struct pt_regs *regs)
{
}

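/*
 * RCU callback: by the time this runs no CPU can still hold a
 * reference to the old entries, so the per-CPU buffers and their
 * container can be freed.
 */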
static void release_callchain_buffers_rcu(struct rcu_head *head)
{
	struct callchain_cpus_entries *entries;
	int cpu;

	entries = container_of(head, struct callchain_cpus_entries, rcu_head);

	for_each_possible_cpu(cpu)
		kfree(entries->cpu_entries[cpu]);

	kfree(entries);
}

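/*
 * Unpublish the buffer pointer and defer the actual freeing until
 * after a grace period: NMI handlers may still be walking the old
 * buffers through rcu_dereference().
 */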
static void release_callchain_buffers(void)
{
	struct callchain_cpus_entries *entries;

	entries = callchain_cpus_entries;
	RCU_INIT_POINTER(callchain_cpus_entries, NULL);
	call_rcu(&entries->rcu_head, release_callchain_buffers_rcu);
}

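/*
 * Allocate the pointer array sized for nr_cpu_ids, plus one node-local
 * buffer of PERF_NR_CONTEXTS entries per possible CPU, then publish
 * the result with rcu_assign_pointer() so concurrent readers observe
 * fully initialized memory.
 */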
static int alloc_callchain_buffers(void)
{
	int cpu;
	int size;
	struct callchain_cpus_entries *entries;

	/*
	 * We can't use the percpu allocation API for data that can be
	 * accessed from NMI. Use a temporary manual per cpu allocation
	 * until that gets sorted out.
	 */
	size = offsetof(struct callchain_cpus_entries, cpu_entries[nr_cpu_ids]);

	entries = kzalloc(size, GFP_KERNEL);
	if (!entries)
		return -ENOMEM;

	size = sizeof(struct perf_callchain_entry) * PERF_NR_CONTEXTS;

	for_each_possible_cpu(cpu) {
		entries->cpu_entries[cpu] = kmalloc_node(size, GFP_KERNEL,
							 cpu_to_node(cpu));
		if (!entries->cpu_entries[cpu])
			goto fail;
	}

	rcu_assign_pointer(callchain_cpus_entries, entries);

	return 0;

fail:
	for_each_possible_cpu(cpu)
		kfree(entries->cpu_entries[cpu]);
	kfree(entries);

	return -ENOMEM;
}

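/*
 * Take a reference on the callchain buffers; the first user allocates
 * them. Later users only need to check that the allocation actually
 * succeeded. Every error path drops the reference it just took.
 */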
int get_callchain_buffers(void)
{
	int err = 0;
	int count;

	mutex_lock(&callchain_mutex);

	count = atomic_inc_return(&nr_callchain_events);
	if (WARN_ON_ONCE(count < 1)) {
		err = -EINVAL;
		goto exit;
	}

	if (count > 1) {
		/* If the allocation failed, give up */
		if (!callchain_cpus_entries)
			err = -ENOMEM;
		goto exit;
	}

	err = alloc_callchain_buffers();
exit:
	if (err)
		atomic_dec(&nr_callchain_events);

	mutex_unlock(&callchain_mutex);

	return err;
}

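/*
 * Drop a reference; the last user releases the buffers, taking the
 * mutex only on the final decrement to serialize against a concurrent
 * get_callchain_buffers().
 */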
void put_callchain_buffers(void)
{
	if (atomic_dec_and_mutex_lock(&nr_callchain_events, &callchain_mutex)) {
		release_callchain_buffers();
		mutex_unlock(&callchain_mutex);
	}
}

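/*
 * Claim this CPU's scratch entry for the current recursion context.
 * Returns NULL with *rctx == -1 if we are already unwinding in this
 * context, or NULL with a valid *rctx (which the caller must still
 * release) if the buffers have been torn down.
 */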
static struct perf_callchain_entry *get_callchain_entry(int *rctx)
{
	int cpu;
	struct callchain_cpus_entries *entries;

	*rctx = get_recursion_context(this_cpu_ptr(callchain_recursion));
	if (*rctx == -1)
		return NULL;

	entries = rcu_dereference(callchain_cpus_entries);
	if (!entries)
		return NULL;

	cpu = smp_processor_id();

	return &entries->cpu_entries[cpu][*rctx];
}

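/* Release the recursion context claimed by get_callchain_entry(). */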
static void
put_callchain_entry(int rctx)
{
	put_recursion_context(this_cpu_ptr(callchain_recursion), rctx);
}

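/*
 * Build the callchain for @event at @regs: a PERF_CONTEXT_KERNEL
 * marker followed by kernel frames (unless excluded), then a
 * PERF_CONTEXT_USER marker and user frames. When the sample hit in
 * kernel mode, the user unwind restarts from the task's saved
 * user-space registers; kernel threads (no mm) get no user portion.
 */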
struct perf_callchain_entry *
perf_callchain(struct perf_event *event, struct pt_regs *regs)
{
	int rctx;
	struct perf_callchain_entry *entry;

	int kernel = !event->attr.exclude_callchain_kernel;
	int user   = !event->attr.exclude_callchain_user;

	if (!kernel && !user)
		return NULL;

	entry = get_callchain_entry(&rctx);
	if (rctx == -1)
		return NULL;

	if (!entry)
		goto exit_put;

	entry->nr = 0;

	if (kernel && !user_mode(regs)) {
		perf_callchain_store(entry, PERF_CONTEXT_KERNEL);
		perf_callchain_kernel(entry, regs);
	}

	if (user) {
		if (!user_mode(regs)) {
			if (current->mm)
				regs = task_pt_regs(current);
			else
				regs = NULL;
		}

		if (regs) {
			/*
			 * Disallow cross-task user callchains.
			 */
			if (event->ctx->task && event->ctx->task != current)
				goto exit_put;

			perf_callchain_store(entry, PERF_CONTEXT_USER);
			perf_callchain_user(entry, regs);
		}
	}

exit_put:
	put_callchain_entry(rctx);

	return entry;
}