// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/kernel/profile.c
 *  Simple profiling. Manages a direct-mapped profile hit count buffer,
 *  with configurable resolution, support for restricting the cpus on
 *  which profiling is done, and switching between cpu time and
 *  schedule() calls via kernel command line parameters passed at boot.
 *
 *  Scheduler profiling support, Arjan van de Ven and Ingo Molnar,
 *	Red Hat, July 2004
 *  Consolidation of architecture support code for profiling,
 *	Nadia Yvette Chambers, Oracle, July 2004
 *  Amortized hit count accounting via per-cpu open-addressed hashtables
 *	to resolve timer interrupt livelocks, Nadia Yvette Chambers,
 *	Oracle, 2004
 */

#include <linux/export.h>
#include <linux/profile.h>
#include <linux/memblock.h>
#include <linux/notifier.h>
#include <linux/mm.h>
#include <linux/cpumask.h>
#include <linux/cpu.h>
#include <linux/highmem.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/sched/stat.h>

#include <asm/sections.h>
#include <asm/irq_regs.h>
#include <asm/ptrace.h>

struct profile_hit {
	u32 pc, hits;
};
#define PROFILE_GRPSHIFT 3
#define PROFILE_GRPSZ (1 << PROFILE_GRPSHIFT)
#define NR_PROFILE_HIT (PAGE_SIZE/sizeof(struct profile_hit))
#define NR_PROFILE_GRP (NR_PROFILE_HIT/PROFILE_GRPSZ)
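
/*
 * Note: each per-cpu hashtable occupies a single page of struct
 * profile_hit entries, probed in groups of PROFILE_GRPSZ consecutive
 * slots (see do_profile_hits() below).
 */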

static atomic_t *prof_buffer;
static unsigned long prof_len, prof_shift;

int prof_on __read_mostly;
EXPORT_SYMBOL_GPL(prof_on);

static cpumask_var_t prof_cpu_mask;
#if defined(CONFIG_SMP) && defined(CONFIG_PROC_FS)
static DEFINE_PER_CPU(struct profile_hit *[2], cpu_profile_hits);
static DEFINE_PER_CPU(int, cpu_profile_flip);
static DEFINE_MUTEX(profile_flip_mutex);
#endif /* CONFIG_SMP */
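
/*
 * Parse the "profile=" boot parameter.  Accepted forms are a bare
 * shift count for cpu-time profiling (e.g. "profile=2") and a mode
 * prefix with an optional shift: "profile=schedule[,N]",
 * "profile=sleep[,N]" or "profile=kvm[,N]".  The shift N sets the
 * resolution: each profile buffer slot covers (1 << N) bytes of
 * kernel text.
 */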
int profile_setup(char *str)
{
	static const char schedstr[] = "schedule";
	static const char sleepstr[] = "sleep";
	static const char kvmstr[] = "kvm";
	int par;

	if (!strncmp(str, sleepstr, strlen(sleepstr))) {
#ifdef CONFIG_SCHEDSTATS
		force_schedstat_enabled();
		prof_on = SLEEP_PROFILING;
		if (str[strlen(sleepstr)] == ',')
			str += strlen(sleepstr) + 1;
		if (get_option(&str, &par))
			prof_shift = par;
		pr_info("kernel sleep profiling enabled (shift: %ld)\n",
			prof_shift);
#else
		pr_warn("kernel sleep profiling requires CONFIG_SCHEDSTATS\n");
#endif /* CONFIG_SCHEDSTATS */
	} else if (!strncmp(str, schedstr, strlen(schedstr))) {
		prof_on = SCHED_PROFILING;
		if (str[strlen(schedstr)] == ',')
			str += strlen(schedstr) + 1;
		if (get_option(&str, &par))
			prof_shift = par;
		pr_info("kernel schedule profiling enabled (shift: %ld)\n",
			prof_shift);
	} else if (!strncmp(str, kvmstr, strlen(kvmstr))) {
		prof_on = KVM_PROFILING;
		if (str[strlen(kvmstr)] == ',')
			str += strlen(kvmstr) + 1;
		if (get_option(&str, &par))
			prof_shift = par;
		pr_info("kernel KVM profiling enabled (shift: %ld)\n",
			prof_shift);
	} else if (get_option(&str, &par)) {
		prof_shift = par;
		prof_on = CPU_PROFILING;
		pr_info("kernel profiling enabled (shift: %ld)\n",
			prof_shift);
	}
	return 1;
}
__setup("profile=", profile_setup);
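
/*
 * Allocate the profile buffer sized to cover kernel text at the
 * configured resolution, trying progressively more tolerant
 * allocators: kzalloc, then alloc_pages_exact, then vzalloc.
 */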
int __ref profile_init(void)
{
	int buffer_bytes;
	if (!prof_on)
		return 0;

	/* only text is profiled */
	prof_len = (_etext - _stext) >> prof_shift;
	buffer_bytes = prof_len*sizeof(atomic_t);

	if (!alloc_cpumask_var(&prof_cpu_mask, GFP_KERNEL))
		return -ENOMEM;

	cpumask_copy(prof_cpu_mask, cpu_possible_mask);

	prof_buffer = kzalloc(buffer_bytes, GFP_KERNEL|__GFP_NOWARN);
	if (prof_buffer)
		return 0;

	prof_buffer = alloc_pages_exact(buffer_bytes,
					GFP_KERNEL|__GFP_ZERO|__GFP_NOWARN);
	if (prof_buffer)
		return 0;

	prof_buffer = vzalloc(buffer_bytes);
	if (prof_buffer)
		return 0;

	free_cpumask_var(prof_cpu_mask);
	return -ENOMEM;
}

/* Profile event notifications */

static BLOCKING_NOTIFIER_HEAD(task_exit_notifier);
static ATOMIC_NOTIFIER_HEAD(task_free_notifier);
static BLOCKING_NOTIFIER_HEAD(munmap_notifier);

void profile_task_exit(struct task_struct *task)
{
	blocking_notifier_call_chain(&task_exit_notifier, 0, task);
}
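
/*
 * Returns 1 if any task_free notifier returned NOTIFY_OK and thus
 * claimed the outgoing task; callers use this to decide whether the
 * task_struct may be freed.
 */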
int profile_handoff_task(struct task_struct *task)
{
	int ret;
	ret = atomic_notifier_call_chain(&task_free_notifier, 0, task);
	return (ret == NOTIFY_OK) ? 1 : 0;
}

void profile_munmap(unsigned long addr)
{
	blocking_notifier_call_chain(&munmap_notifier, 0, (void *)addr);
}

int task_handoff_register(struct notifier_block *n)
{
	return atomic_notifier_chain_register(&task_free_notifier, n);
}
EXPORT_SYMBOL_GPL(task_handoff_register);

int task_handoff_unregister(struct notifier_block *n)
{
	return atomic_notifier_chain_unregister(&task_free_notifier, n);
}
EXPORT_SYMBOL_GPL(task_handoff_unregister);

int profile_event_register(enum profile_type type, struct notifier_block *n)
{
	int err = -EINVAL;

	switch (type) {
	case PROFILE_TASK_EXIT:
		err = blocking_notifier_chain_register(
				&task_exit_notifier, n);
		break;
	case PROFILE_MUNMAP:
		err = blocking_notifier_chain_register(
				&munmap_notifier, n);
		break;
	}

	return err;
}
EXPORT_SYMBOL_GPL(profile_event_register);

int profile_event_unregister(enum profile_type type, struct notifier_block *n)
{
	int err = -EINVAL;

	switch (type) {
	case PROFILE_TASK_EXIT:
		err = blocking_notifier_chain_unregister(
				&task_exit_notifier, n);
		break;
	case PROFILE_MUNMAP:
		err = blocking_notifier_chain_unregister(
				&munmap_notifier, n);
		break;
	}

	return err;
}
EXPORT_SYMBOL_GPL(profile_event_unregister);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700207
Arnd Bergmannade356b2016-03-22 14:27:26 -0700208#if defined(CONFIG_SMP) && defined(CONFIG_PROC_FS)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700209/*
210 * Each cpu has a pair of open-addressed hashtables for pending
211 * profile hits. read_profile() IPI's all cpus to request them
212 * to flip buffers and flushes their contents to prof_buffer itself.
213 * Flip requests are serialized by the profile_flip_mutex. The sole
214 * use of having a second hashtable is for avoiding cacheline
215 * contention that would otherwise happen during flushes of pending
216 * profile hits required for the accuracy of reported profile hits
217 * and so resurrect the interrupt livelock issue.
218 *
219 * The open-addressed hashtables are indexed by profile buffer slot
220 * and hold the number of pending hits to that profile buffer slot on
221 * a cpu in an entry. When the hashtable overflows, all pending hits
222 * are accounted to their corresponding profile buffer slots with
223 * atomic_add() and the hashtable emptied. As numerous pending hits
224 * may be accounted to a profile buffer slot in a hashtable entry,
225 * this amortizes a number of atomic profile buffer increments likely
226 * to be far larger than the number of entries in the hashtable,
227 * particularly given that the number of distinct profile buffer
228 * positions to which hits are accounted during short intervals (e.g.
229 * several seconds) is usually very small. Exclusion from buffer
230 * flipping is provided by interrupt disablement (note that for
Ingo Molnarece8a682006-12-06 20:37:24 -0800231 * SCHED_PROFILING or SLEEP_PROFILING profile_hit() may be called from
232 * process context).
Linus Torvalds1da177e2005-04-16 15:20:36 -0700233 * The hash function is meant to be lightweight as opposed to strong,
234 * and was vaguely inspired by ppc64 firmware-supported inverted
235 * pagetable hash functions, but uses a full hashtable full of finite
236 * collision chains, not just pairs of them.
237 *
Nadia Yvette Chambers6d49e352012-12-06 10:39:54 +0100238 * -- nyc
Linus Torvalds1da177e2005-04-16 15:20:36 -0700239 */
static void __profile_flip_buffers(void *unused)
{
	int cpu = smp_processor_id();

	per_cpu(cpu_profile_flip, cpu) = !per_cpu(cpu_profile_flip, cpu);
}

static void profile_flip_buffers(void)
{
	int i, j, cpu;

	mutex_lock(&profile_flip_mutex);
	j = per_cpu(cpu_profile_flip, get_cpu());
	put_cpu();
	on_each_cpu(__profile_flip_buffers, NULL, 1);
	for_each_online_cpu(cpu) {
		struct profile_hit *hits = per_cpu(cpu_profile_hits, cpu)[j];
		for (i = 0; i < NR_PROFILE_HIT; ++i) {
			if (!hits[i].hits) {
				if (hits[i].pc)
					hits[i].pc = 0;
				continue;
			}
			atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
			hits[i].hits = hits[i].pc = 0;
		}
	}
	mutex_unlock(&profile_flip_mutex);
}

static void profile_discard_flip_buffers(void)
{
	int i, cpu;

	mutex_lock(&profile_flip_mutex);
	i = per_cpu(cpu_profile_flip, get_cpu());
	put_cpu();
	on_each_cpu(__profile_flip_buffers, NULL, 1);
	for_each_online_cpu(cpu) {
		struct profile_hit *hits = per_cpu(cpu_profile_hits, cpu)[i];
		memset(hits, 0, NR_PROFILE_HIT*sizeof(struct profile_hit));
	}
	mutex_unlock(&profile_flip_mutex);
}
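
/*
 * Account nr_hits pending hits for pc in this cpu's active hashtable.
 * The starting group is chosen by a primary hash of the profile-buffer
 * index; on collision the probe advances by a pc-derived secondary
 * stride, and if it wraps back to the primary group the whole table is
 * flushed to prof_buffer and emptied.
 */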
static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
{
	unsigned long primary, secondary, flags, pc = (unsigned long)__pc;
	int i, j, cpu;
	struct profile_hit *hits;

	pc = min((pc - (unsigned long)_stext) >> prof_shift, prof_len - 1);
	i = primary = (pc & (NR_PROFILE_GRP - 1)) << PROFILE_GRPSHIFT;
	secondary = (~(pc << 1) & (NR_PROFILE_GRP - 1)) << PROFILE_GRPSHIFT;
	cpu = get_cpu();
	hits = per_cpu(cpu_profile_hits, cpu)[per_cpu(cpu_profile_flip, cpu)];
	if (!hits) {
		put_cpu();
		return;
	}
	/*
	 * We buffer the global profiler buffer into a per-CPU
	 * queue and thus reduce the number of global (and possibly
	 * NUMA-alien) accesses. The write-queue is self-coalescing:
	 */
	local_irq_save(flags);
	do {
		for (j = 0; j < PROFILE_GRPSZ; ++j) {
			if (hits[i + j].pc == pc) {
				hits[i + j].hits += nr_hits;
				goto out;
			} else if (!hits[i + j].hits) {
				hits[i + j].pc = pc;
				hits[i + j].hits = nr_hits;
				goto out;
			}
		}
		i = (i + secondary) & (NR_PROFILE_HIT - 1);
	} while (i != primary);

	/*
	 * Add the current hit(s) and flush the write-queue out
	 * to the global buffer:
	 */
	atomic_add(nr_hits, &prof_buffer[pc]);
	for (i = 0; i < NR_PROFILE_HIT; ++i) {
		atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
		hits[i].pc = hits[i].hits = 0;
	}
out:
	local_irq_restore(flags);
	put_cpu();
}
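
/*
 * CPU hotplug callbacks: keep prof_cpu_mask in sync with the online
 * cpus and manage the two per-cpu hashtable pages across the
 * prepare/dead/online transitions.
 */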
static int profile_dead_cpu(unsigned int cpu)
{
	struct page *page;
	int i;

	if (cpumask_available(prof_cpu_mask))
		cpumask_clear_cpu(cpu, prof_cpu_mask);

	for (i = 0; i < 2; i++) {
		if (per_cpu(cpu_profile_hits, cpu)[i]) {
			page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[i]);
			per_cpu(cpu_profile_hits, cpu)[i] = NULL;
			__free_page(page);
		}
	}
	return 0;
}

static int profile_prepare_cpu(unsigned int cpu)
{
	int i, node = cpu_to_mem(cpu);
	struct page *page;

	per_cpu(cpu_profile_flip, cpu) = 0;

	for (i = 0; i < 2; i++) {
		if (per_cpu(cpu_profile_hits, cpu)[i])
			continue;

		page = __alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0);
		if (!page) {
			profile_dead_cpu(cpu);
			return -ENOMEM;
		}
		per_cpu(cpu_profile_hits, cpu)[i] = page_address(page);
	}
	return 0;
}

static int profile_online_cpu(unsigned int cpu)
{
	if (cpumask_available(prof_cpu_mask))
		cpumask_set_cpu(cpu, prof_cpu_mask);

	return 0;
}

#else /* !CONFIG_SMP */
#define profile_flip_buffers()		do { } while (0)
#define profile_discard_flip_buffers()	do { } while (0)

static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
{
	unsigned long pc;
	pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
	atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
}
#endif /* !CONFIG_SMP */

void profile_hits(int type, void *__pc, unsigned int nr_hits)
{
	if (prof_on != type || !prof_buffer)
		return;
	do_profile_hits(type, __pc, nr_hits);
}
EXPORT_SYMBOL_GPL(profile_hits);
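
/*
 * Timer-interrupt hook: account one hit at the interrupted PC when the
 * sample is in kernel mode and this cpu is enabled in prof_cpu_mask.
 */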
void profile_tick(int type)
{
	struct pt_regs *regs = get_irq_regs();

	if (!user_mode(regs) && cpumask_available(prof_cpu_mask) &&
	    cpumask_test_cpu(smp_processor_id(), prof_cpu_mask))
		profile_hit(type, (void *)profile_pc(regs));
}

#ifdef CONFIG_PROC_FS
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
static int prof_cpu_mask_proc_show(struct seq_file *m, void *v)
{
	seq_printf(m, "%*pb\n", cpumask_pr_args(prof_cpu_mask));
	return 0;
}

static int prof_cpu_mask_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, prof_cpu_mask_proc_show, NULL);
}

static ssize_t prof_cpu_mask_proc_write(struct file *file,
	const char __user *buffer, size_t count, loff_t *pos)
{
	cpumask_var_t new_value;
	int err;

	if (!alloc_cpumask_var(&new_value, GFP_KERNEL))
		return -ENOMEM;

	err = cpumask_parse_user(buffer, count, new_value);
	if (!err) {
		cpumask_copy(prof_cpu_mask, new_value);
		err = count;
	}
	free_cpumask_var(new_value);
	return err;
}

static const struct file_operations prof_cpu_mask_proc_fops = {
	.open		= prof_cpu_mask_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
	.write		= prof_cpu_mask_proc_write,
};
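
/*
 * /proc/irq/prof_cpu_mask takes a hex cpumask as parsed by
 * cpumask_parse_user(); e.g. writing "f" restricts profiling to
 * cpus 0-3.
 */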
void create_prof_cpu_mask(void)
{
	/* create /proc/irq/prof_cpu_mask */
	proc_create("irq/prof_cpu_mask", 0600, NULL, &prof_cpu_mask_proc_fops);
}

/*
 * This function accesses profiling information. The returned data is
 * binary: the sampling step and the actual contents of the profile
 * buffer. Use of the program readprofile is recommended in order to
 * get meaningful info out of these data.
 */
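/*
 * Layout: the first sizeof(unsigned int) bytes hold the sample step
 * (1 << prof_shift); the rest is the prof_len-entry array of hit
 * counters, copied straight out of prof_buffer.
 */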
static ssize_t
read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t read;
	char *pnt;
	unsigned int sample_step = 1 << prof_shift;

	profile_flip_buffers();
	if (p >= (prof_len+1)*sizeof(unsigned int))
		return 0;
	if (count > (prof_len+1)*sizeof(unsigned int) - p)
		count = (prof_len+1)*sizeof(unsigned int) - p;
	read = 0;

	while (p < sizeof(unsigned int) && count > 0) {
		if (put_user(*((char *)(&sample_step)+p), buf))
			return -EFAULT;
		buf++; p++; count--; read++;
	}
	pnt = (char *)prof_buffer + p - sizeof(atomic_t);
	if (copy_to_user(buf, (void *)pnt, count))
		return -EFAULT;
	read += count;
	*ppos += read;
	return read;
}

/*
 * Writing to /proc/profile resets the counters.
 *
 * Writing a 'profiling multiplier' value into it also resets the
 * profiling interrupt frequency, on architectures that support this.
 */
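/*
 * Note: the multiplier must be written as a raw binary int (count ==
 * sizeof(int)); every write, whatever its size, clears the buffer.
 */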
static ssize_t write_profile(struct file *file, const char __user *buf,
			     size_t count, loff_t *ppos)
{
#ifdef CONFIG_SMP
	extern int setup_profiling_timer(unsigned int multiplier);

	if (count == sizeof(int)) {
		unsigned int multiplier;

		if (copy_from_user(&multiplier, buf, sizeof(int)))
			return -EFAULT;

		if (setup_profiling_timer(multiplier))
			return -EINVAL;
	}
#endif
	profile_discard_flip_buffers();
	memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
	return count;
}

static const struct file_operations proc_profile_operations = {
	.read		= read_profile,
	.write		= write_profile,
	.llseek		= default_llseek,
};
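
/*
 * Set up /proc/profile and, on SMP, the cpu hotplug states that manage
 * the per-cpu hashtables; runs at subsys_initcall time.
 */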
int __ref create_proc_profile(void)
{
	struct proc_dir_entry *entry;
#ifdef CONFIG_SMP
	enum cpuhp_state online_state;
#endif

	int err = 0;

	if (!prof_on)
		return 0;
#ifdef CONFIG_SMP
	err = cpuhp_setup_state(CPUHP_PROFILE_PREPARE, "PROFILE_PREPARE",
				profile_prepare_cpu, profile_dead_cpu);
	if (err)
		return err;

	err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "AP_PROFILE_ONLINE",
				profile_online_cpu, NULL);
	if (err < 0)
		goto err_state_prep;
	online_state = err;
	err = 0;
#endif
	entry = proc_create("profile", S_IWUSR | S_IRUGO,
			    NULL, &proc_profile_operations);
	if (!entry)
		goto err_state_onl;
	proc_set_size(entry, (1 + prof_len) * sizeof(atomic_t));

	return err;
err_state_onl:
#ifdef CONFIG_SMP
	cpuhp_remove_state(online_state);
err_state_prep:
	cpuhp_remove_state(CPUHP_PROFILE_PREPARE);
#endif
	return err;
}
subsys_initcall(create_proc_profile);
#endif /* CONFIG_PROC_FS */