// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/kernel/profile.c
 *  Simple profiling. Manages a direct-mapped profile hit count buffer,
 *  with configurable resolution, support for restricting the cpus on
 *  which profiling is done, and switching between cpu time and
 *  schedule() calls via kernel command line parameters passed at boot.
 *
 *  Scheduler profiling support, Arjan van de Ven and Ingo Molnar,
 *	Red Hat, July 2004
 *  Consolidation of architecture support code for profiling,
 *	Nadia Yvette Chambers, Oracle, July 2004
 *  Amortized hit count accounting via per-cpu open-addressed hashtables
 *	to resolve timer interrupt livelocks, Nadia Yvette Chambers,
 *	Oracle, 2004
 */

#include <linux/export.h>
#include <linux/profile.h>
#include <linux/memblock.h>
#include <linux/notifier.h>
#include <linux/mm.h>
#include <linux/cpumask.h>
#include <linux/cpu.h>
#include <linux/highmem.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/sched/stat.h>

#include <asm/sections.h>
#include <asm/irq_regs.h>
#include <asm/ptrace.h>

struct profile_hit {
	u32 pc, hits;
};
#define PROFILE_GRPSHIFT 3
#define PROFILE_GRPSZ (1 << PROFILE_GRPSHIFT)
#define NR_PROFILE_HIT (PAGE_SIZE/sizeof(struct profile_hit))
#define NR_PROFILE_GRP (NR_PROFILE_HIT/PROFILE_GRPSZ)
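/*
 * Layout note: each per-cpu hashtable occupies exactly one page and is
 * probed in groups of PROFILE_GRPSZ (8) slots.  With 4 KiB pages, for
 * instance, the 8-byte struct profile_hit gives NR_PROFILE_HIT = 512
 * slots arranged as NR_PROFILE_GRP = 64 groups.
 */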

static atomic_t *prof_buffer;
static unsigned long prof_len, prof_shift;

int prof_on __read_mostly;
EXPORT_SYMBOL_GPL(prof_on);

static cpumask_var_t prof_cpu_mask;
#if defined(CONFIG_SMP) && defined(CONFIG_PROC_FS)
static DEFINE_PER_CPU(struct profile_hit *[2], cpu_profile_hits);
static DEFINE_PER_CPU(int, cpu_profile_flip);
static DEFINE_MUTEX(profile_flip_mutex);
#endif /* CONFIG_SMP && CONFIG_PROC_FS */

int profile_setup(char *str)
{
	static const char schedstr[] = "schedule";
	static const char sleepstr[] = "sleep";
	static const char kvmstr[] = "kvm";
	int par;

	if (!strncmp(str, sleepstr, strlen(sleepstr))) {
#ifdef CONFIG_SCHEDSTATS
		force_schedstat_enabled();
		prof_on = SLEEP_PROFILING;
		if (str[strlen(sleepstr)] == ',')
			str += strlen(sleepstr) + 1;
		if (get_option(&str, &par))
			prof_shift = par;
		pr_info("kernel sleep profiling enabled (shift: %lu)\n",
			prof_shift);
#else
		pr_warn("kernel sleep profiling requires CONFIG_SCHEDSTATS\n");
#endif /* CONFIG_SCHEDSTATS */
	} else if (!strncmp(str, schedstr, strlen(schedstr))) {
		prof_on = SCHED_PROFILING;
		if (str[strlen(schedstr)] == ',')
			str += strlen(schedstr) + 1;
		if (get_option(&str, &par))
			prof_shift = par;
		pr_info("kernel schedule profiling enabled (shift: %lu)\n",
			prof_shift);
	} else if (!strncmp(str, kvmstr, strlen(kvmstr))) {
		prof_on = KVM_PROFILING;
		if (str[strlen(kvmstr)] == ',')
			str += strlen(kvmstr) + 1;
		if (get_option(&str, &par))
			prof_shift = par;
		pr_info("kernel KVM profiling enabled (shift: %lu)\n",
			prof_shift);
	} else if (get_option(&str, &par)) {
		prof_shift = par;
		prof_on = CPU_PROFILING;
		pr_info("kernel profiling enabled (shift: %lu)\n",
			prof_shift);
	}
	return 1;
}
__setup("profile=", profile_setup);
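/*
 * Example boot parameters (each bucket then covers 2^shift bytes of
 * kernel text):
 *	profile=2		cpu-time profiling with a shift of 2
 *	profile=schedule,4	profile schedule() calls, shift of 4
 *	profile=kvm		KVM exit profiling, default shift of 0
 */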


int __ref profile_init(void)
{
	int buffer_bytes;
	if (!prof_on)
		return 0;

	/* only text is profiled */
	prof_len = (_etext - _stext) >> prof_shift;
	buffer_bytes = prof_len * sizeof(atomic_t);

	if (!alloc_cpumask_var(&prof_cpu_mask, GFP_KERNEL))
		return -ENOMEM;

	cpumask_copy(prof_cpu_mask, cpu_possible_mask);

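	/*
	 * Try the cheapest allocation first and fall back as the buffer
	 * becomes harder to satisfy: kmalloc'd memory, then physically
	 * contiguous whole pages, then vmalloc address space.
	 */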
	prof_buffer = kzalloc(buffer_bytes, GFP_KERNEL|__GFP_NOWARN);
	if (prof_buffer)
		return 0;

	prof_buffer = alloc_pages_exact(buffer_bytes,
					GFP_KERNEL|__GFP_ZERO|__GFP_NOWARN);
	if (prof_buffer)
		return 0;

	prof_buffer = vzalloc(buffer_bytes);
	if (prof_buffer)
		return 0;

	free_cpumask_var(prof_cpu_mask);
	return -ENOMEM;
}

/* Profile event notifications */

static BLOCKING_NOTIFIER_HEAD(task_exit_notifier);
static ATOMIC_NOTIFIER_HEAD(task_free_notifier);
static BLOCKING_NOTIFIER_HEAD(munmap_notifier);

void profile_task_exit(struct task_struct *task)
{
	blocking_notifier_call_chain(&task_exit_notifier, 0, task);
}

int profile_handoff_task(struct task_struct *task)
{
	int ret;
	ret = atomic_notifier_call_chain(&task_free_notifier, 0, task);
	return (ret == NOTIFY_OK) ? 1 : 0;
}

void profile_munmap(unsigned long addr)
{
	blocking_notifier_call_chain(&munmap_notifier, 0, (void *)addr);
}

int task_handoff_register(struct notifier_block *n)
{
	return atomic_notifier_chain_register(&task_free_notifier, n);
}
EXPORT_SYMBOL_GPL(task_handoff_register);

int task_handoff_unregister(struct notifier_block *n)
{
	return atomic_notifier_chain_unregister(&task_free_notifier, n);
}
EXPORT_SYMBOL_GPL(task_handoff_unregister);

int profile_event_register(enum profile_type type, struct notifier_block *n)
{
	int err = -EINVAL;

	switch (type) {
	case PROFILE_TASK_EXIT:
		err = blocking_notifier_chain_register(
				&task_exit_notifier, n);
		break;
	case PROFILE_MUNMAP:
		err = blocking_notifier_chain_register(
				&munmap_notifier, n);
		break;
	}

	return err;
}
EXPORT_SYMBOL_GPL(profile_event_register);

int profile_event_unregister(enum profile_type type, struct notifier_block *n)
{
	int err = -EINVAL;

	switch (type) {
	case PROFILE_TASK_EXIT:
		err = blocking_notifier_chain_unregister(
				&task_exit_notifier, n);
		break;
	case PROFILE_MUNMAP:
		err = blocking_notifier_chain_unregister(
				&munmap_notifier, n);
		break;
	}

	return err;
}
EXPORT_SYMBOL_GPL(profile_event_unregister);

#if defined(CONFIG_SMP) && defined(CONFIG_PROC_FS)
/*
 * Each cpu has a pair of open-addressed hashtables for pending
 * profile hits. read_profile() IPI's all cpus to request them
 * to flip buffers and flushes their contents to prof_buffer itself.
 * Flip requests are serialized by the profile_flip_mutex. The sole
 * use of having a second hashtable is for avoiding cacheline
 * contention that would otherwise happen during flushes of pending
 * profile hits required for the accuracy of reported profile hits
 * and so resurrect the interrupt livelock issue.
 *
 * The open-addressed hashtables are indexed by profile buffer slot
 * and hold the number of pending hits to that profile buffer slot on
 * a cpu in an entry. When the hashtable overflows, all pending hits
 * are accounted to their corresponding profile buffer slots with
 * atomic_add() and the hashtable emptied. As numerous pending hits
 * may be accounted to a profile buffer slot in a hashtable entry,
 * this amortizes a number of atomic profile buffer increments likely
 * to be far larger than the number of entries in the hashtable,
 * particularly given that the number of distinct profile buffer
 * positions to which hits are accounted during short intervals (e.g.
 * several seconds) is usually very small. Exclusion from buffer
 * flipping is provided by interrupt disablement (note that for
 * SCHED_PROFILING or SLEEP_PROFILING profile_hit() may be called from
 * process context).
 *
 * The hash function is meant to be lightweight as opposed to strong,
 * and was vaguely inspired by ppc64 firmware-supported inverted
 * pagetable hash functions, but uses a full hashtable full of finite
 * collision chains, not just pairs of them.
 *
 * -- nyc
 */
static void __profile_flip_buffers(void *unused)
{
	int cpu = smp_processor_id();

	per_cpu(cpu_profile_flip, cpu) = !per_cpu(cpu_profile_flip, cpu);
}

static void profile_flip_buffers(void)
{
	int i, j, cpu;

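	/*
	 * Snapshot the flip index that is current right now, then flip
	 * every cpu over to the other hashtable; once on_each_cpu()
	 * returns, no cpu is still filling buffer j, so it can be
	 * drained into prof_buffer without racing the writers.
	 */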
	mutex_lock(&profile_flip_mutex);
	j = per_cpu(cpu_profile_flip, get_cpu());
	put_cpu();
	on_each_cpu(__profile_flip_buffers, NULL, 1);
	for_each_online_cpu(cpu) {
		struct profile_hit *hits = per_cpu(cpu_profile_hits, cpu)[j];
		for (i = 0; i < NR_PROFILE_HIT; ++i) {
			if (!hits[i].hits) {
				if (hits[i].pc)
					hits[i].pc = 0;
				continue;
			}
			atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
			hits[i].hits = hits[i].pc = 0;
		}
	}
	mutex_unlock(&profile_flip_mutex);
}

static void profile_discard_flip_buffers(void)
{
	int i, cpu;

	mutex_lock(&profile_flip_mutex);
	i = per_cpu(cpu_profile_flip, get_cpu());
	put_cpu();
	on_each_cpu(__profile_flip_buffers, NULL, 1);
	for_each_online_cpu(cpu) {
		struct profile_hit *hits = per_cpu(cpu_profile_hits, cpu)[i];
		memset(hits, 0, NR_PROFILE_HIT*sizeof(struct profile_hit));
	}
	mutex_unlock(&profile_flip_mutex);
}

static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
{
	unsigned long primary, secondary, flags, pc = (unsigned long)__pc;
	int i, j, cpu;
	struct profile_hit *hits;

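	/*
	 * pc is first scaled down to a profile-buffer slot.  The slot's
	 * low bits pick the primary probe group; secondary is the probe
	 * stride, and because it is always an odd multiple of
	 * PROFILE_GRPSZ, the do-loop below visits every group once
	 * before wrapping back around to primary.
	 */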
	pc = min((pc - (unsigned long)_stext) >> prof_shift, prof_len - 1);
	i = primary = (pc & (NR_PROFILE_GRP - 1)) << PROFILE_GRPSHIFT;
	secondary = (~(pc << 1) & (NR_PROFILE_GRP - 1)) << PROFILE_GRPSHIFT;
	cpu = get_cpu();
	hits = per_cpu(cpu_profile_hits, cpu)[per_cpu(cpu_profile_flip, cpu)];
	if (!hits) {
		put_cpu();
		return;
	}
	/*
	 * We buffer the global profiler buffer into a per-CPU
	 * queue and thus reduce the number of global (and possibly
	 * NUMA-alien) accesses. The write-queue is self-coalescing:
	 */
	local_irq_save(flags);
	do {
		for (j = 0; j < PROFILE_GRPSZ; ++j) {
			if (hits[i + j].pc == pc) {
				hits[i + j].hits += nr_hits;
				goto out;
			} else if (!hits[i + j].hits) {
				hits[i + j].pc = pc;
				hits[i + j].hits = nr_hits;
				goto out;
			}
		}
		i = (i + secondary) & (NR_PROFILE_HIT - 1);
	} while (i != primary);

	/*
	 * Add the current hit(s) and flush the write-queue out
	 * to the global buffer:
	 */
	atomic_add(nr_hits, &prof_buffer[pc]);
	for (i = 0; i < NR_PROFILE_HIT; ++i) {
		atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
		hits[i].pc = hits[i].hits = 0;
	}
out:
	local_irq_restore(flags);
	put_cpu();
}

static int profile_dead_cpu(unsigned int cpu)
{
	struct page *page;
	int i;

	if (cpumask_available(prof_cpu_mask))
		cpumask_clear_cpu(cpu, prof_cpu_mask);

	for (i = 0; i < 2; i++) {
		if (per_cpu(cpu_profile_hits, cpu)[i]) {
			page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[i]);
			per_cpu(cpu_profile_hits, cpu)[i] = NULL;
			__free_page(page);
		}
	}
	return 0;
}

static int profile_prepare_cpu(unsigned int cpu)
{
	int i, node = cpu_to_mem(cpu);
	struct page *page;

	per_cpu(cpu_profile_flip, cpu) = 0;

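	/*
	 * Allocate both hit buffers from the cpu's local memory node so
	 * that hot-path hashtable updates stay node-local; remote
	 * traffic then only occurs when the buffers are drained.
	 */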
	for (i = 0; i < 2; i++) {
		if (per_cpu(cpu_profile_hits, cpu)[i])
			continue;

		page = __alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0);
		if (!page) {
			profile_dead_cpu(cpu);
			return -ENOMEM;
		}
		per_cpu(cpu_profile_hits, cpu)[i] = page_address(page);
	}
	return 0;
}

static int profile_online_cpu(unsigned int cpu)
{
	if (cpumask_available(prof_cpu_mask))
		cpumask_set_cpu(cpu, prof_cpu_mask);

	return 0;
}

#else /* !CONFIG_SMP */
#define profile_flip_buffers()		do { } while (0)
#define profile_discard_flip_buffers()	do { } while (0)

static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
{
	unsigned long pc;
	pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
	atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
}
#endif /* !CONFIG_SMP */

void profile_hits(int type, void *__pc, unsigned int nr_hits)
{
	if (prof_on != type || !prof_buffer)
		return;
	do_profile_hits(type, __pc, nr_hits);
}
EXPORT_SYMBOL_GPL(profile_hits);

void profile_tick(int type)
{
	struct pt_regs *regs = get_irq_regs();

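	/*
	 * Only kernel-mode samples are taken, and only on cpus that are
	 * currently set in prof_cpu_mask.
	 */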
	if (!user_mode(regs) && cpumask_available(prof_cpu_mask) &&
	    cpumask_test_cpu(smp_processor_id(), prof_cpu_mask))
		profile_hit(type, (void *)profile_pc(regs));
}

#ifdef CONFIG_PROC_FS
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>

static int prof_cpu_mask_proc_show(struct seq_file *m, void *v)
{
	seq_printf(m, "%*pb\n", cpumask_pr_args(prof_cpu_mask));
	return 0;
}

static int prof_cpu_mask_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, prof_cpu_mask_proc_show, NULL);
}

static ssize_t prof_cpu_mask_proc_write(struct file *file,
	const char __user *buffer, size_t count, loff_t *pos)
{
	cpumask_var_t new_value;
	int err;

	if (!zalloc_cpumask_var(&new_value, GFP_KERNEL))
		return -ENOMEM;

	err = cpumask_parse_user(buffer, count, new_value);
	if (!err) {
		cpumask_copy(prof_cpu_mask, new_value);
		err = count;
	}
	free_cpumask_var(new_value);
	return err;
}
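/*
 * The written value is parsed as a standard hex cpumask; for example,
 * "echo 3 > /proc/irq/prof_cpu_mask" restricts profiling to cpus 0-1.
 */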

static const struct proc_ops prof_cpu_mask_proc_ops = {
	.proc_open	= prof_cpu_mask_proc_open,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_release	= single_release,
	.proc_write	= prof_cpu_mask_proc_write,
};

void create_prof_cpu_mask(void)
{
	/* create /proc/irq/prof_cpu_mask */
	proc_create("irq/prof_cpu_mask", 0600, NULL, &prof_cpu_mask_proc_ops);
}

/*
 * This function accesses profiling information. The returned data is
 * binary: the sampling step and the actual contents of the profile
 * buffer. Use of the program readprofile is recommended in order to
 * get meaningful info out of these data.
 */
static ssize_t
read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t read;
	char *pnt;
	unsigned int sample_step = 1 << prof_shift;

	profile_flip_buffers();
	if (p >= (prof_len+1)*sizeof(unsigned int))
		return 0;
	if (count > (prof_len+1)*sizeof(unsigned int) - p)
		count = (prof_len+1)*sizeof(unsigned int) - p;
	read = 0;

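	/*
	 * The first sizeof(unsigned int) bytes of the file hold the
	 * sample step (1 << prof_shift); copy them out byte by byte in
	 * case the read starts or ends inside that word.
	 */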
	while (p < sizeof(unsigned int) && count > 0) {
		if (put_user(*((char *)(&sample_step)+p), buf))
			return -EFAULT;
		buf++; p++; count--; read++;
	}
	pnt = (char *)prof_buffer + p - sizeof(atomic_t);
	if (copy_to_user(buf, (void *)pnt, count))
		return -EFAULT;
	read += count;
	*ppos += read;
	return read;
}

/*
 * Writing to /proc/profile resets the counters
 *
 * Writing a 'profiling multiplier' value into it also re-sets the profiling
 * interrupt frequency, on architectures that support this.
 */
static ssize_t write_profile(struct file *file, const char __user *buf,
			     size_t count, loff_t *ppos)
{
#ifdef CONFIG_SMP
	extern int setup_profiling_timer(unsigned int multiplier);

	if (count == sizeof(int)) {
		unsigned int multiplier;

		if (copy_from_user(&multiplier, buf, sizeof(int)))
			return -EFAULT;

		if (setup_profiling_timer(multiplier))
			return -EINVAL;
	}
#endif
	profile_discard_flip_buffers();
	memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
	return count;
}
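/*
 * For example, "echo > /proc/profile" simply clears the counters; on
 * SMP, a write of exactly sizeof(int) binary bytes is additionally
 * interpreted as a new profiling multiplier before the reset.
 */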

static const struct proc_ops profile_proc_ops = {
	.proc_read	= read_profile,
	.proc_write	= write_profile,
	.proc_lseek	= default_llseek,
};

int __ref create_proc_profile(void)
{
	struct proc_dir_entry *entry;
#ifdef CONFIG_SMP
	enum cpuhp_state online_state;
#endif

	int err = 0;

	if (!prof_on)
		return 0;
#ifdef CONFIG_SMP
	err = cpuhp_setup_state(CPUHP_PROFILE_PREPARE, "PROFILE_PREPARE",
				profile_prepare_cpu, profile_dead_cpu);
	if (err)
		return err;

	err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "AP_PROFILE_ONLINE",
				profile_online_cpu, NULL);
	if (err < 0)
		goto err_state_prep;
	online_state = err;
	err = 0;
#endif
	entry = proc_create("profile", S_IWUSR | S_IRUGO,
			    NULL, &profile_proc_ops);
	if (!entry) {
		err = -ENOMEM;
		goto err_state_onl;
	}
	proc_set_size(entry, (1 + prof_len) * sizeof(atomic_t));

	return err;
err_state_onl:
#ifdef CONFIG_SMP
	cpuhp_remove_state(online_state);
err_state_prep:
	cpuhp_remove_state(CPUHP_PROFILE_PREPARE);
#endif
	return err;
}
subsys_initcall(create_proc_profile);
#endif /* CONFIG_PROC_FS */