// SPDX-License-Identifier: GPL-2.0
/*
 * trace_hwlatdetect.c - A simple Hardware Latency detector.
 *
 * Use this tracer to detect large system latencies induced by the behavior of
 * certain underlying system hardware or firmware, independent of Linux itself.
 * The code was developed originally to detect the presence of SMIs on Intel
 * and AMD systems, although there is no dependency upon x86 herein.
 *
 * The classical example usage of this tracer is in detecting the presence of
 * SMIs or System Management Interrupts on Intel and AMD systems. An SMI is a
 * somewhat special form of hardware interrupt spawned from earlier CPU debug
 * modes in which the (BIOS/EFI/etc.) firmware arranges for the South Bridge
 * LPC (or other device) to generate a special interrupt under certain
 * circumstances, for example, upon expiration of a special SMI timer device,
 * due to certain external thermal readings, on certain I/O address accesses,
 * and other situations. An SMI hits a special CPU pin, triggers a special
 * SMI mode (complete with special memory map), and the OS is unaware.
 *
 * Although certain hardware-induced latencies are necessary (for example,
 * a modern system often requires an SMI handler for correct thermal control
 * and remote management), they can wreak havoc upon any OS-level low-latency
 * performance guarantees, especially when the OS is not even made aware of
 * the presence of these interrupts. For this reason, we need a somewhat
 * brute force mechanism to detect these interrupts. In this case, we do it
 * by hogging all of the CPU(s) for configurable timer intervals, sampling
 * the built-in CPU timer and looking for discontiguous readings.
 *
 * WARNING: This implementation necessarily introduces latencies. Therefore,
 *          you should NEVER use this tracer while running in a production
 *          environment requiring any kind of low-latency performance
 *          guarantee(s).
 *
 * Copyright (C) 2008-2009 Jon Masters, Red Hat, Inc. <jcm@redhat.com>
 * Copyright (C) 2013-2016 Steven Rostedt, Red Hat, Inc. <srostedt@redhat.com>
 *
 * Includes useful feedback from Clark Williams <clark@redhat.com>
 *
 */
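
/*
 * Illustrative usage from userspace (a sketch; assumes tracefs is mounted
 * at /sys/kernel/tracing, adjust the path for your system):
 *
 *	cd /sys/kernel/tracing
 *	echo 10 > tracing_thresh		# report latencies over 10 usecs
 *	echo 1000000 > hwlat_detector/window	# total window, in usecs
 *	echo 500000 > hwlat_detector/width	# sampled part of each window
 *	echo hwlat > current_tracer
 *	cat trace
 *
 * The width must stay strictly smaller than the window, as enforced by the
 * write handlers below.
 */
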
#include <linux/kthread.h>
#include <linux/tracefs.h>
#include <linux/uaccess.h>
#include <linux/cpumask.h>
#include <linux/delay.h>
#include <linux/sched/clock.h>
#include "trace.h"

static struct trace_array	*hwlat_trace;

#define U64STR_SIZE		22			/* 20 digits max */

#define BANNER			"hwlat_detector: "
#define DEFAULT_SAMPLE_WINDOW	1000000			/* 1s */
#define DEFAULT_SAMPLE_WIDTH	500000			/* 0.5s */
#define DEFAULT_LAT_THRESHOLD	10			/* 10us */
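/*
 * i.e. by default, sample for half of each one second window and treat
 * any timer discontinuity longer than 10 usecs as a hardware latency.
 */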

/* sampling thread */
static struct task_struct *hwlat_kthread;

static struct dentry *hwlat_sample_width;	/* sample width us */
static struct dentry *hwlat_sample_window;	/* sample window us */

/* Save the previous tracing_thresh value */
static unsigned long save_tracing_thresh;

/* NMI timestamp counters */
static u64 nmi_ts_start;
static u64 nmi_total_ts;
static int nmi_count;
static int nmi_cpu;

/* Tells NMIs to call back to the hwlat tracer to record timestamps */
bool trace_hwlat_callback_enabled;

/* If the user changed threshold, remember it */
static u64 last_tracing_thresh = DEFAULT_LAT_THRESHOLD * NSEC_PER_USEC;

/* Individual latency samples are stored here when detected. */
struct hwlat_sample {
	u64			seqnum;		/* unique sequence */
	u64			duration;	/* delta */
	u64			outer_duration;	/* delta (outer loop) */
	u64			nmi_total_ts;	/* Total time spent in NMIs */
	struct timespec64	timestamp;	/* wall time */
	int			nmi_count;	/* # NMIs during this sample */
};

/* keep the global state somewhere. */
static struct hwlat_data {

	struct mutex lock;		/* protect changes */

	u64	count;			/* total since reset */

	u64	sample_window;		/* total sampling window (on+off) */
	u64	sample_width;		/* active sampling portion of window */

} hwlat_data = {
	.sample_window		= DEFAULT_SAMPLE_WINDOW,
	.sample_width		= DEFAULT_SAMPLE_WIDTH,
};

static void trace_hwlat_sample(struct hwlat_sample *sample)
{
	struct trace_array *tr = hwlat_trace;
	struct trace_event_call *call = &event_hwlat;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	struct ring_buffer_event *event;
	struct hwlat_entry *entry;
	unsigned long flags;
	int pc;

	pc = preempt_count();
	local_save_flags(flags);

	event = trace_buffer_lock_reserve(buffer, TRACE_HWLAT, sizeof(*entry),
					  flags, pc);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	entry->seqnum		= sample->seqnum;
	entry->duration		= sample->duration;
	entry->outer_duration	= sample->outer_duration;
	entry->timestamp	= sample->timestamp;
	entry->nmi_total_ts	= sample->nmi_total_ts;
	entry->nmi_count	= sample->nmi_count;

	if (!call_filter_check_discard(call, entry, buffer, event))
		trace_buffer_unlock_commit_nostack(buffer, event);
}

/* Macros to encapsulate the time capturing infrastructure */
#define time_type	u64
#define time_get()	trace_clock_local()
#define time_to_us(x)	div_u64(x, 1000)
#define time_sub(a, b)	((a) - (b))
#define init_time(a, b)	(a = b)
#define time_u64(a)	a
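/*
 * For example, the inner delta computed below as
 * time_to_us(time_sub(t2, t1)) expands to div_u64(t2 - t1, 1000):
 * the difference of two trace_clock_local() nanosecond readings,
 * converted to microseconds.
 */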

void trace_hwlat_callback(bool enter)
{
	if (smp_processor_id() != nmi_cpu)
		return;

	/*
	 * Currently trace_clock_local() calls sched_clock() and the
	 * generic version is not NMI safe.
	 */
	if (!IS_ENABLED(CONFIG_GENERIC_SCHED_CLOCK)) {
		if (enter)
			nmi_ts_start = time_get();
		else
			nmi_total_ts += time_get() - nmi_ts_start;
	}

	if (enter)
		nmi_count++;
}

/**
 * get_sample - sample the CPU TSC and look for likely hardware latencies
 *
 * Used to repeatedly capture the CPU TSC (or similar), looking for potential
 * hardware-induced latency. Called with interrupts disabled and with
 * hwlat_data.lock held.
 */
static int get_sample(void)
{
	struct trace_array *tr = hwlat_trace;
	time_type start, t1, t2, last_t2;
	s64 diff, total, last_total = 0;
	u64 sample = 0;
	u64 thresh = tracing_thresh;
	u64 outer_sample = 0;
	int ret = -1;

	do_div(thresh, NSEC_PER_USEC); /* modifies thresh value */

	nmi_cpu = smp_processor_id();
	nmi_total_ts = 0;
	nmi_count = 0;
	/* Make sure NMIs see this first */
	barrier();

	trace_hwlat_callback_enabled = true;

	init_time(last_t2, 0);
	start = time_get(); /* start timestamp */

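	/*
	 * Take two back-to-back clock readings on every iteration. The
	 * inner delta (t2 - t1) catches a latency landing between the
	 * paired reads; the outer delta (t1 - previous t2) catches one
	 * landing between loop iterations. Either exceeding the
	 * threshold counts as a hardware latency.
	 */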
	do {

		t1 = time_get();	/* we'll look for a discontinuity */
		t2 = time_get();

		if (time_u64(last_t2)) {
			/* Check the delta from outer loop (t2 to next t1) */
			diff = time_to_us(time_sub(t1, last_t2));
			/* This shouldn't happen */
			if (diff < 0) {
				pr_err(BANNER "time running backwards\n");
				goto out;
			}
			if (diff > outer_sample)
				outer_sample = diff;
		}
		last_t2 = t2;

		total = time_to_us(time_sub(t2, start)); /* sample width */

		/* Check for possible overflows */
		if (total < last_total) {
			pr_err(BANNER "time total overflowed\n");
			break;
		}
		last_total = total;

		/* This checks the inner loop (t1 to t2) */
		diff = time_to_us(time_sub(t2, t1)); /* current diff */

		/* This shouldn't happen */
		if (diff < 0) {
			pr_err(BANNER "time running backwards\n");
			goto out;
		}

		if (diff > sample)
			sample = diff; /* only want highest value */

	} while (total <= hwlat_data.sample_width);

	barrier(); /* finish the above in the view of NMIs */
	trace_hwlat_callback_enabled = false;
	barrier(); /* Make sure nmi_total_ts is no longer updated */

	ret = 0;

	/* If we exceed the threshold value, we have found a hardware latency */
	if (sample > thresh || outer_sample > thresh) {
		struct hwlat_sample s;

		ret = 1;

		/* We read in microseconds */
		if (nmi_total_ts)
			do_div(nmi_total_ts, NSEC_PER_USEC);

		hwlat_data.count++;
		s.seqnum = hwlat_data.count;
		s.duration = sample;
		s.outer_duration = outer_sample;
		ktime_get_real_ts64(&s.timestamp);
		s.nmi_total_ts = nmi_total_ts;
		s.nmi_count = nmi_count;
		trace_hwlat_sample(&s);

		/* Keep a running maximum ever recorded hardware latency */
		if (sample > tr->max_latency)
			tr->max_latency = sample;
	}

out:
	return ret;
}

static struct cpumask save_cpumask;
static bool disable_migrate;

static void move_to_next_cpu(void)
{
	struct cpumask *current_mask = &save_cpumask;
	int next_cpu;

	if (disable_migrate)
		return;
	/*
	 * If for some reason the user modifies the CPU affinity
	 * of this thread, then stop migrating for the duration
	 * of the current test.
	 */
	if (!cpumask_equal(current_mask, current->cpus_ptr))
		goto disable;

	get_online_cpus();
	cpumask_and(current_mask, cpu_online_mask, tracing_buffer_mask);
	next_cpu = cpumask_next(smp_processor_id(), current_mask);
	put_online_cpus();

	if (next_cpu >= nr_cpu_ids)
		next_cpu = cpumask_first(current_mask);

	if (next_cpu >= nr_cpu_ids) /* Shouldn't happen! */
		goto disable;

	cpumask_clear(current_mask);
	cpumask_set_cpu(next_cpu, current_mask);

	sched_setaffinity(0, current_mask);
	return;

 disable:
	disable_migrate = true;
}

/*
 * kthread_fn - The CPU time sampling/hardware latency detection kernel thread
 *
 * Used to periodically sample the CPU TSC via a call to get_sample. We
 * disable interrupts, which does (intentionally) introduce latency since we
 * need to ensure nothing else might be running (and thus preempting).
 * Obviously this should never be used in production environments.
 *
 * Executes one loop iteration on each CPU in the tracing_cpumask sysfs file.
 */
static int kthread_fn(void *data)
{
	u64 interval;

	while (!kthread_should_stop()) {

		move_to_next_cpu();

		local_irq_disable();
		get_sample();
		local_irq_enable();

		mutex_lock(&hwlat_data.lock);
		interval = hwlat_data.sample_window - hwlat_data.sample_width;
		mutex_unlock(&hwlat_data.lock);

		do_div(interval, USEC_PER_MSEC); /* modifies interval value */
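		/*
		 * e.g. with the defaults, (1000000 - 500000) usecs becomes
		 * a 500 msec sleep: a 50% duty cycle for each window.
		 */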

		/* Always sleep for at least 1ms */
		if (interval < 1)
			interval = 1;

		if (msleep_interruptible(interval))
			break;
	}

	return 0;
}

/**
 * start_kthread - Kick off the hardware latency sampling/detector kthread
 *
 * This starts the kernel thread that will sit and sample the CPU timestamp
 * counter (TSC or similar) and look for potential hardware latencies.
 */
static int start_kthread(struct trace_array *tr)
{
	struct cpumask *current_mask = &save_cpumask;
	struct task_struct *kthread;
	int next_cpu;

	if (WARN_ON(hwlat_kthread))
		return 0;

	/* Just pick the first CPU on first iteration */
	current_mask = &save_cpumask;
	get_online_cpus();
	cpumask_and(current_mask, cpu_online_mask, tracing_buffer_mask);
	put_online_cpus();
	next_cpu = cpumask_first(current_mask);

	kthread = kthread_create(kthread_fn, NULL, "hwlatd");
	if (IS_ERR(kthread)) {
		pr_err(BANNER "could not start sampling thread\n");
		return -ENOMEM;
	}

	cpumask_clear(current_mask);
	cpumask_set_cpu(next_cpu, current_mask);
	sched_setaffinity(kthread->pid, current_mask);

	hwlat_kthread = kthread;
	wake_up_process(kthread);

	return 0;
}

/**
 * stop_kthread - Inform the hardware latency sampling/detector kthread to stop
 *
 * This kicks the running hardware latency sampling/detector kernel thread and
 * tells it to stop sampling now. Use this on unload and at system shutdown.
 */
static void stop_kthread(void)
{
	if (!hwlat_kthread)
		return;
	kthread_stop(hwlat_kthread);
	hwlat_kthread = NULL;
}

/*
 * hwlat_read - Wrapper read function for reading both window and width
 * @filp: The active open file structure
 * @ubuf: The userspace provided buffer to read value into
 * @cnt: The maximum number of bytes to read
 * @ppos: The current "file" position
 *
 * This function provides a generic read implementation for the global state
 * "hwlat_data" structure filesystem entries.
 */
static ssize_t hwlat_read(struct file *filp, char __user *ubuf,
			  size_t cnt, loff_t *ppos)
{
	char buf[U64STR_SIZE];
	u64 *entry = filp->private_data;
	u64 val;
	int len;

	if (!entry)
		return -EFAULT;

	if (cnt > sizeof(buf))
		cnt = sizeof(buf);

	val = *entry;

	len = snprintf(buf, sizeof(buf), "%llu\n", val);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
}

/**
 * hwlat_width_write - Write function for "width" entry
 * @filp: The active open file structure
 * @ubuf: The user buffer that contains the value to write
 * @cnt: The maximum number of bytes to write to "file"
 * @ppos: The current position in @file
 *
 * This function provides a write implementation for the "width" interface
 * to the hardware latency detector. It can be used to configure how many
 * usecs out of the total window we will actively sample for any
 * hardware-induced latency periods. Obviously, it is not possible to
 * sample constantly and still have the system respond to a sample reader,
 * or, worse, without making the system appear to have gone out to lunch.
 * It is enforced that width is less than the total window size.
 */
static ssize_t
hwlat_width_write(struct file *filp, const char __user *ubuf,
		  size_t cnt, loff_t *ppos)
{
	u64 val;
	int err;

	err = kstrtoull_from_user(ubuf, cnt, 10, &val);
	if (err)
		return err;

	mutex_lock(&hwlat_data.lock);
	if (val < hwlat_data.sample_window)
		hwlat_data.sample_width = val;
	else
		err = -EINVAL;
	mutex_unlock(&hwlat_data.lock);

	if (err)
		return err;

	return cnt;
}

/**
 * hwlat_window_write - Write function for "window" entry
 * @filp: The active open file structure
 * @ubuf: The user buffer that contains the value to write
 * @cnt: The maximum number of bytes to write to "file"
 * @ppos: The current position in @file
 *
 * This function provides a write implementation for the "window" interface
 * to the hardware latency detector. The window is the total time
 * in us that will be considered one sample period. Conceptually, windows
 * occur back-to-back and contain a sample width period during which
 * actual sampling occurs. Can be used to write a new total window size. It
 * is enforced that any value written must be greater than the sample width
 * size, or an error results.
 */
static ssize_t
hwlat_window_write(struct file *filp, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	u64 val;
	int err;

	err = kstrtoull_from_user(ubuf, cnt, 10, &val);
	if (err)
		return err;

	mutex_lock(&hwlat_data.lock);
	if (hwlat_data.sample_width < val)
		hwlat_data.sample_window = val;
	else
		err = -EINVAL;
	mutex_unlock(&hwlat_data.lock);

	if (err)
		return err;

	return cnt;
}

static const struct file_operations width_fops = {
	.open		= tracing_open_generic,
	.read		= hwlat_read,
	.write		= hwlat_width_write,
};

static const struct file_operations window_fops = {
	.open		= tracing_open_generic,
	.read		= hwlat_read,
	.write		= hwlat_window_write,
};

/**
 * init_tracefs - A function to initialize the tracefs interface files
 *
 * This function creates entries in tracefs for "hwlat_detector".
 * It creates the hwlat_detector directory in the tracing directory,
 * and within that directory are the width and window files used to
 * change and view those values.
 */
static int init_tracefs(void)
{
	struct dentry *d_tracer;
	struct dentry *top_dir;

	d_tracer = tracing_init_dentry();
	if (IS_ERR(d_tracer))
		return -ENOMEM;

	top_dir = tracefs_create_dir("hwlat_detector", d_tracer);
	if (!top_dir)
		return -ENOMEM;

	hwlat_sample_window = tracefs_create_file("window", 0640,
						  top_dir,
						  &hwlat_data.sample_window,
						  &window_fops);
	if (!hwlat_sample_window)
		goto err;

	hwlat_sample_width = tracefs_create_file("width", 0640,
						 top_dir,
						 &hwlat_data.sample_width,
						 &width_fops);
	if (!hwlat_sample_width)
		goto err;

	return 0;

 err:
	tracefs_remove_recursive(top_dir);
	return -ENOMEM;
}

static void hwlat_tracer_start(struct trace_array *tr)
{
	int err;

	err = start_kthread(tr);
	if (err)
		pr_err(BANNER "Cannot start hwlat kthread\n");
}

static void hwlat_tracer_stop(struct trace_array *tr)
{
	stop_kthread();
}

static bool hwlat_busy;

static int hwlat_tracer_init(struct trace_array *tr)
{
	/* Only allow one instance to enable this */
	if (hwlat_busy)
		return -EBUSY;

	hwlat_trace = tr;

	disable_migrate = false;
	hwlat_data.count = 0;
	tr->max_latency = 0;
	save_tracing_thresh = tracing_thresh;

	/* tracing_thresh is in nsecs, we speak in usecs */
	if (!tracing_thresh)
		tracing_thresh = last_tracing_thresh;

	if (tracer_tracing_is_on(tr))
		hwlat_tracer_start(tr);

	hwlat_busy = true;

	return 0;
}

static void hwlat_tracer_reset(struct trace_array *tr)
{
	stop_kthread();

	/* the tracing threshold is static between runs */
	last_tracing_thresh = tracing_thresh;

	tracing_thresh = save_tracing_thresh;
	hwlat_busy = false;
}

static struct tracer hwlat_tracer __read_mostly =
{
	.name		= "hwlat",
	.init		= hwlat_tracer_init,
	.reset		= hwlat_tracer_reset,
	.start		= hwlat_tracer_start,
	.stop		= hwlat_tracer_stop,
	.allow_instances = true,
};

__init static int init_hwlat_tracer(void)
{
	int ret;

	mutex_init(&hwlat_data.lock);

	ret = register_tracer(&hwlat_tracer);
	if (ret)
		return ret;

	init_tracefs();

	return 0;
}
late_initcall(init_hwlat_tracer);