// SPDX-License-Identifier: GPL-2.0
/*
 * tracing clocks
 *
 *  Copyright (C) 2009 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Implements 3 trace clock variants, with differing scalability/precision
 * tradeoffs:
 *
 *  -   local: CPU-local trace clock
 *  -  medium: scalable global clock with some jitter
 *  -  global: globally monotonic, serialized clock
 *
 * Tracer plugins will choose a default from these clocks.
 */
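
/*
 * Usage note (an added illustration; the exact clock list varies by
 * kernel version and configuration): a clock is normally selected at
 * runtime through tracefs, e.g.:
 *
 *	# cat /sys/kernel/tracing/trace_clock
 *	[local] global counter uptime perf mono mono_raw boot
 *	# echo global > /sys/kernel/tracing/trace_clock
 */
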
#include <linux/spinlock.h>
#include <linux/irqflags.h>
#include <linux/hardirq.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/ktime.h>
#include <linux/trace_clock.h>

/*
 * trace_clock_local(): the simplest and least coherent tracing clock.
 *
 * Useful for tracing that does not cross to other CPUs nor
 * does it go through idle events.
 */
u64 notrace trace_clock_local(void)
{
	u64 clock;

	/*
	 * sched_clock() is an architecture implemented, fast, scalable,
	 * lockless clock. It is not guaranteed to be coherent across
	 * CPUs, nor across CPU idle events.
	 */
	preempt_disable_notrace();
	clock = sched_clock();
	preempt_enable_notrace();

	return clock;
}
EXPORT_SYMBOL_GPL(trace_clock_local);
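
/*
 * Example (a minimal sketch, not part of the original file; the timed
 * region and function name are hypothetical): trace_clock_local() is
 * only coherent on a single CPU, so a caller that wants a meaningful
 * delta must keep both reads on the same CPU, e.g. by disabling
 * preemption:
 */
#if 0
static u64 example_time_region(void)
{
	u64 t0, t1;

	preempt_disable();		/* no migration between the reads */
	t0 = trace_clock_local();
	/* ... hypothetical code being timed ... */
	t1 = trace_clock_local();
	preempt_enable();

	return t1 - t0;			/* nanosecond delta on one CPU */
}
#endif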

/*
 * trace_clock(): 'between' trace clock. Not completely serialized,
 * but not completely incorrect when crossing CPUs either.
 *
 * This is based on local_clock(), which will allow at most ~1 jiffy of
 * jitter between CPUs. So it's a pretty scalable clock, but there
 * can be offsets in the trace data.
 */
u64 notrace trace_clock(void)
{
	return local_clock();
}
EXPORT_SYMBOL_GPL(trace_clock);
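
/*
 * Worked example (an added illustration): with HZ=1000 a jiffy is
 * 1 ms, so two events stamped with this clock on different CPUs can
 * appear up to ~1,000,000 ns apart even if they were simultaneous.
 * Orderings across CPUs inside that window cannot be trusted;
 * ordering of events on the same CPU can.
 */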

/*
 * trace_clock_jiffies(): Simply use jiffies as a clock counter.
 * Note that this use of jiffies_64 is not completely safe on
 * 32-bit systems. But the window is tiny, and the worst effect
 * is an obviously bogus timestamp on a trace event - i.e. not
 * life threatening.
 */
u64 notrace trace_clock_jiffies(void)
{
	return jiffies_64_to_clock_t(jiffies_64 - INITIAL_JIFFIES);
}
EXPORT_SYMBOL_GPL(trace_clock_jiffies);
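
/*
 * Why the 64-bit read above can tear (an added note, assuming a 32-bit
 * architecture): there jiffies_64 is fetched with two 32-bit loads. If
 * the low word wraps from 0xffffffff to 0 between the two loads, the
 * combined value is off by 2^32 ticks, yielding one bogus timestamp.
 * The lock-protected reader avoids this at a higher cost, e.g.:
 *
 *	u64 safe = get_jiffies_64();	// seqlock-protected on 32-bit
 */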

/*
 * trace_clock_global(): special globally coherent trace clock
 *
 * It has higher overhead than the other trace clocks but is still
 * an order of magnitude faster than GTOD derived hardware clocks.
 *
 * Used by plugins that need globally coherent timestamps.
 */

/* keep prev_time and lock in the same cacheline. */
static struct {
	u64 prev_time;
	arch_spinlock_t lock;
} trace_clock_struct ____cacheline_aligned_in_smp =
	{
		.lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED,
	};

u64 notrace trace_clock_global(void)
{
	unsigned long flags;
	int this_cpu;
	u64 now, prev_time;

	raw_local_irq_save(flags);

	this_cpu = raw_smp_processor_id();

	/*
	 * The global clock "guarantees" that the events are ordered
	 * between CPUs. But if two events on two different CPUs call
	 * trace_clock_global at roughly the same time, it really does
	 * not matter which one gets the earlier time. Just make sure
	 * that the same CPU will always show a monotonic clock.
	 *
	 * Use a read memory barrier to get the latest written
	 * time that was recorded.
	 */
	smp_rmb();
	prev_time = READ_ONCE(trace_clock_struct.prev_time);
	now = sched_clock_cpu(this_cpu);

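	/*
	 * The comparison below does the subtraction in unsigned 64-bit
	 * arithmetic and reinterprets the result as s64, so it stays
	 * correct even if the clock value wraps: e.g. now = 2 and
	 * prev_time = 0xfffffffffffffffe give (s64)4 > 0, so "now" is
	 * still treated as the later time.
	 */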
	/* Make sure that now is always greater than or equal to prev_time */
	if ((s64)(now - prev_time) < 0)
		now = prev_time;

	/*
	 * If in an NMI context then don't risk lockups and simply return
	 * the current time: an NMI may have interrupted this CPU while it
	 * already held the lock, and spinning on it here would deadlock.
	 */
	if (unlikely(in_nmi()))
		goto out;

	/* Tracing can cause strange recursion, always use a trylock */
	if (arch_spin_trylock(&trace_clock_struct.lock)) {
		/* Reread prev_time in case it was already updated */
		prev_time = READ_ONCE(trace_clock_struct.prev_time);
		if ((s64)(now - prev_time) < 0)
			now = prev_time;

		trace_clock_struct.prev_time = now;

		/* The unlock acts as the wmb for the above rmb */
		arch_spin_unlock(&trace_clock_struct.lock);
	}
 out:
	raw_local_irq_restore(flags);

	return now;
}
EXPORT_SYMBOL_GPL(trace_clock_global);

static atomic64_t trace_counter;

/*
 * trace_clock_counter(): simply an atomic counter.
 * Use the trace_counter "counter" for cases where you do not care
 * about timings, but are interested in strict ordering.
 */
u64 notrace trace_clock_counter(void)
{
	return atomic64_add_return(1, &trace_counter);
}
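
/*
 * Ordering note (an added illustration, not from the original file):
 * atomic64_add_return() returns the post-increment value and the
 * increments on trace_counter form a single total order, so no two
 * callers ever observe the same value and later increments always
 * return larger ones - strict ordering with no timing information,
 * as described above.
 */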
158}