// SPDX-License-Identifier: GPL-2.0
/*
 * preemptoff and irqoff tracepoints
 *
 * Copyright (C) Joel Fernandes (Google) <joel@joelfernandes.org>
 */

#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/kprobes.h>
#include "trace.h"

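/*
 * Defining CREATE_TRACE_POINTS before including the event header makes
 * this file the translation unit that emits the actual tracepoint
 * definitions for the preempt/irq enable and disable events.
 */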
#define CREATE_TRACE_POINTS
#include <trace/events/preemptirq.h>

#ifdef CONFIG_TRACE_IRQFLAGS
/* Per-cpu variable to prevent redundant calls when IRQs already off */
static DEFINE_PER_CPU(int, tracing_irq_cpu);

/*
 * Like trace_hardirqs_on() but without the lockdep invocation. This is
 * used in the low level entry code where the ordering vs. RCU is important
 * and lockdep uses a staged approach which splits the lockdep hardirq
 * tracking into a RCU on and a RCU off section.
 */
void trace_hardirqs_on_prepare(void)
{
	if (this_cpu_read(tracing_irq_cpu)) {
		if (!in_nmi())
			trace_irq_enable(CALLER_ADDR0, CALLER_ADDR1);
		tracer_hardirqs_on(CALLER_ADDR0, CALLER_ADDR1);
		this_cpu_write(tracing_irq_cpu, 0);
	}
}
EXPORT_SYMBOL(trace_hardirqs_on_prepare);
NOKPROBE_SYMBOL(trace_hardirqs_on_prepare);

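/*
 * Illustrative sketch, not part of this file: the staged approach above
 * lets the low level entry code order the tracing and lockdep updates
 * around the RCU state transition, roughly:
 *
 *	instrumentation_begin();
 *	trace_hardirqs_on_prepare();
 *	lockdep_hardirqs_on_prepare(CALLER_ADDR0);
 *	instrumentation_end();
 *	// ... leave the RCU-watching section ...
 *	lockdep_hardirqs_on(CALLER_ADDR0);
 *
 * The exact call sites live in the generic and architecture entry code
 * and vary between kernel versions.
 */
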
void trace_hardirqs_on(void)
{
	if (this_cpu_read(tracing_irq_cpu)) {
		if (!in_nmi())
			trace_irq_enable_rcuidle(CALLER_ADDR0, CALLER_ADDR1);
		tracer_hardirqs_on(CALLER_ADDR0, CALLER_ADDR1);
		this_cpu_write(tracing_irq_cpu, 0);
	}

	lockdep_hardirqs_on_prepare(CALLER_ADDR0);
	lockdep_hardirqs_on(CALLER_ADDR0);
}
EXPORT_SYMBOL(trace_hardirqs_on);
NOKPROBE_SYMBOL(trace_hardirqs_on);

/*
 * Like trace_hardirqs_off() but without the lockdep invocation. This is
 * used in the low level entry code where the ordering vs. RCU is important
 * and lockdep uses a staged approach which splits the lockdep hardirq
 * tracking into a RCU on and a RCU off section.
 */
void trace_hardirqs_off_finish(void)
{
	if (!this_cpu_read(tracing_irq_cpu)) {
		this_cpu_write(tracing_irq_cpu, 1);
		tracer_hardirqs_off(CALLER_ADDR0, CALLER_ADDR1);
		if (!in_nmi())
			trace_irq_disable(CALLER_ADDR0, CALLER_ADDR1);
	}
}
EXPORT_SYMBOL(trace_hardirqs_off_finish);
NOKPROBE_SYMBOL(trace_hardirqs_off_finish);

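/*
 * Note the ordering: lockdep is told that interrupts are off before any
 * tracing hook runs. This mirrors trace_hardirqs_on() above, which
 * informs lockdep only after the tracing hooks have fired.
 */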
void trace_hardirqs_off(void)
{
	lockdep_hardirqs_off(CALLER_ADDR0);

	if (!this_cpu_read(tracing_irq_cpu)) {
		this_cpu_write(tracing_irq_cpu, 1);
		tracer_hardirqs_off(CALLER_ADDR0, CALLER_ADDR1);
		if (!in_nmi())
			trace_irq_disable_rcuidle(CALLER_ADDR0, CALLER_ADDR1);
	}
}
EXPORT_SYMBOL(trace_hardirqs_off);
NOKPROBE_SYMBOL(trace_hardirqs_off);

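/*
 * The *_caller() variants below take an explicit caller address instead
 * of deriving it via CALLER_ADDR1. They are marked __visible because
 * they may be invoked from outside normal C code, such as architecture
 * assembly.
 */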
__visible void trace_hardirqs_on_caller(unsigned long caller_addr)
{
	if (this_cpu_read(tracing_irq_cpu)) {
		if (!in_nmi())
			trace_irq_enable_rcuidle(CALLER_ADDR0, caller_addr);
		tracer_hardirqs_on(CALLER_ADDR0, caller_addr);
		this_cpu_write(tracing_irq_cpu, 0);
	}

	lockdep_hardirqs_on_prepare(CALLER_ADDR0);
	lockdep_hardirqs_on(CALLER_ADDR0);
}
EXPORT_SYMBOL(trace_hardirqs_on_caller);
NOKPROBE_SYMBOL(trace_hardirqs_on_caller);

__visible void trace_hardirqs_off_caller(unsigned long caller_addr)
{
	lockdep_hardirqs_off(CALLER_ADDR0);

	if (!this_cpu_read(tracing_irq_cpu)) {
		this_cpu_write(tracing_irq_cpu, 1);
		tracer_hardirqs_off(CALLER_ADDR0, caller_addr);
		if (!in_nmi())
			trace_irq_disable_rcuidle(CALLER_ADDR0, caller_addr);
	}
}
EXPORT_SYMBOL(trace_hardirqs_off_caller);
NOKPROBE_SYMBOL(trace_hardirqs_off_caller);
#endif /* CONFIG_TRACE_IRQFLAGS */

#ifdef CONFIG_TRACE_PREEMPT_TOGGLE

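/*
 * Called on the outermost preempt disable/enable transitions; a0 and a1
 * are the caller and parent-caller addresses supplied by the preempt
 * count machinery. Illustrative sketch of a call site, roughly how the
 * scheduler's preempt-latency helpers invoke these (not part of this
 * file):
 *
 *	preempt_count_add(val);
 *	if (preempt_count() == val)
 *		trace_preempt_off(CALLER_ADDR0, get_lock_parent_ip());
 */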
void trace_preempt_on(unsigned long a0, unsigned long a1)
{
	if (!in_nmi())
		trace_preempt_enable_rcuidle(a0, a1);
	tracer_preempt_on(a0, a1);
}

void trace_preempt_off(unsigned long a0, unsigned long a1)
{
	if (!in_nmi())
		trace_preempt_disable_rcuidle(a0, a1);
	tracer_preempt_off(a0, a1);
}
#endif /* CONFIG_TRACE_PREEMPT_TOGGLE */