/* SPDX-License-Identifier: GPL-2.0 */
/*
 * include/linux/irqflags.h
 *
 * IRQ flags tracing: follow the state of the hardirq and softirq flags and
 * provide callbacks for transitions between ON and OFF states.
 *
 * This file gets included from lowlevel asm headers too, to provide
 * wrapped versions of the local_irq_*() APIs, based on the
 * raw_local_irq_*() macros from the lowlevel headers.
 */
#ifndef _LINUX_TRACE_IRQFLAGS_H
#define _LINUX_TRACE_IRQFLAGS_H

#include <linux/typecheck.h>
#include <asm/irqflags.h>
#include <asm/percpu.h>

/*
 * Currently lockdep_softirqs_on/off is used only by lockdep.
 *
 * With CONFIG_PROVE_LOCKING these hooks are real functions (implemented
 * in kernel/locking/lockdep.c) that record hardirq/softirq state
 * transitions at the given instruction pointer; without it they compile
 * away to empty static inlines so call sites need no #ifdefs.
 */
#ifdef CONFIG_PROVE_LOCKING
  extern void lockdep_softirqs_on(unsigned long ip);
  extern void lockdep_softirqs_off(unsigned long ip);
  extern void lockdep_hardirqs_on_prepare(unsigned long ip);
  extern void lockdep_hardirqs_on(unsigned long ip);
  extern void lockdep_hardirqs_off(unsigned long ip);
#else
  static inline void lockdep_softirqs_on(unsigned long ip) { }
  static inline void lockdep_softirqs_off(unsigned long ip) { }
  static inline void lockdep_hardirqs_on_prepare(unsigned long ip) { }
  static inline void lockdep_hardirqs_on(unsigned long ip) { }
  static inline void lockdep_hardirqs_off(unsigned long ip) { }
#endif
#ifdef CONFIG_TRACE_IRQFLAGS

/* Per-task IRQ trace events information. */
struct irqtrace_events {
	unsigned int	irq_events;
	unsigned long	hardirq_enable_ip;
	unsigned long	hardirq_disable_ip;
	unsigned int	hardirq_enable_event;
	unsigned int	hardirq_disable_event;
	unsigned long	softirq_disable_ip;
	unsigned long	softirq_enable_ip;
	unsigned int	softirq_disable_event;
	unsigned int	softirq_enable_event;
};

/* Per-CPU hardirq state tracked by lockdep (defined in lockdep.c). */
DECLARE_PER_CPU(int, hardirqs_enabled);
DECLARE_PER_CPU(int, hardirq_context);

extern void trace_hardirqs_on_prepare(void);
extern void trace_hardirqs_off_finish(void);
extern void trace_hardirqs_on(void);
extern void trace_hardirqs_off(void);

# define lockdep_hardirq_context()	(raw_cpu_read(hardirq_context))
# define lockdep_softirq_context(p)	((p)->softirq_context)
# define lockdep_hardirqs_enabled()	(this_cpu_read(hardirqs_enabled))
# define lockdep_softirqs_enabled(p)	((p)->softirqs_enabled)
/*
 * Entering the first (outermost) hardirq context clears the "threaded"
 * marker; lockdep_hardirq_threaded() sets it for forced-threaded
 * interrupt handlers.
 */
# define lockdep_hardirq_enter()			\
do {							\
	if (__this_cpu_inc_return(hardirq_context) == 1)\
		current->hardirq_threaded = 0;		\
} while (0)
# define lockdep_hardirq_threaded()		\
do {						\
	current->hardirq_threaded = 1;		\
} while (0)
# define lockdep_hardirq_exit()			\
do {						\
	__this_cpu_dec(hardirq_context);	\
} while (0)
# define lockdep_softirq_enter()		\
do {						\
	current->softirq_context++;		\
} while (0)
# define lockdep_softirq_exit()			\
do {						\
	current->softirq_context--;		\
} while (0)

/*
 * Evaluates to true when the hrtimer expires in hard interrupt context;
 * for soft-expiring timers it flags current->irq_config so lockdep can
 * check locking rules for the timer callback.
 */
# define lockdep_hrtimer_enter(__hrtimer)		\
({							\
	bool __expires_hardirq = true;			\
							\
	if (!__hrtimer->is_hard) {			\
		current->irq_config = 1;		\
		__expires_hardirq = false;		\
	}						\
	__expires_hardirq;				\
})

# define lockdep_hrtimer_exit(__expires_hardirq)	\
	do {						\
		if (!__expires_hardirq)			\
			current->irq_config = 0;	\
	} while (0)

# define lockdep_posixtimer_enter()				\
	do {							\
		current->irq_config = 1;			\
	} while (0)

# define lockdep_posixtimer_exit()				\
	do {							\
		current->irq_config = 0;			\
	} while (0)

/* Only non-IRQ_WORK_HARD_IRQ work may run in softirq context. */
# define lockdep_irq_work_enter(__work)					\
	do {								\
		if (!(atomic_read(&__work->flags) & IRQ_WORK_HARD_IRQ))\
			current->irq_config = 1;			\
	} while (0)
# define lockdep_irq_work_exit(__work)					\
	do {								\
		if (!(atomic_read(&__work->flags) & IRQ_WORK_HARD_IRQ))\
			current->irq_config = 0;			\
	} while (0)

#else
/* !CONFIG_TRACE_IRQFLAGS: all tracing hooks compile away to no-ops. */
# define trace_hardirqs_on_prepare()		do { } while (0)
# define trace_hardirqs_off_finish()		do { } while (0)
# define trace_hardirqs_on()			do { } while (0)
# define trace_hardirqs_off()			do { } while (0)
# define lockdep_hardirq_context()		0
# define lockdep_softirq_context(p)		0
# define lockdep_hardirqs_enabled()		0
# define lockdep_softirqs_enabled(p)		0
# define lockdep_hardirq_enter()		do { } while (0)
# define lockdep_hardirq_threaded()		do { } while (0)
# define lockdep_hardirq_exit()			do { } while (0)
# define lockdep_softirq_enter()		do { } while (0)
# define lockdep_softirq_exit()			do { } while (0)
# define lockdep_hrtimer_enter(__hrtimer)	false
# define lockdep_hrtimer_exit(__context)	do { } while (0)
# define lockdep_posixtimer_enter()		do { } while (0)
# define lockdep_posixtimer_exit()		do { } while (0)
# define lockdep_irq_work_enter(__work)		do { } while (0)
# define lockdep_irq_work_exit(__work)		do { } while (0)
#endif

/*
 * Hooks for the irqsoff/preemptoff latency tracers; real functions only
 * when one of those tracers is configured, no-ops otherwise.
 */
#if defined(CONFIG_IRQSOFF_TRACER) || \
	defined(CONFIG_PREEMPT_TRACER)
 extern void stop_critical_timings(void);
 extern void start_critical_timings(void);
#else
# define stop_critical_timings() do { } while (0)
# define start_critical_timings() do { } while (0)
#endif

/*
 * Wrap the arch provided IRQ routines to provide appropriate checks.
 * typecheck() enforces at compile time that callers pass an
 * unsigned long for the flags argument.
 */
#define raw_local_irq_disable()		arch_local_irq_disable()
#define raw_local_irq_enable()		arch_local_irq_enable()
#define raw_local_irq_save(flags)			\
	do {						\
		typecheck(unsigned long, flags);	\
		flags = arch_local_irq_save();		\
	} while (0)
#define raw_local_irq_restore(flags)			\
	do {						\
		typecheck(unsigned long, flags);	\
		arch_local_irq_restore(flags);		\
	} while (0)
#define raw_local_save_flags(flags)			\
	do {						\
		typecheck(unsigned long, flags);	\
		flags = arch_local_save_flags();	\
	} while (0)
#define raw_irqs_disabled_flags(flags)			\
	({						\
		typecheck(unsigned long, flags);	\
		arch_irqs_disabled_flags(flags);	\
	})
#define raw_irqs_disabled()		(arch_irqs_disabled())
#define raw_safe_halt()			arch_safe_halt()

/*
 * The local_irq_*() APIs are equal to the raw_local_irq*()
 * if !TRACE_IRQFLAGS.
 */
#ifdef CONFIG_TRACE_IRQFLAGS

#define local_irq_enable()				\
	do {						\
		trace_hardirqs_on();			\
		raw_local_irq_enable();			\
	} while (0)

/*
 * Only emit the off-event when IRQs actually transition from enabled to
 * disabled; a redundant disable must not be traced as a state change.
 */
#define local_irq_disable()				\
	do {						\
		bool was_disabled = raw_irqs_disabled();\
		raw_local_irq_disable();		\
		if (!was_disabled)			\
			trace_hardirqs_off();		\
	} while (0)

#define local_irq_save(flags)				\
	do {						\
		raw_local_irq_save(flags);		\
		if (!raw_irqs_disabled_flags(flags))	\
			trace_hardirqs_off();		\
	} while (0)

/* Trace the on-event before re-enabling, while IRQs are still off. */
#define local_irq_restore(flags)			\
	do {						\
		if (!raw_irqs_disabled_flags(flags))	\
			trace_hardirqs_on();		\
		raw_local_irq_restore(flags);		\
	} while (0)

#define safe_halt()				\
	do {					\
		trace_hardirqs_on();		\
		raw_safe_halt();		\
	} while (0)


#else /* !CONFIG_TRACE_IRQFLAGS */

#define local_irq_enable()	do { raw_local_irq_enable(); } while (0)
#define local_irq_disable()	do { raw_local_irq_disable(); } while (0)
#define local_irq_save(flags)	do { raw_local_irq_save(flags); } while (0)
#define local_irq_restore(flags) do { raw_local_irq_restore(flags); } while (0)
#define safe_halt()		do { raw_safe_halt(); } while (0)

#endif /* CONFIG_TRACE_IRQFLAGS */

#define local_save_flags(flags)	raw_local_save_flags(flags)

/*
 * Some architectures don't define arch_irqs_disabled(), so even if either
 * definition would be fine we need to use different ones for the time being
 * to avoid build issues.
 */
#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
#define irqs_disabled()					\
	({						\
		unsigned long _flags;			\
		raw_local_save_flags(_flags);		\
		raw_irqs_disabled_flags(_flags);	\
	})
#else /* !CONFIG_TRACE_IRQFLAGS_SUPPORT */
#define irqs_disabled()	raw_irqs_disabled()
#endif /* CONFIG_TRACE_IRQFLAGS_SUPPORT */

#define irqs_disabled_flags(flags) raw_irqs_disabled_flags(flags)

#endif /* _LINUX_TRACE_IRQFLAGS_H */